| id | content |
|---|---|
1662147
|
import asyncio # noqa
import datetime
import logging
import typing # noqa
from ib_async.errors import UnsupportedFeature
from ib_async.event import Event
from ib_async.execution import Execution, CommissionReport
from ib_async.instrument import Instrument, SecurityType
from ib_async.messages import Outgoing
from ib_async.protocol import ProtocolInterface, IncomingMessage, ProtocolVersion, RequestId
LOG = logging.getLogger(__name__)
class ExecutionsMixin(ProtocolInterface):
def __init__(self):
super().__init__()
self.__pending_execs = {} # type: typing.Dict[RequestId, typing.List[Execution]]
on_execution = Event() # type: Event[Execution]
    def get_executions(self, client_id=0, account_code="", time="", symbol="", security_type=SecurityType.Unspecified,
                       exchange="", side="") -> "asyncio.Future[typing.List[Execution]]":
request_id, future = self.make_future()
self.send_message(
Outgoing.REQ_EXECUTIONS, 3, request_id,
client_id, account_code, time, symbol, security_type, exchange, side
)
self.__pending_execs[request_id] = []
return future
def _handle_execution_data(self, request_id: RequestId, order_id: int, message: IncomingMessage):
        if message.message_version < 10:
            raise UnsupportedFeature("execution details before version v10")
execution = Execution(self, message.read(Instrument))
execution.order_id = order_id
execution.execution_id = message.read()
execution.time = message.read(datetime.datetime)
execution.account_number = message.read()
execution.exchange = message.read()
execution.side = message.read()
execution.share = message.read(float)
execution.price = message.read(float)
execution.perm_id = message.read(int)
execution.client_id = message.read(int)
execution.liquidation = message.read(int)
execution.cumulative_quantity = message.read(float)
execution.average_price = message.read(float)
execution.order_ref = message.read()
execution.ev_rule = message.read()
execution.ev_multiplier = message.read(float)
execution.model_code = message.read(min_version=ProtocolVersion.MODELS_SUPPORT)
execution.last_liquidity = message.read(int, min_version=ProtocolVersion.LAST_LIQUIDITY)
execs = self.__pending_execs.get(request_id)
if execs is not None:
execs.append(execution)
self.on_execution(execution)
execution.instrument.on_execution(execution)
if execution.order:
execution.order.on_execution(execution)
def _handle_execution_data_end(self, request_id: RequestId):
self.resolve_future(request_id,
self.__pending_execs.get(request_id) or [])
on_commission_report = Event() # type: Event[CommissionReport]
def _handle_commission_report(self, commission_report: CommissionReport):
self.on_commission_report(commission_report)
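# Usage sketch (hypothetical, assuming a client class that mixes this in and
# implements ProtocolInterface); the account code below is illustrative:
#
#     executions = await client.get_executions(account_code="DU123456")
#     for execution in executions:
#         print(execution.execution_id, execution.price)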
|
1662166
|
from math import factorial

f = factorial

def C(n, k):
    # binomial coefficient "n choose k"; integer division keeps exact values
    return f(n) // (f(k) * f(n - k))

# each row of Pascal's triangle sums to 2**i
for i in range(1, 20):
    s = 0
    for j in range(0, i + 1):
        s += C(i, j)
    print(s, end=' ')
print()
|
1662180
|
value_decay = 0.95
tau_decay = 0.8
class Config(dict):
def __init__(self, **kwargs):
# mode 1: training mode, 2: AI vs Human, 3: Human vs Human, 0: Debug
self['mode'] = 1
# display mode
self['display'] = False
# screen size of renderer
self['screen_size'] = (720, 720)
# self play mode
self['is_self_play'] = True
# true: 3-3, 4-4, 6+ are not allowed for black
self['forbidden_moves'] = False
# PUCT: when c_puct gets smaller, the simulation becomes deeper
self['c_puct'] = 5
# simulation times
self['simulation_times'] = 400
# initial tau
self['initial_tau'] = 1
# proportion of dirichlet noise
self['epsilon'] = 0.25
# coef of dirichlet noise
self['alpha'] = 0.03
# use dirichlet
self['use_dirichlet'] = False
# board size
self['board_size'] = 15
# epoch: number of games played to train
self['epoch'] = 20
# sample percentage
self['sample_percentage'] = 1
# number of games in each training epoch
self['games_num'] = 30
# learning rate
self['learning_rate'] = 2e-3
# momentum
self['momentum'] = 9e-1
# coefficient of l2 penalty
self['l2'] = 1e-4
# path of network parameters
self['net_para_file'] = 'AlphaGomoku/network/model/model_' + str(self['board_size']) + '.h5'
# path of history of fitting
self['fit_history_file'] = 'AlphaGomoku/network/history/log_' + str(self['board_size'])
# human play data path
self['human_play_data_path'] = 'AlphaGomoku/dataset/human_play_data/human_' + str(self['board_size']) + '_'
# self play data path
self['self_play_data_path'] = 'AlphaGomoku/dataset/self_play_data/self_play_' + str(
self['board_size']) + '_'
# generated data path
self['generated_data_path'] = 'AlphaGomoku/dataset/generated_data/gen_'
# use previous model
self['use_previous_model'] = True
# number of games played for evaluation, must be an even number!!!
self['evaluate_games_num'] = 20
# epoch from which evaluation starts
self['evaluate_start_epoch'] = 1
# Mini-Batch Size
self['mini_batch_size'] = 512
# fit epochs, number of each sample used
self['fit_epochs'] = 10
# use supervised learning
self['is_supervised'] = False
# careful stage
self['careful_stage'] = 6
# number of threads
self['threading_num'] = 8
# virtual loss
self['virtual_loss'] = 10
# show evaluation score given by agent
self['show_score'] = True
self.update(**kwargs)
def update(self, **kwargs):
for key in kwargs:
self[key] = kwargs[key]
def set_mode(self, mode):
if mode not in [1, 2, 2.5, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0]:
print('> Error: mode not found!')
mode = 1
if mode == 1:
self['display'] = False
self['is_self_play'] = True
self['mode'] = 1
self['show_score'] = False
print('> Training mode')
if mode == 2:
self['display'] = True
self['is_self_play'] = False
self['mode'] = 2
self['simulation_times'] = 800
self['show_score'] = False
print('> AI vs Human mode')
if mode == 2.5:
self['display'] = True
self['is_self_play'] = False
self['mode'] = 2.5
self['simulation_times'] = 800
self['show_score'] = False
print('> AI vs Human mode')
if mode == 3:
self['display'] = True
self['is_self_play'] = False
self['mode'] = 3
print('> Human vs Human mode')
if mode == 4:
self['display'] = False
self['is_self_play'] = False
self['show_score'] = False
self['mode'] = 4
self['simulation_times'] = 400
print('> AI vs AI mode')
if mode == 5:
self['display'] = True
self['is_self_play'] = False
self['mode'] = 5
self['games_num'] = 100
print('> Collect human play data mode')
if mode == 6:
self['display'] = False
self['is_self_play'] = True
self['mode'] = 6
self['games_num'] = 20
self['epoch'] = 10
self['simulation_times'] = 1600
self['careful_stage'] = 226
self['show_score'] = False
print('> Collect self play data mode')
if mode == 7:
self['display'] = False
self['is_self_play'] = True
self['mode'] = 7
self['is_supervised'] = True
self['show_score'] = False
print('> Train on external data mode')
if mode == 8:
self['display'] = True
self['is_self_play'] = False
self['mode'] = 8
print('> Collect human vs AI play data mode')
if mode == 9:
self['display'] = True
self['is_self_play'] = False
self['mode'] = 9
print('> AI(NaiveAgent) vs Human mode')
if mode == 10:
self['display'] = False
self['is_self_play'] = False
self['mode'] = 10
self['show_score'] = False
print('> AI vs AI(NaiveAgent) mode')
if mode == 11:
self['display'] = False
self['is_self_play'] = False
self['mode'] = 11
print('> Train on generated data mode')
self['simulation_times'] = 1600
self['games_num'] = 50
self['epoch'] = 100
self['show_score'] = False
if mode == 12:
self['display'] = False
self['is_self_play'] = False
self['mode'] = 12
self['games_num'] = 100
self['epoch'] = 20
self['show_score'] = True
print('> Collect self play data mode')
if mode == 13:
self['display'] = False
self['is_self_play'] = True
self['show_score'] = False
self['epoch'] = 10
self['games_num'] = 60
self['simulation_times'] = 1600
self['careful_stage'] = 226 # disable careful stage
self['mode'] = 13
print('> Self play and train mode')
if mode == 0:
self['display'] = True
self['is_self_play'] = True
self['mode'] = 0
self['simulation_times'] = 100
self['games_num'] = 3
self['epoch'] = 2
self['show_score'] = True
print('> Debug mode')
def print_current_config(self):
print('------------------')
print('> CURRENT CONFIG:')
for key in self:
print('{}: {}'.format(key, self[key]))
print('------------------')
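# Minimal usage sketch: build a config, switch to "AI vs Human" mode, and
# inspect the resulting settings.
if __name__ == '__main__':
    config = Config()
    config.set_mode(2)
    config.print_current_config()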
|
1662202
|
from .o3d import kdtree as o3d_kdtree
from concurrent.futures import ThreadPoolExecutor
from importlib import import_module
import os
import numpy as np
FAISS_INSTALLED = False
try:
faiss = import_module('faiss')
FAISS_INSTALLED = True
except Exception as e:
print(e)
    print('Cannot import faiss for GPU nearest neighbour search, using Open3D instead.')
class _NearestNeighbors(object):
def __init__(self, set_k=None, **kwargs):
self.model = None
self.set_k = set_k
def train(self, data):
pass
def search(self, data, k, return_distance=True):
if self.set_k is not None:
assert self.set_k == k, \
'K not match to setting {}'.format(self.set_k)
D, I = None, None
return D, I
class Open3dNN(_NearestNeighbors):
def __init__(self, set_k=None, **kwargs):
super(Open3dNN, self).__init__(set_k, **kwargs)
self.model = None
def train(self, data):
assert data.shape[1] == 3, 'Must be shape [?, 3] for point data'
self.model = o3d_kdtree(data)
def search(self, data, k, return_distance=False):
        assert self.model is not None, "Model has not been trained"
if data.shape[0] == 1:
[__, I, _] = self.model.search_knn_vector_3d(data[0], k)
else:
            I = np.zeros((data.shape[0], k), dtype=np.int64)
with ThreadPoolExecutor(256) as executor:
for i in range(I.shape[0]):
                    executor.submit(self._search_multiple, self.model, I, data, k, i)
return None, I
@staticmethod
def _search_multiple(knn_searcher, I, data, k, i):
[__, I_, _] = knn_searcher.search_knn_vector_3d(data[i, :], k)
I[i, :] = np.asarray(I_)
class FaissNN(_NearestNeighbors):
#GPU KNN Search for large scale
def __init__(self, set_k=None, **kwargs):
super(FaissNN, self).__init__(set_k, **kwargs)
self.IVF_number = 32786
self.GPU_id = None
if isinstance(kwargs, dict):
if 'IVF_number' in kwargs: self.IVF_number = kwargs['IVF_number']
if 'GPU_id' in kwargs: self.GPU_id = kwargs['GPU_id']
self.model = None
self.dimension = None
def train(self, data):
d = data.shape[1]
data = data.astype(np.float32)
self.model = faiss.index_factory(int(d), 'IVF{}_HNSW32,Flat'.format(self.IVF_number)) #_HNSW32
if self.GPU_id is not None and isinstance(self.GPU_id, int):
res = faiss.StandardGpuResources()
self.model = faiss.index_cpu_to_gpu(res, self.GPU_id, self.model)
elif isinstance(self.GPU_id, list):
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join([str(i) for i in self.GPU_id])
self.model = faiss.index_cpu_to_all_gpus(self.model)
else:
self.model = faiss.index_cpu_to_all_gpus(self.model)
self.model.train(data)
self.model.add(data)
self.model.nprobe = d ** 2
def search(self, data, k, return_distance=True):
data = data.astype(np.float32)
        assert self.model is not None, "Model has not been trained"
#assert self.model.is_trained, "Model not trained."
D, I = self.model.search(data, k)
        if not return_distance: D = None
return D, I
if __name__ == "__main__":
import sys
import time
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '1'
nb = 10**5
nq = 10**5
np.random.seed(1)
datab = np.random.rand(nb, 3).astype('float32')
dataq = np.random.rand(nq, 3).astype('float32')
tic = time.time()
    nn = FaissNN(set_k=3)  # assumption: the original referenced an undefined SkNN; FaissNN fits this GPU-scale test
nn.train(datab)
print(time.time() - tic)
tic = time.time()
D, I = nn.search(dataq, 3)
print(time.time() - tic)
|
1662215
|
import common as c
from config import os_name
import shutil
import os
c.print('>> Downloading charsetdetect for {}'.format(os_name))
src_dir = os.path.abspath(__file__ + '/../../../third-party/charsetdetect')
if not os.path.exists(src_dir):
c.run('git clone --depth 1 https://github.com/batterseapower/libcharsetdetect.git ' + src_dir)
else:
c.print('Folder {} already exists'.format(src_dir))
build_dir = os.path.abspath(src_dir + '/../charsetdetect-build')
shutil.rmtree(build_dir, ignore_errors=True)
os.mkdir(build_dir)
os.chdir(build_dir)
arch = ''
if os_name.startswith('win'):
arch = '-A ' + ('Win32' if os_name == 'win32' else 'x64')
c.run('cmake -DCMAKE_POSITION_INDEPENDENT_CODE=ON {} {}'.format(arch, src_dir))
c.run('cmake --build . --config Release')
|
1662222
|
from holidays.models import Holiday
def create_holiday(date, name):
holiday = Holiday.objects.create(
date=date,
name=name
)
return holiday
|
1662229
|
from recon.core.module import BaseModule
import os
import re
class Module(BaseModule):
meta = {
'name': 'Contacts to Domains Data Migrator',
'author': '<NAME> (@LaNMaSteR53)',
'description': 'Adds a new domain for all the hostnames associated with email addresses stored in the \'contacts\' table.',
'comments': (
            'This module considers that everything after the first element could contain other hosts besides the current one. Therefore, hosts more than two domain levels deep will create domains more than two elements in length.',
),
'query': 'SELECT DISTINCT email FROM contacts WHERE email IS NOT NULL',
}
def module_run(self, emails):
# extract the host portion of each email address
hosts = [x.split('@')[1] for x in emails]
with open(os.path.join(self.data_path, 'suffixes.txt')) as f:
            suffixes = [line.strip().lower() for line in f if line.strip() and not line.startswith('#')]
domains = self.hosts_to_domains(hosts, suffixes)
for domain in domains:
self.add_domains(domain=domain)
|
1662242
|
import sys
import numpy as np
from abc import ABCMeta, abstractmethod
class OptimizationTestFunction(metaclass=ABCMeta):
    """
    General class for Test Functions used for optimization
    """
def __init__(self, mindim=1, maxdim=None, domain=np.array([-1, 1])):
self.mindim = mindim
self.maxdim = maxdim
self.domain = domain
@staticmethod
def function(x):
return np.sum(np.abs(x))
@abstractmethod
def minimum(self, ndim):
pass
def fminimum(self, ndim):
x = self.minimum(ndim)
return self.function(x)
def get_plot_matrices(self, shape=None):
if shape is None:
shape = [200, 200]
if self.domain.ndim == 1:
dx = float(self.domain[1] - self.domain[0]) / (shape[0])
X, Y = np.mgrid[self.domain[0]:self.domain[1]:dx, self.domain[0]:self.domain[1]:dx]
else:
dx = float(self.domain[0, 1] - self.domain[0, 0]) / (shape[0])
dy = float(self.domain[1, 1] - self.domain[1, 0]) / (shape[1])
X, Y = np.mgrid[self.domain[0, 0]:self.domain[0, 1]:dx, self.domain[1, 0]:self.domain[1, 1]:dy]
Z = self.function(np.array([X, Y]))
return X, Y, Z
class Sphere(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=1, maxdim=None, domain=np.array([-5, 5]))
@staticmethod
def function(x):
x = np.array(x)
return np.sum(x.T * x.T, axis=-1).T
def minimum(self, ndim):
return np.zeros(ndim)
class Ackley(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=1, maxdim=None, domain=np.array([-5, 5]))
@staticmethod
def function(x):
n = len(x)
exp1 = np.exp(-0.2 * np.sqrt(1.0 / n * np.sum(x * x)))
exp2 = np.exp(1.0 / n * np.sum((np.cos(2 * np.pi * x)).T, axis=-1).T)
return -20 * exp1 - exp2 + np.e + 20
def minimum(self, ndim):
return np.zeros(ndim)
class Rosenbrock(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=1, maxdim=None, domain=np.array([-5, 5]))
@staticmethod
def function(x):
return np.sum((100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0 + (1 - x[:-1]) ** 2.0).T, axis=-1).T
def minimum(self, ndim):
return np.ones(ndim)
class Beale(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-4.5, 4.5]))
@staticmethod
def function(x):
return (1.5 - x[0] + x[0] * x[1]) ** 2 + (2.25 - x[0] + x[0] * x[1] * x[1]) ** 2 + (2.625 - x[0] + x[0] * x[1] *
x[1] * x[1]) ** 2
def minimum(self, ndim):
assert ndim == 2
return np.array([3.0, 0.5])
class GoldsteinPrice(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-2, 2]))
@staticmethod
def function(x):
factor1 = (19 - 14 * x[0] + 3 * x[0] ** 2 - 14 * x[1] + 6 * x[0] * x[1] + 3 * x[1] ** 2)
factor2 = (18 - 32 * x[0] + 12 * x[0] ** 2 + 48 * x[1] - 36 * x[0] * x[1] + 27 * x[1] ** 2)
return (1 + ((x[0] + x[1] + 1) ** 2) * factor1) * (30 + ((2 * x[0] - 3 * x[1]) ** 2) * factor2)
def minimum(self, ndim):
assert ndim == 2
return np.array([0.0, -1.0])
class Booth(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-10, 10]))
@staticmethod
def function(x):
return (x[0] + 2 * x[1] - 7) ** 2 + (2 * x[0] + x[1] - 5) ** 2
    def minimum(self, ndim):
        assert ndim == 2
        return np.array([1.0, 3.0])
class BukinN6(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([[-15, 15], [-3, 3]]))
@staticmethod
def function(x):
return 100 * np.sqrt(np.abs(x[1] - 0.01 * x[0] ** 2)) + 0.01 * np.abs(x[0] + 10)
    def minimum(self, ndim):
        assert ndim == 2
        return np.array([-10.0, 1.0])
class Matyas(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-10, 10]))
@staticmethod
def function(x):
return 0.26 * (x[0] ** 2 + x[1] ** 2) - 0.48 * x[0] * x[1]
    def minimum(self, ndim):
        assert ndim == 2
        return np.zeros(2)
class LeviN13(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-10, 10]))
@staticmethod
def function(x):
        term1 = (np.sin(3 * np.pi * x[0])) ** 2
        term2 = (x[0] - 1) ** 2 * (1 + (np.sin(3 * np.pi * x[1])) ** 2)
term3 = (x[1] - 1) ** 2 * (1 + (np.sin(2 * np.pi * x[1])) ** 2)
return term1 + term2 + term3
def minimum(self, ndim):
assert ndim == 2
return np.array([1.0, 1.0])
class ThreeHump(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-10, 10]))
@staticmethod
def function(x):
        return 2 * x[0] ** 2 - 1.05 * x[0] ** 4 + 1.0 / 6.0 * x[0] ** 6 + x[0] * x[1] + x[1] ** 2
def minimum(self, ndim):
assert ndim == 2
return np.array([0.0, 0.0])
class Easom(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-100, 100]))
@staticmethod
def function(x):
return -np.cos(x[0]) * np.cos(x[1]) * np.exp(-1 * ((x[0] - np.pi) ** 2 + (x[1] - np.pi) ** 2))
def minimum(self, ndim):
assert ndim == 2
return np.array([np.pi, np.pi])
class CrossInTray(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-10, 10]))
@staticmethod
def function(x):
factor1 = np.exp(np.abs(100 - np.sqrt(x[0] ** 2 + x[1] ** 2) / np.pi))
return -1E-4 * (np.abs(np.sin(x[0]) * np.sin(x[1]) * factor1) + 1) ** 0.1
def minimum(self, ndim):
assert ndim == 2
return np.array([1.34941, 1.34941])
class Eggholder(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-512, 512]))
@staticmethod
def function(x):
return -1.0 * (x[1] + 47) * np.sin(np.sqrt(np.abs(x[1] + x[0] / 2.0 + 47))) - x[0] * np.sin(
np.sqrt(np.abs(x[0] - x[1] - 47)))
def minimum(self, ndim):
assert ndim == 2
return np.array([512, 404.2319])
class HolderTable(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-10, 10]))
@staticmethod
def function(x):
return -1.0 * np.abs(np.sin(x[0]) * np.cos(x[1]) * np.exp(np.abs(1 - np.sqrt(x[0] ** 2 + x[1] ** 2) / np.pi)))
def minimum(self, ndim):
assert ndim == 2
        return np.array([8.05502, 9.66459])
class McCormick(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([[-1.5, 4], [-3, 4]]))
@staticmethod
def function(x):
return np.sin(x[0] + x[1]) + (x[0] - x[1]) ** 2 - 1.5 * x[0] + 2.5 * x[1] + 1
def minimum(self, ndim):
assert ndim == 2
        return np.array([-0.54719, -1.54719])
class SchafferN2(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-100, 100]))
@staticmethod
def function(x):
return 0.5 + ((np.sin(x[0] ** 2 - x[1] ** 2)) ** 2 - 0.5) / (1 + 1E-3 * (x[0] ** 2 + x[1] ** 2)) ** 2
def minimum(self, ndim):
assert ndim == 2
return np.zeros(2)
class SchafferN4(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-100, 100]))
@staticmethod
def function(x):
return 0.5 + ((np.cos(np.sin(np.abs(x[0] ** 2 - x[1] ** 2)))) ** 2 - 0.5) / (1 + 1E-3 * (
x[0] ** 2 + x[1] ** 2)) ** 2
def minimum(self, ndim):
assert ndim == 2
return np.array([0, 1.25313])
class StyblinskiTang(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=1, maxdim=None, domain=np.array([-5, 5]))
@staticmethod
def function(x):
return np.sum((x ** 4 - 16 * x ** 2 + 5 * x).T, axis=-1).T / 2.0
def minimum(self, ndim):
return -2.903534 * np.ones(ndim)
# class Simionescu(OptimizationTestFunction):
#
# def __init__(self):
# OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-1.25, 1.25]))
#
# @staticmethod
# def function(x):
# rt = 1
# rs = 0.2
# n = 8
# return np.piecewise(x,
# [x[0]**2 + x[1]**2 <= (rt + rs*np.cos(n*np.arctan(x[0]/x[1])))**2,
# x[0]**2 + x[1]**2 > (rt + rs*np.cos(n*np.arctan(x[0]/x[1])))**2], [0.1*x[0]*x[1], 1])
#
#
# def minimum(self, ndim):
# assert ndim == 2
# return -0.84852813*np.ones(ndim)
def all_tests_functions():
current_module = sys.modules[__name__]
f = current_module.__dict__
return [f[x]() for x in f if hasattr(f[x], '__base__') and f[x].__base__ == OptimizationTestFunction]
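# Quick self-check sketch: every registered test function should report
# (approximately) its known optimum at the returned minimum location.
if __name__ == "__main__":
    for fn in all_tests_functions():
        ndim = 2 if fn.maxdim == 2 else fn.mindim
        print('{}: f(min) = {:.6f}'.format(type(fn).__name__, fn.fminimum(ndim)))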
|
1662249
|
import glob
import pandas as pd
import numpy as np
import os
filenames = glob.glob("*.csv")
filenames = [filename for filename in filenames if os.path.getsize(filename) > 10000]
#filenames = ["CreditRequirement.csv"]
timestamp_col = "Complete Timestamp" # column that indicates completion timestamp
case_id_col = "Case ID"
activity_col = "Activity"
def add_all_columns(group):
group = group.sort_values(timestamp_col, ascending=True, kind="mergesort")
group["event_nr"] = range(1,group.shape[0]+1)
group["unique_events"] = group[activity_col].nunique()
group["total_events"] = len(group[activity_col])
end_date = group[timestamp_col].iloc[-1]
tmp = end_date - group[timestamp_col]
tmp = tmp.fillna(0)
start_date = group[timestamp_col].iloc[0]
elapsed = group[timestamp_col] - start_date
elapsed = elapsed.fillna(0)
group["elapsed"] = elapsed.apply(lambda x: float(x / np.timedelta64(1, 'D')))
group["remtime"] = tmp.apply(lambda x: float(x / np.timedelta64(1, 'D'))) # D is for days
#group["case_length"] = group.shape[0]
return group
with open("log_summary.tsv", 'w') as fout:
fout.write("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" % (
"log", "total_cases", "unique_activities", "total_events","avg_unique_events_per_trace", "mean_case_length",
"std_case_length", "mean_case_duration","std_case_duration","mean_remtime","std_remtime"))
for filename in filenames:
print(filename)
# dtypes = {col:"str" for col in ["proctime", "elapsed", "label", "last"]} # prevent type coercion
data = pd.read_csv(filename, sep=";")
data[timestamp_col] = pd.to_datetime(data[timestamp_col])
data = data.groupby(case_id_col).apply(add_all_columns)
df0 = data.loc[data["event_nr"] == 1].copy()
df0["UER"] = df0["unique_events"] / df0["total_events"]
#print("Avg percentage of unique timestamps per trace: %.3f" %np.mean(df0["UTR"]))
#print("%s out of %s unique timestamps" %(len(data[timestamp_col].unique()),data[timestamp_col].count()))
global_unique_timestamps = len(data[timestamp_col].unique()) / data[timestamp_col].count()
#print("%s cases that reach length %d" %(df.shape[0],cutoff))
#print("In %s of them elapsed time is still 0" %len(df.loc[df["elapsed"]==0]))
#print("%s cases that reach length %d" %(df.shape[0],cutoff))
fout.write("%s, %s, %s, %s, %.3f, %.3f, %.3f, %.3f, %.3f, %.3f, %.3f\n"%(filename,
data[case_id_col].nunique(),
data[activity_col].nunique(),
data.shape[0],
np.mean(df0["UER"]),
np.mean(df0["total_events"]),
np.std(df0["total_events"]),
np.mean(df0["remtime"]),
np.std(df0["remtime"]),
np.mean(data["remtime"]),
np.std(data["remtime"])))
|
1662251
|
import pyarrow as pa
### When dtype -> arrow is ambiguous, override
KNOWN_FIELDS = [
[0, 'contributors', pa.string()],
[1, 'coordinates', pa.string()],
[2, 'created_at', pa.string()],
#[3, 'display_text_range', pa.list_(pa.int64())],
[3, 'display_text_range', pa.string()],
[4, 'entities', pa.string()],
[5, 'extended_entities', pa.string()], #extended_entities_t ],
[7, 'favorited', pa.bool_()],
[8, 'favorite_count', pa.int64()],
[9, 'full_text', pa.string()],
[10, 'geo', pa.string()],
[11, 'id', pa.int64() ],
[12, 'id_str', pa.string() ],
[13, 'in_reply_to_screen_name', pa.string() ],
[14, 'in_reply_to_status_id', pa.int64() ],
[15, 'in_reply_to_status_id_str', pa.string() ],
[16, 'in_reply_to_user_id', pa.int64() ],
[17, 'in_reply_to_user_id_str', pa.string() ],
[18, 'is_quote_status', pa.bool_() ],
[19, 'lang', pa.string() ],
[20, 'place', pa.string()],
[21, 'possibly_sensitive', pa.bool_()],
[22, 'quoted_status', pa.string()],
[23, 'quoted_status_id', pa.int64()],
[24, 'quoted_status_id_str', pa.string()],
[25, 'quoted_status_permalink', pa.string()],
[26, 'retweet_count', pa.int64()],
[27, 'retweeted', pa.bool_()],
[28, 'retweeted_status', pa.string()],
[29, 'scopes', pa.string()],
[30, 'source', pa.string()],
[31, 'truncated', pa.bool_()],
[32, 'user', pa.string()],
#[33, 'withheld_in_countries', pa.list_(pa.string())],
[33, 'withheld_in_countries', pa.string()],
#[34, 'followers', pa.struct({'followers': pa.bool_()})]
[34, 'followers', pa.string()]
]
|
1662274
|
import sys
import os
from multiprocessing import forking, process, freeze_support
from multiprocessing.util import _logger, _log_to_stderr
WINEXE = forking.WINEXE
def get_preparation_data(name):
'''
Return info about parent needed by child to unpickle process object.
Monkey-patch from
'''
d = dict(
name=name,
sys_path=sys.path,
sys_argv=sys.argv,
log_to_stderr=_log_to_stderr,
orig_dir=process.ORIGINAL_DIR,
authkey=process.current_process().authkey,
)
if _logger is not None:
d['log_level'] = _logger.getEffectiveLevel()
if not WINEXE:
main_path = getattr(sys.modules['__main__'], '__file__', None)
if not main_path and sys.argv[0] not in ('', '-c'):
main_path = sys.argv[0]
if main_path is not None:
if (not os.path.isabs(main_path) and process.ORIGINAL_DIR
is not None):
main_path = os.path.join(process.ORIGINAL_DIR, main_path)
if not main_path.endswith('.exe'):
d['main_path'] = os.path.normpath(main_path)
return d
forking.get_preparation_data = get_preparation_data
freeze_support()
|
1662292
|
from pydantic import BaseModel
from typing import Optional
class ShopConfig(BaseModel):
shop_cpu: bool
shop_cpu_price: Optional[int] = None
shop_cpu_min: Optional[int] = None
shop_ram: bool
shop_ram_price: Optional[int] = None
shop_ram_min: Optional[int] = None
shop_hdd: bool
shop_hdd_price: Optional[int] = None
shop_hdd_min: Optional[int] = None
shop_backups: bool
shop_backups_price: Optional[int] = None
shop_port: bool
shop_port_price: Optional[int] = None
shop_database: bool
shop_database_price: Optional[int] = None
shop_slots: bool
shop_slots_price: Optional[int] = None
promo_code: bool
class ServerConfig(BaseModel):
server_creation: bool
server_deletion: bool
server_editing: bool
class UserConfig(BaseModel):
user_creation: bool
|
1662298
|
import re
from setuptools import setup, find_packages
install_requires = [
"boto3>=1.14.19,<1.15.0",
"click==7.0",
"pyaml==16.12.2",
"pytz",
]
tests_requires = [
"coverage[toml]==5.0.3",
"flake8==3.7.8",
"isort==5.0.6",
"moto==1.3.14",
"pytest==5.4.3",
"pytest-cov==2.10.0",
]
with open("README.rst") as fh:
long_description = re.sub(
"^.. start-no-pypi.*^.. end-no-pypi", "", fh.read(), flags=re.M | re.S
)
setup(
name="ecs-deplojo",
version="0.9.2",
author="<NAME>.",
author_email="<EMAIL>",
url="https://www.github.com/labd/ecs-deplojo/",
description="Deployment tool for Amazon ECS",
long_description=long_description,
zip_safe=False,
install_requires=install_requires,
tests_require=tests_requires,
extras_require={"test": tests_requires},
package_dir={"": "src"},
packages=find_packages("src"),
include_package_data=True,
    entry_points={"console_scripts": ["ecs-deplojo = ecs_deplojo.cli:main"]},
license="MIT",
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
)
|
1662351
|
import codecs
import numpy as np
import tensorflow as tf
from external.bleu import *
from external.rouge import *
from external.squad import *
__all__ = ["evaluate_from_data", "evaluate_from_file"]
def _bleu(pred_data, ref_data):
"""BLEU score for translation task"""
max_order = 4
smooth = False
score, _, _, _, _, _ = compute_bleu(ref_data, pred_data, max_order, smooth)
bleu_score = 100 * score
return bleu_score
def _rouge(pred_data, ref_data):
"""ROUGE score for summarization task"""
score_map = rouge(pred_data, ref_data)
rouge_score = 100 * score_map["rouge_l/f_score"]
return rouge_score
def _squad_em(pred_data, ref_data):
"""EM score for reading comprehension task"""
em_score = eval_exact_match_score(pred_data, ref_data)
return em_score
def _squad_f1(pred_data, ref_data):
"""F1 score for reading comprehension task"""
f1_score = eval_f1_score(pred_data, ref_data)
return f1_score
def evaluate_from_data(pred_data, ref_data, metric):
"""compute evaluation score based on selected metric"""
pred_and_ref = [(pred, ref_list) for pred, ref_list in zip(pred_data, ref_data) if pred and ref_list]
pred_data = [pred for (pred, _) in pred_and_ref]
ref_data = [ref_list for (_, ref_list) in pred_and_ref]
if len(pred_data) == 0 or len(ref_data) == 0:
return 0.0
if metric == "bleu":
eval_score = _bleu(pred_data, ref_data)
elif metric == "rouge":
eval_score = _rouge(pred_data, ref_data)
elif metric == "exact":
eval_score = _squad_em(pred_data, ref_data)
elif metric == "f1":
eval_score = _squad_f1(pred_data, ref_data)
else:
raise ValueError("unsupported metric {0}".format(metric))
return eval_score
def evaluate_from_file(pred_file, ref_file, metric):
predict = []
with codecs.getreader("utf-8")(tf.gfile.GFile(pred_file, "rb")) as file_p:
for line in file_p:
predict.append(line.strip())
reference = []
with codecs.getreader("utf-8")(tf.gfile.GFile(ref_file, "rb")) as file_r:
for line in file_r:
reference.append(line.strip())
    eval_score = evaluate_from_data(predict, reference, metric)
return eval_score
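# Hedged usage sketch: evaluate_from_data pairs each prediction with a list of
# references, matching the zip above (tokenisation follows whatever the
# external.bleu/rouge helpers expect; the data here is made up).
#
#     score = evaluate_from_data(pred_data, ref_data, "bleu")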
|
1662399
|
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Lipinski
from rdkit.Chem import Descriptors
from rdkit.DataStructs import FingerprintSimilarity, ConvertToNumpyArray
def clean_mol(smiles):
"""
Construct a molecule from a SMILES string, removing stereochemistry and
explicit hydrogens, and setting aromaticity.
"""
mol = Chem.MolFromSmiles(str(smiles), sanitize=64)
if mol is None:
raise ValueError("Invalid SMILES")
Chem.RemoveStereochemistry(mol)
Chem.SanitizeMol(mol)
mol = Chem.RemoveHs(mol)
return mol
def clean_mols(all_smiles):
"""
Construct a list of molecules from a list of SMILES strings, replacing
invalid molecules with None in the list.
"""
mols = []
for smiles in all_smiles:
try:
mol = clean_mol(smiles)
mols.append(mol)
except ValueError:
mols.append(None)
return mols
def in_Ro5(mol):
"""
Test whether a molecule is in Lipinski "Rule of 5" space, meaning
- 5 or fewer H bond donors
- 10 or fewer H bond acceptors
- MW < 500 Da
- logP < 5
"""
h_donor = Lipinski.NumHDonors(mol)
h_accept = Lipinski.NumHAcceptors(mol)
mw = Descriptors.MolWt(mol)
logP = Descriptors.MolLogP(mol)
Ro5 = h_donor <= 5 and h_accept <= 10 and mw <= 500 and logP < 5
return(Ro5)
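# Sanity-check sketch: aspirin comfortably satisfies all four Ro5 criteria.
#     in_Ro5(clean_mol("CC(=O)Oc1ccccc1C(=O)O"))  # expected: True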
def get_ecfp6_fingerprints(mols):
"""
Get ECFP6 fingerprints for a list of molecules which may include `None`s,
gracefully handling `None` values by returning a `None` value in that
position.
"""
fps = []
for mol in mols:
if mol is None:
fps.append(None)
else:
fp = AllChem.GetMorganFingerprintAsBitVect(mol, 3, nBits=1024)
fps.append(fp)
return(fps)
def get_bit_vector(fp):
arr = np.zeros((1,))
ConvertToNumpyArray(fp, arr)
return(arr)
def get_tanimoto(list1, list2):
tcs = []
for fp1 in list1:
for fp2 in list2:
if fp1 is None or fp2 is None:
tcs.append(None)
else:
tc = FingerprintSimilarity(fp1, fp2)
tcs.append(tc)
return(tcs)
|
1662400
|
from typing import Any
from allauth.account.adapter import DefaultAccountAdapter
from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
from django.conf import settings
from django.http import HttpRequest
class AccountAdapter(DefaultAccountAdapter):
def is_open_for_signup(self, request: HttpRequest):
return getattr(settings, "ACCOUNT_ALLOW_REGISTRATION", True)
class SocialAccountAdapter(DefaultSocialAccountAdapter):
def is_open_for_signup(self, request: HttpRequest, sociallogin: Any):
return getattr(settings, "ACCOUNT_ALLOW_REGISTRATION", True)
def save_user(self, request, user, form):
"""
This is called when saving user via allauth registration.
We override this to set additional data on user object.
"""
        # Let the parent implementation create the user, then attach the
        # extra form fields and persist them below.
        user = super(SocialAccountAdapter, self).save_user(request, user, form)
user.name = form.cleaned_data.get('name')
user.agree = form.cleaned_data.get('agree')
user.save()
|
1662434
|
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import tensorflow as tf
from dltk.core.modules.base import AbstractModule
class Linear(AbstractModule):
"""Linear layer module
This module builds a linear layer
"""
def __init__(self, out_units, use_bias=True, name='linear'):
"""Constructs linear layer
Parameters
----------
out_units : int
number of output units
use_bias : bool, optional
flag to toggle the addition of a bias
name : string
name of the module
"""
self.out_units = out_units
self.in_units = None
self.use_bias = use_bias
super(Linear, self).__init__(name=name)
def _build(self, inp):
"""Applies the linear layer operation to an input tensor
Parameters
----------
inp : tf.Tensor
input tensor
Returns
-------
tf.Tensor
transformed tensor
"""
assert len(inp.get_shape().as_list()) == 2, 'Layer needs 2D input.'
self.in_shape = tuple(inp.get_shape().as_list())
if self.in_units is None:
self.in_units = self.in_shape[-1]
assert self.in_units == self.in_shape[-1], 'Layer was initialised for a different number of input units.'
w_shape = (self.in_units, self.out_units)
self._w = tf.get_variable("w", shape=w_shape, initializer=tf.uniform_unit_scaling_initializer(),
collections=self.WEIGHT_COLLECTIONS)
self.variables.append(self._w)
if self.use_bias:
self._b = tf.get_variable("b", shape=(self.out_units,), initializer=tf.constant_initializer(),
collections=self.BIAS_COLLECTIONS)
self.variables.append(self._b)
outp = tf.nn.xw_plus_b(inp, self._w, self._b, 'linear')
else:
            outp = tf.matmul(inp, self._w, name='linear')
return outp
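# Usage sketch (TF1 graph mode, assuming AbstractModule routes calls to _build
# as in Sonnet-style module classes):
#
#     x = tf.placeholder(tf.float32, [None, 128])
#     logits = Linear(out_units=10)(x)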
|
1662459
|
from ._base import BaseGaussianMixture
from ..weight_models import EqualWeighting
class EqualWeightedMixture(BaseGaussianMixture):
def __init__(self, *, n=1, rng=None, **kwargs):
weight_model = EqualWeighting(n=n, rng=rng)
self.n = n
super().__init__(weight_model=weight_model, rng=rng, **kwargs)
|
1662484
|
from mock import call
from raptiformica.actions.modules import remove_keys
from tests.testcase import TestCase
class TestRemoveKeys(TestCase):
def setUp(self):
self.log = self.set_up_patch(
'raptiformica.actions.modules.log'
)
self.mapping = {
'some_key': 'some_value',
'some_other/key': 'some_other_value'
}
self.try_delete_config = self.set_up_patch(
'raptiformica.actions.modules.try_delete_config'
)
self.rmtree = self.set_up_patch(
'raptiformica.actions.modules.rmtree'
)
def test_remove_keys_logs_debug_messages(self):
remove_keys(self.mapping, '~/.raptiformica/modules/puppetfiles')
self.assertEqual(2, self.log.debug.call_count)
def test_remove_keys_deletes_all_keys_from_mapping(self):
remove_keys(self.mapping, '~/.raptiformica/modules/puppetfiles')
expected_calls = map(call, self.mapping.keys())
self.assertCountEqual(
self.try_delete_config.mock_calls, expected_calls
)
def test_remove_keys_removes_module_directory(self):
remove_keys(self.mapping, '~/.raptiformica/modules/puppetfiles')
self.rmtree.assert_called_once_with(
'~/.raptiformica/modules/puppetfiles',
ignore_errors=True
)
|
1662565
|
from dataset.datasets import MXFaceDataset, SyntheticDataset
from dataset.randaugment import RandAugment
from dataset.utils import *
|
1662569
|
from .bound_general import BoundedModule, BoundDataParallel
from .bounded_tensor import BoundedTensor, BoundedParameter
from .perturbations import PerturbationLpNorm, PerturbationSynonym
from .wrapper import CrossEntropyWrapper, CrossEntropyWrapperMultiInput
__version__ = '0.2'
|
1662585
|
import pyctrl.bbb as pyctrl
class Controller(pyctrl.Controller):
def __init__(self, *vargs, **kwargs):
# Initialize controller
super().__init__(*vargs, **kwargs)
def __reset(self):
# call super
super().__reset()
# add source: encoder1
self.add_device('encoder1',
'pyctrl.bbb.encoder', 'Encoder',
type = 'source',
outputs = ['encoder1'],
encoder = 1,
ratio = - 60 * 35.557)
# add source: encoder2
self.add_device('encoder2',
'pyctrl.bbb.encoder', 'Encoder',
type = 'source',
outputs = ['encoder2'],
encoder = 2,
ratio = 60 * 35.557)
# add source: imu
# self.add_device('mpu6050',
# 'pyctrl.bbb.mpu6050', 'Inclinometer',
# type = 'source',
# enable = True,
# outputs = ['imu'])
# add source: mic1
self.add_device('mic1',
'pyctrl.bbb.analog', 'Analog',
type = 'source',
pin = 'AIN0',
outputs = ['mic1'])
# add source: mic2
self.add_device('mic2',
'pyctrl.bbb.analog', 'Analog',
type = 'source',
pin = 'AIN1',
outputs = ['mic2'])
# add source: prox1
self.add_device('prox1',
'pyctrl.bbb.analog', 'Analog',
type = 'source',
pin = 'AIN2',
outputs = ['prox1'])
# add source: prox2
self.add_device('prox2',
'pyctrl.bbb.analog', 'Analog',
type = 'source',
pin = 'AIN3',
outputs = ['prox2'])
# add sink: motor1
self.add_device('motor1',
'pyctrl.bbb.motor', 'Motor',
type = 'sink',
enable = True,
inputs = ['motor1'],
pwm_pin = 'P9_14',
dir_A = 'P9_15',
dir_B = 'P9_23')
# add sink: motor2
self.add_device('motor2',
'pyctrl.bbb.motor', 'Motor',
type = 'sink',
enable = True,
inputs = ['motor2'],
pwm_pin='P9_16',
dir_B='P9_12',
dir_A='P9_27')
if __name__ == "__main__":
import time, math
import pyctrl.block as block
from pyctrl.block.linear import Feedback, Gain
# initialize robut
robut = Controller()
print("> WELCOME TO ROBUT")
print(robut.info('all'))
# install printer
robut.add_sink('printer',
block.Printer(endln = '\r'),
['clock',
'motor1', 'encoder1',
'motor2', 'encoder2',
#'imu',
'mic1','mic2',
'prox1','prox2'])
# install controller
robut.add_signal('reference1')
robut.add_filter('controller',
Feedback(block = Gain(gain = 1)),
['prox2', 'reference1'],
['motor1'])
with robut:
for k in range(100):
mic1 = robut.get_signal('mic1')
print('> mic1 = {}'.format(mic1))
time.sleep(1)
print("> BYE")
|
1662600
|
import re
import sqlite3
import time
#########################################################################
# Base class for generating a catebot response. This is intended to be a parent to classes that
# implement each type of response with overrides specific to them. The classes that are expected to be
# overridden are:
#
# getResponse(self, requests)
# getContextLink(self, key, httpLocation)
# getOverflowComment(self, keys)
# linkCrossReferences(self, response)
# parsedRequests(self, requests, includeRanges = True)
#
# The initializer is called with a dictionary, a base URL for links, and a Configuration object.
#
# NOTE: The base class is implemented with the methods that are expected to be overriden. In addition to serving
# as an example of how the overrides should be written, they also implement the behavior expected when quoting
# the CCC.
#
#########################################################################
class Response:
# Set up the Catechism and initialize variables
def __init__(self, dictionary, baseURL, configuration):
self._dictionary = dictionary
self._baseURL = baseURL
self._configuration = configuration
# Just returns the current character limit for the reddit comment. Makes it easy to find/change in the future.
# NOTE: reddit's character limit is 10,000 characters by default.
def getCharLimit(self):
return 9500
# Simply returns the comment footer found at the bottom of every comment posted by the bot.
def getCommentFooter(self):
return ('\n***\nCatebot ' + self._configuration.version + ' links: [Source Code](https://github.com/konohitowa/catebot)'
+ ' | [Feedback](https://github.com/konohitowa/catebot/issues)'
+ ' | [Contact Dev](http://www.reddit.com/message/compose/?to=kono_hito_wa)'
+ ' | [FAQ](https://github.com/konohitowa/catebot/blob/master/docs/CateBot%20Info.md#faq)'
+ ' | [Changelog](https://github.com/konohitowa/catebot/blob/master/docs/CHANGELOG.md)')
def getOverflowHeader(self, singular, plural, number):
noun = singular
if number > 1:
noun = plural
return 'The contents of the ' + noun + ' you quoted exceed the character limit ([' + str(self.getCharLimit()) + '](https://github.com/konohitowa/catebot/blob/master/docs/CateBot%20Info.md#wait-ive-counted-the-characters-and-i-didnt-hit-the-limit) characters). Instead, here are links to the ' + noun + '...\n\n'
def parsedRequests(self, requests, includeRanges = True):
validRequests = list()
for request in requests:
request = re.sub(r"\s+","",request)
if ',' in request:
sublist = request.split(',')
else:
sublist = [ request ]
for subrequest in sublist:
if '-' in subrequest:
startingRequest = subrequest.partition('-')[0]
if includeRanges:
endingRequest = subrequest.partition('-')[2]
if int(startingRequest) < int(endingRequest)+1:
for key in range(int(startingRequest), int(endingRequest)+1):
if str(key) in self._dictionary:
validRequests.append(str(key))
else:
validRequests.append(startingRequest)
elif subrequest in self._dictionary:
validRequests.append(subrequest)
return validRequests
# Constructs reddit comment response for Catechism requests.
def getResponse(self, requests):
validRequests = self.parsedRequests(requests)
if len(validRequests) > 0:
comment = ''
for request in validRequests:
content,location = self._dictionary[request]
comment += ('[**CCC ' + request + '**](' + self.getContextLink(request, location) + ') ' + content) + '\n\n'
comment = self.linkCrossReferences(comment)
if len(comment) > self.getCharLimit():
comment = self.getOverflowComment(validRequests)
comment += self.getCommentFooter()
return True,comment
else:
return False,""
# Takes the request key and http location as parameters. The function then constructs
# the appropriate context link. This link appears on each paragraph number.
def getContextLink(self, request, location):
return 'http://www.scborromeo.org/ccc/para/' + request + '.htm'
# Constructs and returns an overflow comment whenever the comment exceeds the character limit set by
# getCharLimit(). Instead of posting the contents of the request(s) in the comment, it links to webpages
# that contain the contents of the request(s).
def getOverflowComment(self, requests):
numberOfRequests = 0
comment = ''
for request in requests:
content,location = self._dictionary[request]
numberOfRequests += 1
comment += ('([' + request + '](' + self.getContextLink(request,location) + '))\n')
if len(comment) > self.getCharLimit():
comment += "\n\nAnd even when condensing the paragraphs to links, you still exceeded the quota..."
break
return self.getOverflowHeader('paragraph','paragraphs',numberOfRequests) + comment
def linkCrossReferences(self, comment):
        xrefBlocks = reversed(list(re.finditer(r'(?m)\([\d\,\s\-]+\)$', comment)))
for xrefBlock in xrefBlocks:
xrefs = reversed(list(re.finditer(r'\d+',xrefBlock.group(0))))
for xref in xrefs:
paragraph = xref.group(0)
content,location = self._dictionary[paragraph]
start = xrefBlock.start()+xref.start()
end = xrefBlock.start()+xref.end()
comment = comment[:start]+"["+paragraph+"]("+self.getContextLink(paragraph, location)+")"+comment[end:]
return comment
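# Illustrative subclass sketch (hypothetical): per the note above, a new
# response type only needs to override the documented methods; footers,
# overflow handling and character limits all come from Response.
#
#     class ExampleResponse(Response):
#         def getContextLink(self, request, location):
#             return self._baseURL + location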
#########################################################################
#
# Constructs reddit comment response for Baltimore Catechism requests of
# the form [bccd 1], [bccd 1-5], [bccd 1-5,9-10], and the same forms with
# a BCCD book #, such as [bccd #1 1-5, 10-12]. The default book is #2.
#
#########################################################################
class BaltimoreResponse(Response):
def parsedRequests(self, requests, includeRanges = True):
validRequests = list()
for taggedRequest in requests:
bookNumber = '2'
bookRequest, request = taggedRequest
bookRequest = re.sub(r"\s+","",bookRequest)
request = re.sub(r"\s+","",request)
bookMatch = re.match(r'#(\d+)', bookRequest)
if bookMatch:
bookNumber = bookMatch.group(1)
if int(bookNumber) < 1 or int(bookNumber) > 4:
bookNumber = '2'
if ',' in request:
sublist = request.split(',')
else:
sublist = [ request ]
for subrequest in sublist:
if '-' in subrequest:
startingRequest = subrequest.partition('-')[0]
if includeRanges:
endingRequest = subrequest.partition('-')[2]
if int(startingRequest) < int(endingRequest)+1:
for key in range(int(startingRequest), int(endingRequest)+1):
if str(key) in self._dictionary[bookNumber]:
validRequests.append({'Book': bookNumber, 'Request': str(key)})
elif startingRequest in self._dictionary[bookNumber]:
validRequests.append({'Book': bookNumber, 'Request': startingRequest})
elif subrequest in self._dictionary[bookNumber]:
validRequests.append({'Book': bookNumber, 'Request': subrequest})
return validRequests
def getResponse(self, requests):
validRequests = self.parsedRequests(requests)
if len(validRequests) > 0:
comment = ''
for request in validRequests:
bookNumber = request['Book']
requestNumber = request['Request']
qa = self._dictionary[bookNumber][requestNumber]
comment += ('[**BCCD #' + bookNumber + " Q." + requestNumber + '**](' + self.getContextLink(bookNumber, requestNumber, qa['Q']) + ') ' + qa['Q'] + '\n\nA. ' + qa['A']) + '\n\n'
comment = self.linkCrossReferences(comment)
if len(comment) > self.getCharLimit():
comment = self.getOverflowComment(validRequests)
comment += self.getCommentFooter()
return True,comment
else:
return False,""
# This needs to be updated when an actual linkable source is available
#q.2_who_is_god.3F #self._baseURL
def getContextLink(self, bookNumber, questionNumber, questionText):
modifiedQuestionText = re.sub(r'\s','_',questionText).lower()
modifiedQuestionText = re.sub(r',','.2C',modifiedQuestionText)
modifiedQuestionText = re.sub(r'\?','.3F',modifiedQuestionText)
partitionText = ""
if int(bookNumber) == 4:
partitionText = "_"
if int(questionNumber) < 211:
partitionText += "1"
else:
partitionText += "2"
return 'https://www.reddit.com/r/Catebot/wiki/bccd_' + bookNumber + partitionText + '#wiki_q.' + questionNumber + '_' + modifiedQuestionText
def getOverflowComment(self, requests):
numberOfRequests = 0
comment = ''
for request in requests:
numberOfRequests += 1
bookNumber = request['Book']
requestNumber = request['Request']
qa = self._dictionary[bookNumber][requestNumber]
comment += ('([' + requestNumber + '](' + self.getContextLink(bookNumber, requestNumber, qa['Q']) + '))\n')
if len(comment) > self.getCharLimit():
comment += "\n\nAnd even when condensing the requested questions to links, you still exceeded the quota..."
break
return self.getOverflowHeader('question','questions',numberOfRequests) + comment
# This needs to be filled out for the {} references in #3
def linkCrossReferences(self,comment):
return comment
        xrefBlocks = reversed(list(re.finditer(r'(?m)cann*\.\s+\d+$', comment)))
for xrefBlock in xrefBlocks:
xrefs = reversed(list(re.finditer(r'\d+',xrefBlock.group(0))))
for xref in xrefs:
paragraph = xref.group(0)
content,location = self._Catechism[paragraph]
contextLink = self.__getCanonContextLink(paragraph, location)
start = xrefBlock.start()+xref.start()
end = xrefBlock.start()+xref.end()
comment = comment[:start]+"["+paragraph+"]("+contextLink+")"+comment[end:]
return comment
#########################################################################
#
# Constructs reddit comment response for Canon requests of form [can 12],
# [can 12s1], [can 12-14], [can 12,15-17].
#
#########################################################################
class CanonResponse(Response):
def parsedRequests(self, requests, includeRanges = True):
validRequests = list()
for request in requests:
request = re.sub(r"\s+","",request)
if ',' in request:
sublist = request.split(',')
else:
sublist = [ request ]
for subrequest in sublist:
if '-' in subrequest:
startingRequest = subrequest.partition('-')[0].partition('s')[0]
if includeRanges:
endingRequest = subrequest.partition('-')[2].partition('s')[0]
if int(startingRequest) < int(endingRequest)+1:
for key in range(int(startingRequest), int(endingRequest)+1):
if str(key) in self._dictionary:
validRequests.append(str(key))
elif startingRequest in self._dictionary:
validRequests.append(startingRequest)
else:
key = subrequest.partition('s')[0]
if key in self._dictionary:
validRequests.append(subrequest)
return validRequests
def getResponse(self, requests):
validRequests = self.parsedRequests(requests)
if len(validRequests) > 0:
comment = ''
for request in validRequests:
key = request.partition('s')[0]
section = request.partition('s')[2]
isSectioned,content,location = self._dictionary[key]
contextLink = self.getContextLink("", location)
if section and isSectioned:
try:
comment += ('[**Can. ' + key + '**](' + contextLink + ') ' + u"\u00A7" + section + " " + content[section]) + '\n\n'
except KeyError:
comment += '[**Can. ' + key + '**](' + contextLink + ') ' + u"\u00A7" + section + " doesn't exist\n\n"
elif not section and isSectioned:
comment += '[**Can. ' + key + '**](' + contextLink + ') '
for sect in sorted(content.keys(),key=int):
comment += u"\u00A7"+sect+" "+content[sect]+"\n\n"
else:
comment += '[**Can. ' + key + '**](' + contextLink + ') ' + content
comment = self.linkCrossReferences(comment)
if len(comment) > self.getCharLimit():
comment = self.getOverflowComment(validRequests)
comment += self.getCommentFooter()
return True,comment
else:
return False,""
def getContextLink(self, dummy, location):
return self._baseURL + location
def getOverflowComment(self, requests):
numberOfRequests = 0
comment = ''
for request in requests:
isSectioned,content,location = self._dictionary[request]
numberOfRequests += 1
comment += ('([' + request + '](' + self.getContextLink("",location) + '))\n')
if len(comment) > self.getCharLimit():
comment += "\n\nAnd even when condensing the laws to links, you still exceeded the quota..."
break
return self.getOverflowHeader('law','laws',numberOfRequests) + comment
# HERE
def linkCrossReferences(self,comment):
return comment
        xrefBlocks = reversed(list(re.finditer(r'(?m)cann*\.\s+\d+$', comment)))
for xrefBlock in xrefBlocks:
xrefs = reversed(list(re.finditer(r'\d+',xrefBlock.group(0))))
for xref in xrefs:
paragraph = xref.group(0)
content,location = self._Catechism[paragraph]
contextLink = self.__getCanonContextLink(paragraph, location)
start = xrefBlock.start()+xref.start()
end = xrefBlock.start()+xref.end()
comment = comment[:start]+"["+paragraph+"]("+contextLink+")"+comment[end:]
return comment
#########################################################################
#
# Constructs reddit comment response for GIRM requests of form [girm n].
#
#########################################################################
class GIRMResponse(Response):
def getResponse(self, requests):
validRequests = self.parsedRequests(requests)
if len(validRequests) > 0:
comment = ''
for request in validRequests:
content,location = self._dictionary[request]
comment += ('[**GIRM ' + request + '**](' + self.getContextLink(request, location) + ') ' + content) + '\n\n'
comment = self.linkCrossReferences(comment)
if len(comment) > self.getCharLimit():
comment = self.getOverflowComment(validRequests)
comment += self.getCommentFooter()
return True,comment
else:
return False,""
def getContextLink(self, request, location):
return self._baseURL + location
def linkCrossReferences(self, comment):
return comment
        xrefBlocks = reversed(list(re.finditer(r'(?m)\([\d\,\s\-]+\)$', comment)))
for xrefBlock in xrefBlocks:
xrefs = reversed(list(re.finditer(r'\d+',xrefBlock.group(0))))
for xref in xrefs:
paragraph = xref.group(0)
content,location = self._dictionary[paragraph]
start = xrefBlock.start()+xref.start()
end = xrefBlock.start()+xref.end()
comment = comment[:start]+"["+paragraph+"]("+self.getContextLink(paragraph, location)+")"+comment[end:]
return comment
|
1662621
|
import itertools as it
import brownie
import pytest
DAY = 86400
WEEK = DAY * 7
pytestmark = pytest.mark.usefixtures("lock_alice")
@pytest.mark.parametrize("use_operator,timedelta_bps", it.product([False, True], range(0, 110, 50)))
def test_receiver_can_cancel_at_anytime(
alice, bob, charlie, chain, alice_unlock_time, veboost_delegation, use_operator, timedelta_bps
):
for i in range(10):
veboost_delegation.create_boost(alice, bob, 1_000, 0, alice_unlock_time, i, {"from": alice})
caller = bob
if use_operator:
veboost_delegation.setApprovalForAll(charlie, True, {"from": bob})
caller = charlie
fast_forward_amount = int((alice_unlock_time - chain.time()) * (timedelta_bps / 100))
chain.mine(timedelta=fast_forward_amount)
tokens = [veboost_delegation.get_token_id(alice, i) for i in range(10)]
veboost_delegation.batch_cancel_boosts(tokens + [0] * (256 - len(tokens)), {"from": caller})
boost_values = [veboost_delegation.token_boost(token) for token in tokens]
assert max(boost_values) == 0
@pytest.mark.parametrize("use_operator,timedelta_bps", it.product([False, True], range(0, 130, 20)))
def test_delegator_can_cancel_after_cancel_time_or_expiry(
alice, bob, charlie, chain, alice_unlock_time, veboost_delegation, use_operator, timedelta_bps
):
for i in range(10):
veboost_delegation.create_boost(
alice,
bob,
1_000,
alice_unlock_time - (WEEK * i ** 2),
alice_unlock_time,
i,
{"from": alice},
)
caller = alice
if use_operator:
veboost_delegation.setApprovalForAll(charlie, True, {"from": alice})
caller = charlie
fast_forward_amount = int((alice_unlock_time - chain.time()) * (timedelta_bps / 100))
chain.mine(timedelta=fast_forward_amount)
tokens = [veboost_delegation.get_token_id(alice, i) for i in range(10)]
cancel_times = [veboost_delegation.token_cancel_time(token) for token in tokens]
if chain.time() < max(cancel_times):
with brownie.reverts(dev_revert_msg="dev: must wait for cancel time"):
veboost_delegation.batch_cancel_boosts(tokens + [0] * 246, {"from": caller})
else:
veboost_delegation.batch_cancel_boosts(tokens + [0] * 246, {"from": caller})
boost_values = [veboost_delegation.token_boost(token) for token in tokens]
assert max(boost_values) == 0
@pytest.mark.parametrize("timedelta_bps", range(0, 130, 30))
def test_third_parties_can_only_cancel_past_expiry(
alice, bob, charlie, chain, alice_unlock_time, veboost_delegation, timedelta_bps
):
for i in range(10):
veboost_delegation.create_boost(
alice,
bob,
1_000,
0,
alice_unlock_time - (WEEK * i ** 2),
i,
{"from": alice},
)
fast_forward_amount = int((alice_unlock_time - chain.time()) * (timedelta_bps / 100))
chain.mine(timedelta=fast_forward_amount)
tokens = [veboost_delegation.get_token_id(alice, i) for i in range(10)]
expiry_times = [veboost_delegation.token_expiry(token) for token in tokens]
if chain.time() < max(expiry_times):
with brownie.reverts("Not allowed!"):
veboost_delegation.batch_cancel_boosts(tokens + [0] * 246, {"from": charlie})
else:
veboost_delegation.batch_cancel_boosts(tokens + [0] * 246, {"from": charlie})
boost_values = [veboost_delegation.token_boost(token) for token in tokens]
assert max(boost_values) == 0
|
1662623
|
from galaxy.jobs import JobDestination
import os
def dexseq_memory_mapper( job, tool ):
    # Allocate memory and cores based on the size of the input GTF file.
inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] )
inp_data.update( [ ( da.name, da.dataset ) for da in job.input_library_datasets ] )
gtf_file = inp_data[ "gtf" ].file_name
vmem = 5200
cores = 6
params = {}
gtf_file_size = os.path.getsize(gtf_file) / (1024*1024.0)
if gtf_file_size > 150:
vmem = 30000
cores = 6
# TODO(hxr): fix?
# params["nativeSpecification"] = """
# -q galaxy1.q,all.q -l galaxy1_slots=1 -l h_vmem=%sM -pe "pe*" %s -v
# _JAVA_OPTIONS -v TEMP -v TMPDIR -v PATH -v PYTHONPATH -v
# LD_LIBRARY_PATH -v XAPPLRESDIR -v GDFONTPATH -v GNUPLOT_DEFAULT_GDFONT
# -v MPLCONFIGDIR -soft -l galaxy1_dedicated=1
# """ % (vmem, cores)
params['request_memory'] = vmem / 1024
params['request_cpus'] = cores
params['requirements'] = '(GalaxyGroup == "compute")'
params['priority'] = 128
env = {
'_JAVA_OPTIONS': "-Xmx4G -Xms1G",
}
return JobDestination(id="dexseq_dynamic_memory_mapping", runner="condor", params=params, env=env)
# return JobDestination(id="dexseq_dynamic_memory_mapping", runner="drmaa", params=params)
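# With the values above, a small GTF input maps (roughly) to a condor
# destination requesting ~5 GB of memory and 6 CPUs; GTF files larger
# than 150 MiB bump the request to ~29 GB.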
|
1662684
|
import time
import io
def parse_timestamp(t):
"""Parses a string containing a timestamp.
Args:
t (str): A string containing a timestamp.
Returns:
time.struct_time: A timestamp.
"""
if t is None or t == '0000-00-00T00:00:00Z':
return time.struct_time((0, 0, 0, 0, 0, 0, 0, 0, 0))
return time.strptime(t, '%Y-%m-%dT%H:%M:%SZ')
def read_in_chunks(stream, chunk_size):
    """Read ``stream`` in ``chunk_size``-byte pieces, yielding each as io.BytesIO."""
while True:
data = stream.read(chunk_size)
if not data:
break
yield io.BytesIO(data)
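# Illustrative usage (hypothetical file name and consumer):
#
#   parse_timestamp('2021-03-01T12:00:00Z').tm_year   # -> 2021
#   with open('dump.bin', 'rb') as f:
#       for chunk in read_in_chunks(f, 4096):
#           process(chunk)   # each chunk arrives as an io.BytesIO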
|
1662703
|
from functools import partial
from collections.abc import Callable  # moved out of ``collections`` in Python 3
def call_or_pass(value, args, kwargs):
if isinstance(value, Callable):
return value(*args, **kwargs)
return value
class OptionProperty(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self.name)
def __get__(self, obj, cls):
return partial(self, obj)
def __set__(self, obj, value):
obj.opts[self.name] = value
def __call__(self, obj, *args, **kwargs):
# As a decorator.
if len(args) == 1 and not kwargs and isinstance(args[0], Callable):
obj.opts[self.name] = args[0]
return args[0]
else:
value = obj.opts.get(self.name)
return call_or_pass(value, args, kwargs)
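# Illustrative usage (hypothetical Widget class; assumes instances expose
# an ``opts`` dict, which is where OptionProperty reads and writes values):
#
#   class Widget(object):
#       title = OptionProperty('title')
#       def __init__(self):
#           self.opts = {}
#
#   w = Widget()
#   w.title = 'fixed'        # store a plain value via __set__
#   w.title()                # -> 'fixed'
#
#   @w.title                 # or register a callable via the decorator form
#   def title():
#       return 'computed'
#   w.title()                # -> 'computed'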
|
1662728
|
import urllib.request, urllib.parse, urllib.error
import pyttsx3  # Python 3 fork of pyttsx; same init/say/runAndWait API
engine = pyttsx3.init()
#engine.say('Greetings!')
#engine.say('How are you today?')
engine.runAndWait()
fhand = urllib.request.urlopen('http://data.pr4e.org/romeo.txt')
for line in fhand:
h=line.decode().strip()
print(h)
engine.say(h)
engine.runAndWait()
|
1662730
|
import time
import pyaudio
import socket
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
CHUNK = 4096
HOST = socket.gethostname()
PORT = 8082
audio = pyaudio.PyAudio()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
def callback(in_data, frame_count, time_info, status):
    try:
        s.send(in_data)
        return (None, pyaudio.paContinue)
    except OSError:
        # The callback must always return a (data, flag) tuple;
        # abort the stream once the socket is gone.
        return (None, pyaudio.paAbort)
stream = audio.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK, stream_callback=callback)
try:
    while stream.is_active():
        time.sleep(0.1)
finally:
    stream.close()
    audio.terminate()
    s.close()
|
1662853
|
import json
# execute a transaction/sendCoinsToAddress call
def sendCoinsToAddress(sidechainNode, address, amount, fee):
j = {
"outputs": [
{
"publicKey": str(address),
"value": amount
}
],
"fee": fee
}
request = json.dumps(j)
response = sidechainNode.transaction_sendCoinsToAddress(request)
return response["result"]["transactionId"]
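# Illustrative call (hypothetical node handle and address; the helper
# serializes the request itself, so callers pass plain Python values):
#
#   tx_id = sendCoinsToAddress(node, "3fa85f64...", 1000, 100)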
|
1662855
|
import os, fnmatch
pattern = "// ignore-xcode-12"
def commentOut(n):
if pattern in n:
return "// %s" % (n)
return n
def find(directory, filePattern):
for path, dirs, files in os.walk(os.path.abspath(directory)):
for filename in fnmatch.filter(files, filePattern):
filepath = os.path.join(path, filename)
with open(filepath) as f:
s = f.read()
if pattern in s:
lines = s.split("\n")
s = '\n'.join(list(map(commentOut, lines)))
with open(filepath, "w") as f:
f.write(s)
find("./Sources", "*.swift")
|
1662861
|
import json
import os
from pathlib import Path
import pytest
@pytest.fixture(scope="module")
def api_client():
from rest_framework.test import APIClient
return APIClient()
@pytest.fixture(autouse=True)
def force_authenticate(request, api_client):
"""Automatically authenticate generated requests.
Check ongoing test for the `as_user` or `as_other_user` marks. To use those marks,
`user` and `other_user` must be available in the test scope.
"""
if request.node.get_closest_marker("as_user"):
user = request.getfixturevalue("user")
api_client.force_authenticate(user)
elif request.node.get_closest_marker("as_other_user"):
other_user = request.getfixturevalue("other_user")
api_client.force_authenticate(other_user)
def load_export_data(path: Path) -> dict:
with open(path) as f:
return json.loads(f.read())
MIMIC_CREDENTIALS = {
"host": os.environ.get("MIMIC_HOST", "mimic"),
"port": int(os.environ.get("MIMIC_PORT", 5432)),
"database": os.environ.get("MIMIC_DB", "mimic"),
"login": os.environ.get("MIMIC_LOGIN", "mimic"),
"password": os.environ.get("MIMIC_PASSWORD", "<PASSWORD>"),
"model": "POSTGRES",
}
@pytest.fixture
def mimic_credentials():
return MIMIC_CREDENTIALS
def load_mapping(path: Path) -> dict:
data = load_export_data(path)
data["credential"] = {**data["credential"], **MIMIC_CREDENTIALS}
return data
|
1662864
|
import pytest
from pymonet.immutable_list import ImmutableList
def test_len():
assert len(ImmutableList.empty()) == 0
assert len(ImmutableList.of(1)) == 1
assert len(ImmutableList.of(1).unshift(0)) == 2
def test_immutable():
lst = ImmutableList(1)
lst2 = lst.append(2)
assert lst is not lst2
def test_to_list():
assert ImmutableList(1).unshift(0).to_list() == [0, 1]
def test_of():
assert ImmutableList.of(1, 2, 3, 4).to_list() == [1, 2, 3, 4]
def test_map():
assert ImmutableList.of(1, 2, 3, 4).map(lambda item: item + 1) == ImmutableList.of(2, 3, 4, 5)
def test_filter():
assert ImmutableList.of(1, 2, 3, 4).filter(lambda item: item % 2 == 0) == ImmutableList.of(2, 4)
def test_empty_filter():
assert ImmutableList.of(1, 2, 3, 4).filter(lambda item: False) == ImmutableList.empty()
def test_plus_operator():
assert ImmutableList.of(1, 2) + ImmutableList.of(3, 4) == ImmutableList.of(1, 2, 3, 4)
def test_plus_operator_exception():
with pytest.raises(ValueError):
ImmutableList.of(0) + [1]
def test_find_positive():
assert ImmutableList.of(1, 2, 3, 4).find(lambda item: item % 2 == 0) == 2
def test_find_negative():
assert ImmutableList.of(1, 2, 3, 4).find(lambda item: item < 0) is None
def test_unshift():
assert ImmutableList.of(1, 2).unshift(0) == ImmutableList.of(0, 1, 2)
def test_append():
assert ImmutableList.of(1, 2).append(3) == ImmutableList.of(1, 2, 3)
def test_reduce_addition():
assert ImmutableList.empty().reduce(lambda acc, curr: acc + curr, 0) == 0
assert ImmutableList.of(1).reduce(lambda acc, curr: acc + curr, 0) == 1
assert ImmutableList.of(1, 2).reduce(lambda acc, curr: acc + curr, 0) == 3
assert ImmutableList.of(1, 2, 3).reduce(lambda acc, curr: acc + curr, 0) == 6
def test_reduce_multiplication():
assert ImmutableList.empty().reduce(lambda acc, curr: acc * curr, 1) == 1
assert ImmutableList.of(1).reduce(lambda acc, curr: acc * curr, 1) == 1
assert ImmutableList.of(1, 2).reduce(lambda acc, curr: acc * curr, 1) == 2
assert ImmutableList.of(1, 2, 3).reduce(lambda acc, curr: acc * curr, 1) == 6
|
1662884
|
from . import constants as CONSTANTS
from .producer_property import ProducerProperty
from common.telemetry import telemetry_py
from common.telemetry_events import TelemetryEvent
class Image:
"""
If ``string`` is used, it has to consist of digits 0-9 arranged into
lines, describing the image, for example::
image = Image("90009:"
"09090:"
"00900:"
"09090:"
"90009")
will create a 5×5 image of an X. The end of a line is indicated by a colon.
It's also possible to use a newline (\\n) to indicate the end of a line
like this::
image = Image("90009\\n"
"09090\\n"
"00900\\n"
"09090\\n"
"90009")
The other form creates an empty image with ``width`` columns and
``height`` rows. Optionally ``buffer`` can be an array of
``width``×``height`` integers in range 0-9 to initialize the image::
Image(2, 2, b'\x08\x08\x08\x08')
or::
Image(2, 2, bytearray([9,9,9,9]))
Will create a 2 x 2 pixel image at full brightness.
.. note::
Keyword arguments cannot be passed to ``buffer``.
"""
# Attributes assigned (to functions) later;
# having this here helps the pylint.
HEART = None
HEART_SMALL = None
HAPPY = None
SMILE = None
SAD = None
CONFUSED = None
ANGRY = None
ASLEEP = None
SURPRISED = None
SILLY = None
FABULOUS = None
MEH = None
YES = None
NO = None
CLOCK12 = None
CLOCK11 = None
CLOCK10 = None
CLOCK9 = None
CLOCK8 = None
CLOCK7 = None
CLOCK6 = None
CLOCK5 = None
CLOCK4 = None
CLOCK3 = None
CLOCK2 = None
CLOCK1 = None
ARROW_N = None
ARROW_NE = None
ARROW_E = None
ARROW_SE = None
ARROW_S = None
ARROW_SW = None
ARROW_W = None
ARROW_NW = None
TRIANGLE = None
TRIANGLE_LEFT = None
CHESSBOARD = None
DIAMOND = None
DIAMOND_SMALL = None
SQUARE = None
SQUARE_SMALL = None
RABBIT = None
COW = None
MUSIC_CROTCHET = None
MUSIC_QUAVER = None
MUSIC_QUAVERS = None
PITCHFORK = None
XMAS = None
PACMAN = None
TARGET = None
TSHIRT = None
ROLLERSKATE = None
DUCK = None
HOUSE = None
TORTOISE = None
BUTTERFLY = None
STICKFIGURE = None
GHOST = None
SWORD = None
GIRAFFE = None
SKULL = None
UMBRELLA = None
SNAKE = None
ALL_CLOCKS = None
ALL_ARROWS = None
# implementing image model as described here:
# https://microbit-micropython.readthedocs.io/en/v1.0.1/image.html
def __init__(self, *args, **kwargs):
        # Depending on the number of arguments
        # passed to the constructor, args are treated differently.
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_CREATION)
if len(args) == 0:
# default constructor
self.__LED = self.__string_to_square_array(CONSTANTS.BLANK_5X5)
elif len(args) == 1:
pattern = args[0]
if isinstance(pattern, str):
self.__LED = self.__string_to_square_array(pattern)
else:
raise TypeError("Image(s) takes a string")
else:
width = args[0]
height = args[1]
if width < 0 or height < 0:
# This is not in original, but ideally,
# image should fail non-silently
raise ValueError(CONSTANTS.INDEX_ERR)
if len(args) == 3:
                # This branch handles the optional third ``buffer`` argument
byte_arr = args[2]
self.__LED = self.__bytes_to_array(width, height, byte_arr)
else:
self.__LED = self.__create_leds(width, height)
self.read_only = False
def width(self):
"""
Return the number of columns in the image.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
if len(self.__LED) > 0:
return len(self.__LED[0])
else:
return 0
def height(self):
"""
Return the numbers of rows in the image.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
return len(self.__LED)
def set_pixel(self, x, y, value):
"""
Set the brightness of the pixel at column ``x`` and row ``y`` to the
``value``, which has to be between 0 (dark) and 9 (bright).
This method will raise an exception when called on any of the built-in
read-only images, like ``Image.HEART``.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
if self.read_only:
raise TypeError(CONSTANTS.COPY_ERR_MESSAGE)
elif not self.__valid_pos(x, y):
raise ValueError(CONSTANTS.INDEX_ERR)
elif not self.__valid_brightness(value):
raise ValueError(CONSTANTS.BRIGHTNESS_ERR)
else:
self.__LED[y][x] = value
def get_pixel(self, x, y):
"""
Return the brightness of pixel at column ``x`` and row ``y`` as an
integer between 0 and 9.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
if self.__valid_pos(x, y):
return self.__LED[y][x]
else:
raise ValueError(CONSTANTS.INDEX_ERR)
def shift_up(self, n):
"""
Return a new image created by shifting the picture up by ``n`` rows.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
return self.__shift_vertical(-n)
def shift_down(self, n):
"""
Return a new image created by shifting the picture down by ``n`` rows.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
return self.__shift_vertical(n)
def shift_right(self, n):
"""
Return a new image created by shifting the picture right by ``n``
columns.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
return self.__shift_horizontal(n)
def shift_left(self, n):
"""
Return a new image created by shifting the picture left by ``n``
columns.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
return self.__shift_horizontal(-n)
def crop(self, x, y, w, h):
"""
Return a new image by cropping the picture to a width of ``w`` and a
height of ``h``, starting with the pixel at column ``x`` and row ``y``.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
res = Image(w, h)
res.blit(self, x, y, w, h)
return res
def copy(self):
"""
Return an exact copy of the image.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
return Image(self.__create_string())
    # This inverts the brightness of each LED in place,
    # i.e. a pixel at brightness 4 becomes brightness 5,
    # and a pixel at brightness 9 becomes brightness 0.
    def invert(self):
        """
        Invert the brightness of each of the pixels in the image, in
        place (brightness ``b`` becomes ``9 - b``).
        """
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
for y in range(self.height()):
for x in range(self.width()):
self.set_pixel(x, y, CONSTANTS.BRIGHTNESS_MAX - self.get_pixel(x, y))
# This fills all LEDs with same brightness.
def fill(self, value):
"""
Set the brightness of all the pixels in the image to the
``value``, which has to be between 0 (dark) and 9 (bright).
This method will raise an exception when called on any of the built-in
read-only images, like ``Image.HEART``.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
for y in range(self.height()):
for x in range(self.width()):
self.set_pixel(x, y, value)
# This transposes a certain area (w x h) on src onto the current image.
def blit(self, src, x, y, w, h, xdest=0, ydest=0):
"""
Copy the rectangle defined by ``x``, ``y``, ``w``, ``h`` from the image ``src`` into
this image at ``xdest``, ``ydest``.
Areas in the source rectangle, but outside the source image are treated as having a value of 0.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
if not src.__valid_pos(x, y):
raise ValueError(CONSTANTS.INDEX_ERR)
for count_y in range(h):
for count_x in range(w):
if self.__valid_pos(xdest + count_x, ydest + count_y):
if src.__valid_pos(x + count_x, y + count_y):
transfer_pixel = src.get_pixel(x + count_x, y + count_y)
else:
transfer_pixel = 0
self.set_pixel(xdest + count_x, ydest + count_y, transfer_pixel)
# This adds two images (if other object is not an image, throws error).
# The images must be the same size.
def __add__(self, other):
"""
Create a new image by adding the brightness values from the two images for each pixel.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
if not isinstance(other, Image):
raise TypeError(
CONSTANTS.UNSUPPORTED_ADD_TYPE + f"'{type(self)}', '{type(other)}'"
)
elif not (other.height() == self.height() and other.width() == self.width()):
raise ValueError(CONSTANTS.SAME_SIZE_ERR)
else:
res = Image(self.width(), self.height())
for y in range(self.height()):
for x in range(self.width()):
sum_value = other.get_pixel(x, y) + self.get_pixel(x, y)
display_result = min(CONSTANTS.BRIGHTNESS_MAX, sum_value)
res.set_pixel(x, y, display_result)
return res
# This multiplies image by number (if other factor is not a number, it throws an error).
def __mul__(self, other):
"""
Create a new image by multiplying the brightness of each pixel by n.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
        try:
            float_val = float(other)
        except (TypeError, ValueError):
            raise TypeError(f"can't convert {type(other)} to float")
res = Image(self.width(), self.height())
for y in range(self.height()):
for x in range(self.width()):
product = self.get_pixel(x, y) * float_val
res.set_pixel(x, y, min(CONSTANTS.BRIGHTNESS_MAX, product))
return res
def __repr__(self):
"""
Get a compact string representation of the image.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
ret_str = "Image('"
for index_y in range(self.height()):
ret_str += self.__row_to_str(index_y)
ret_str += "')"
return ret_str
def __str__(self):
"""
Get a readable string representation of the image.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
ret_str = "Image('\n"
for index_y in range(self.height()):
ret_str += "\t" + self.__row_to_str(index_y) + "\n"
ret_str += "')"
return ret_str
# HELPER FUNCTIONS
    # This creates a 2D array of off LEDs with
    # width w and height h.
def __create_leds(self, w, h):
arr = []
for _ in range(h):
sub_arr = []
for _ in range(w):
sub_arr.append(0)
arr.append(sub_arr)
return arr
    # This turns a byte array into the 2D array for the LED field.
def __bytes_to_array(self, width, height, byte_arr):
bytes_translated = bytes(byte_arr)
if not len(bytes_translated) == height * width:
raise ValueError(CONSTANTS.INCORR_IMAGE_SIZE)
arr = []
sub_arr = []
for index, elem in enumerate(bytes_translated):
if index % width == 0 and index != 0:
arr.append(sub_arr)
sub_arr = []
sub_arr.append(elem)
arr.append(sub_arr)
return arr
    # This converts a string (with rows separated by ":" or newlines)
    # into a 2D array arrangement.
def __string_to_square_array(self, pattern):
initial_array, max_subarray_len = self.__string_directly_to_array(pattern)
# Fill in empty spaces in w x h matrix.
for arr_y in initial_array:
num_extra_spaces = max_subarray_len - len(arr_y)
for _ in range(num_extra_spaces):
arr_y.append(0)
return initial_array
def __string_directly_to_array(self, pattern):
        # The result may have gaps in the 2D array
        # and sub-arrays of uneven length.
arr = []
sub_arr = []
max_subarray_len = 0
for elem in pattern:
if elem == ":" or elem == "\n":
if len(sub_arr) > max_subarray_len:
max_subarray_len = len(sub_arr)
arr.append(sub_arr)
sub_arr = []
else:
sub_arr.append(int(elem))
if (
len(pattern) > 0
and not str(pattern)[-1] == ":"
and not str(pattern)[-1] == "\n"
and len(sub_arr) != 0
):
if len(sub_arr) > max_subarray_len:
max_subarray_len = len(sub_arr)
arr.append(sub_arr)
return arr, max_subarray_len
def __valid_brightness(self, value):
return value >= CONSTANTS.BRIGHTNESS_MIN and value <= CONSTANTS.BRIGHTNESS_MAX
def __valid_pos(self, x, y):
return x >= 0 and x < self.width() and y >= 0 and y < self.height()
def __shift_vertical(self, n):
res = Image(self.width(), self.height())
if n > 0:
# down
res.blit(self, 0, 0, self.width(), self.height() - n, 0, n)
else:
# up
if self.__valid_pos(0, abs(n)):
res.blit(self, 0, abs(n), self.width(), self.height() - abs(n), 0, 0)
return res
def __shift_horizontal(self, n):
res = Image(self.width(), self.height())
if n > 0:
# right
res.blit(self, 0, 0, self.width() - n, self.height(), n, 0)
else:
# left
if self.__valid_pos(abs(n), 0):
res.blit(self, abs(n), 0, self.width() - abs(n), self.height(), 0, 0)
return res
def __create_string(self):
ret_str = ""
for index_y in range(self.height()):
ret_str += self.__row_to_str(index_y)
return ret_str
def __row_to_str(self, y):
new_str = ""
for x in range(self.width()):
new_str += str(self.get_pixel(x, y))
new_str += ":"
return new_str
@staticmethod
def __append_images(images):
width = 0
height = 0
for image in images:
width += image.width()
height = max(height, image.height())
res = Image(width, height)
x_ind = 0
for image in images:
res.blit(image, 0, 0, image.width(), image.height(), xdest=x_ind)
x_ind += image.width()
return res
@staticmethod
def __same_image(i1, i2):
if i1.width() != i2.width() or i1.height() != i2.height():
return False
for y in range(i1.height()):
for x in range(i1.width()):
if i1.get_pixel(x, y) != i2.get_pixel(x, y):
return False
return True
# This is for generating functions like Image.HEART
# that return a new read-only Image
def create_const_func(func_name):
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_STATIC)
def func(*args):
const_instance = Image(CONSTANTS.IMAGE_PATTERNS[func_name])
const_instance.read_only = True
return const_instance
func.__name__ = func_name
return ProducerProperty(func)
# for attributes like Image.ALL_CLOCKS
# that return tuples
def create_const_list_func(func_name):
def func(*args):
collection_names = CONSTANTS.IMAGE_TUPLE_LOOKUP[func_name]
ret_list = []
for image_name in collection_names:
const_instance = Image(CONSTANTS.IMAGE_PATTERNS[image_name])
const_instance.read_only = True
ret_list.append(const_instance)
return tuple(ret_list)
func.__name__ = func_name
return ProducerProperty(func)
for name in CONSTANTS.IMAGE_PATTERNS.keys():
setattr(Image, name, create_const_func(name))
for name in CONSTANTS.IMAGE_TUPLE_LOOKUP.keys():
setattr(Image, name, create_const_list_func(name))
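# Illustrative usage (values assumed; mirrors the class docstring above):
#
#   x = Image("90009:09090:00900:09090:90009")
#   x.get_pixel(2, 2)                 # -> 9
#   shifted = x.shift_right(1)        # new Image, shifted one column right
#   Image.HEART.set_pixel(0, 0, 5)    # raises TypeError: built-ins are read-only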
|
1662897
|
from .browser_viz import profile_viewer
from .visualizer import BaseProfileVisualizer, ProfileVisualizer
__all__ = ["ProfileVisualizer", "BaseProfileVisualizer", "profile_viewer"]
|
1662917
|
from .bot import bot
from .server import run as run_server
from .healthcheck import run as run_healthcheck
if __name__ == '__main__':
run_server()
run_healthcheck()
bot.start_polling()
bot.idle()
|
1662935
|
import numpy as np
import random
from q1_softmax import softmax
from q2_gradcheck import gradcheck_naive
from q2_sigmoid import sigmoid, sigmoid_grad
def normalizeRows(x):
""" Row normalization function """
l2norm = np.sqrt((x**2).sum(axis=1, keepdims=True))
x /= l2norm
return x
def test_normalize_rows():
print "Testing normalizeRows..."
x = normalizeRows(np.array([[3.0,4.0],[1, 2]]))
# the result should be [[0.6, 0.8], [0.4472, 0.8944]]
print x
    assert np.allclose(x, np.array([[0.6, 0.8], [0.4472, 0.8944]]), atol=1e-4)
print ""
def softmaxCost(uo, vc, outputVectors):
""" This is softmax(o,c) or y^o or p(o|c) """
return np.exp(uo*vc) / np.sum(np.exp(outputVectors * vc), \
axis=len(vc.shape)-1, keepdims=True)
# vc, u0, uws
def softmaxCostAndGradient(predicted, target, outputVectors, dataset):
""" Softmax cost function for word2vec models """
# Implement the cost and gradients for one predicted word vector
# and one target word vector as a building block for word2vec
# models, assuming the softmax prediction function and cross
# entropy loss.
# Inputs:
# - predicted: numpy ndarray, predicted word vector (\hat{v} in
# the written component or \hat{r} in an earlier version)
# - target: integer, the index of the target word
# - outputVectors: "output" vectors (as rows) for all tokens
# - dataset: needed for negative sampling, unused here.
# Outputs:
# - cost: cross entropy cost for the softmax word prediction
# - gradPred: the gradient with respect to the predicted word
# vector
# - grad: the gradient with respect to all the other word
# vectors
v_c = predicted
uws = outputVectors
u_0 = target
# this is from https://github.com/Khabermas/NLP_ps1
probabilities = softmax(predicted.dot(outputVectors.T))
cost = -np.log(probabilities[target])
delta = probabilities
delta[target] -= 1
N = delta.shape[0]
D = predicted.shape[0]
grad = delta.reshape((N,1)) * predicted.reshape((1,D))
gradPred = (delta.reshape((1,N)).dot(outputVectors)).flatten()
#y_0 = softmaxCost(u_0, v_c, uws) ##np.exp(u_0 * v_c ) / np.sum(u_ws, axis=len(x.shape)-1, keepdims=True)
## TODO: hack np.summing here, but should be done in
## softmaxCost
##cost = np.sum(y_0)
## this is the full cost formula
##cost = -u_0 * v_c + np.log(np.sum(np.exp(uws * v_c)))
#cost = np.sum(np.exp(uws * v_c))
#grad_vc = -u_0 + np.sum(softmaxCost(uws, v_c, uws))*uws
#gradPred = grad_vc
#dce_duw = -v_c*np.sum(softmaxCost(uws, v_c, uws))
#dce_duo = -v_c
#grad = [dce_duw, dce_duo]
return cost, gradPred, grad
def negSamplingCostAndGradient(predicted, target, outputVectors, dataset,
K=10):
""" Negative sampling cost function for word2vec models """
# Implement the cost and gradients for one predicted word vector
# and one target word vector as a building block for word2vec
# models, using the negative sampling technique. K is the sample
# size. You might want to use dataset.sampleTokenIdx() to sample
# a random word index.
#
# Note: See test_word2vec below for dataset's initialization.
#
# Input/Output Specifications: same as softmaxCostAndGradient
# We will not provide starter code for this function, but feel
# free to reference the code you previously wrote for this
# assignment!
v_c = predicted
uws = outputVectors
u_0 = target
u_k = np.zeros((K, v_c.shape[0]))
for i in xrange(K):
u_k[i] = outputVectors[dataset.sampleTokenIdx()]
#cost = np.log(sigmoid(u_0 * v_c)) - np.sum(np.log(sigmoid(u_k * v_c)))
cost = np.log(sigmoid(u_0 * v_c)) - np.sum(np.log(sigmoid(u_k * v_c)))
cost = np.sum(cost)
#dce_vc = - (1-sigmoid(u_0 * v_c)) - np.sum(1-sigmoid(-u_k * v_c))
gradPred = - (1-sigmoid(u_0 * v_c)) - np.sum(1-sigmoid(u_k * v_c))
dce_uo = -(1-sigmoid(u_0 * v_c)) * v_c
dce_uk = - (np.sum(-v_c + v_c * sigmoid(-u_k * v_c)))
#grad = np.sum(v_c - v_c * sigmoid(-u_k * v_c))
grad = [dce_uo, dce_uk]
return cost, gradPred, grad
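# For reference, the standard negative-sampling objective this function is
# working toward (the scratch version above deviates from it) is
#   J = -log(sigmoid(u_o . v_c)) - sum_{k=1..K} log(sigmoid(-u_k . v_c))
# whose gradient w.r.t. v_c is
#   -(1 - sigmoid(u_o . v_c)) * u_o + sum_k (1 - sigmoid(-u_k . v_c)) * u_k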
def skipgram(currentWord, C, contextWords, tokens, inputVectors, outputVectors,
dataset, word2vecCostAndGradient = softmaxCostAndGradient):
""" Skip-gram model in word2vec """
#params provided centerword, C1, context, tokens, inputVectors, outputVectors, dataset, word2vecCostAndGradient
#print skipgram("c", 3, ["a", "b", "e", "d", "b", "c"], dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset)
# Implement the skip-gram model in this function.
# Inputs:
    # - currentWord: a string of the current center word
# - C: integer, context size
# - contextWords: list of no more than 2*C strings, the context words
# - tokens: a dictionary that maps words to their indices in
# the word vector list
# - inputVectors: "input" word vectors (as rows) for all tokens
# - outputVectors: "output" word vectors (as rows) for all tokens
# - word2vecCostAndGradient: the cost and gradient function for
# a prediction vector given the target word vectors,
# could be one of the two cost functions you
# implemented above
# Outputs:
# - cost: the cost function value for the skip-gram model
# - grad: the gradient with respect to the word vectors
# We will not provide starter code for this function, but feel
# free to reference the code you previously wrote for this
# assignment!
uws = outputVectors # rows
vc = inputVectors[tokens[currentWord]]
gradIn = np.zeros(inputVectors.shape)
gradOut = np.zeros(outputVectors.shape)
cost = 0.0
# vc, u0, uws
# softmax(predicted, target, outputVectors, dataset)
# return cost, gradPred (grad_vc), grad (grad uw, uo)
for cwd in contextWords:
#uo = inputVectors[i]
uo = tokens[cwd]
single_cost, single_gin, single_gout = word2vecCostAndGradient(vc, uo, uws, dataset)
cost += single_cost
try:
gradIn[tokens[currentWord],:] += single_gin
gradOut += single_gout[0]
except TypeError:
import pdb; pdb.set_trace()
return cost, gradIn, gradOut
def cbow(currentWord, C, contextWords, tokens, inputVectors, outputVectors,
dataset, word2vecCostAndGradient = softmaxCostAndGradient):
""" CBOW model in word2vec """
# Implement the continuous bag-of-words model in this function.
# Input/Output specifications: same as the skip-gram model
# We will not provide starter code for this function, but feel
# free to reference the code you previously wrote for this
# assignment!
#################################################################
# IMPLEMENTING CBOW IS EXTRA CREDIT, DERIVATIONS IN THE WRIITEN #
# ASSIGNMENT ARE NOT! #
#################################################################
cost = 0
gradIn = np.zeros(inputVectors.shape)
gradOut = np.zeros(outputVectors.shape)
### YOUR CODE HERE
#raise NotImplementedError
### END YOUR CODE
return cost, gradIn, gradOut
#############################################
# Testing functions below. DO NOT MODIFY! #
#############################################
def word2vec_sgd_wrapper(word2vecModel, tokens, wordVectors, dataset, C, word2vecCostAndGradient = softmaxCostAndGradient):
batchsize = 50
cost = 0.0
grad = np.zeros(wordVectors.shape)
N = wordVectors.shape[0]
inputVectors = wordVectors[:N/2,:]
outputVectors = wordVectors[N/2:,:]
for i in xrange(batchsize):
C1 = random.randint(1,C)
centerword, context = dataset.getRandomContext(C1)
if word2vecModel == skipgram:
denom = 1
else:
denom = 1
c, gin, gout = word2vecModel(centerword, C1, context, tokens, inputVectors, outputVectors, dataset, word2vecCostAndGradient)
cost += c / batchsize / denom
# HACK, I am returning multiple gradients because currently dce/duw and
# dce / duo are returned separately, this might be incorrect
#gout = gout[1]
grad[:N/2, :] += gin / batchsize / denom
grad[N/2:, :] += gout / batchsize / denom
return cost, grad
def test_word2vec():
# Interface to the dataset for negative sampling
dataset = type('dummy', (), {})()
def dummySampleTokenIdx():
return random.randint(0, 4)
def getRandomContext(C):
tokens = ["a", "b", "c", "d", "e"]
return tokens[random.randint(0,4)], [tokens[random.randint(0,4)] \
for i in xrange(2*C)]
dataset.sampleTokenIdx = dummySampleTokenIdx
dataset.getRandomContext = getRandomContext
random.seed(31415)
np.random.seed(9265)
dummy_vectors = normalizeRows(np.random.randn(10,3))
dummy_tokens = dict([("a",0), ("b",1), ("c",2),("d",3),("e",4)])
print "==== Gradient check for skip-gram ===="
gradcheck_naive(lambda vec: word2vec_sgd_wrapper(skipgram, dummy_tokens, vec, dataset, 5, softmaxCostAndGradient), dummy_vectors)
gradcheck_naive(lambda vec: word2vec_sgd_wrapper(skipgram, dummy_tokens, vec, dataset, 5, negSamplingCostAndGradient), dummy_vectors)
#print "\n==== Gradient check for CBOW ===="
#gradcheck_naive(lambda vec: word2vec_sgd_wrapper(cbow, dummy_tokens, vec, dataset, 5), dummy_vectors)
#gradcheck_naive(lambda vec: word2vec_sgd_wrapper(cbow, dummy_tokens, vec, dataset, 5, negSamplingCostAndGradient), dummy_vectors)
print "\n=== Results ==="
print "\n=== Skipgram ==="
print "\n=== Softmax ==="
print skipgram("c", 3, ["a", "b", "e", "d", "b", "c"], dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset)
print "\n=== Negative Sampling ===="
print skipgram("a", 3, ["a", "b", "c", "d", "b", "e"], dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset, negSamplingCostAndGradient)
print skipgram("c", 1, ["a", "b"], dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset, negSamplingCostAndGradient)
#print cbow("a", 2, ["a", "b", "c", "a"], dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset)
#print cbow("a", 2, ["a", "b", "a", "c"], dummy_tokens, dummy_vectors[:5,:], dummy_vectors[5:,:], dataset, negSamplingCostAndGradient)
if __name__ == "__main__":
test_normalize_rows()
test_word2vec()
|
1662941
|
import math
from functools import wraps
from theano import tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from lasagne import init
from lasagne.random import get_rng
__all__ = ['Accumulator', 'NormalApproximation', 'NormalApproximationScMix', 'bbpwrap']
c = - 0.5 * math.log(2 * math.pi)
def log_normal(x, mean, std, eps=0.0):
std += eps
return c - T.log(T.abs_(std)) - (x - mean) ** 2 / (2 * std ** 2)
def log_normal3(x, mean, rho, eps=0.0):
std = T.log1p(T.exp(rho))
return log_normal(x, mean, std, eps)
class Accumulator(object):
def __init__(self):
"""
A simple class for accumulating any cost
Used in layers with BayesianMeta
"""
self.srng = RandomStreams(get_rng().randint(1, 2147462579))
self.total = []
def get_cost(self):
return sum(map(T.sum,self.total))
def add_cost(self, new):
self.total.append(new)
class NormalApproximation(object):
def __init__(self, pm=0, pstd=T.exp(-3)):
self.pm = pm
self.pstd = pstd
def log_prior(self, x):
return log_normal(x, self.pm, self.pstd)
def __call__(self, layer, spec, shape, **tags):
# case when user uses default init specs
if not isinstance(spec, dict):
spec = {'mu': spec}
# important!
# we declare that params we add next
# are the ones we need to fit the distribution
tags['variational'] = True
rho_spec = spec.get('rho', init.Normal(1))
mu_spec = spec.get('mu', init.Normal(1))
rho = layer.add_param(rho_spec, shape, **tags)
mean = layer.add_param(mu_spec, shape, **tags)
e = layer.acc.srng.normal(shape, std=1)
W = mean + T.log1p(T.exp(rho)) * e
q_p = self.log_posterior_approx(W, mean, rho) - self.log_prior(W)
layer.acc.add_cost(q_p)
return W
@staticmethod
def log_posterior_approx(W, mean, rho):
return log_normal3(W, mean, rho)
class NormalApproximationScMix(NormalApproximation):
def __init__(self, pm1=.0, pstd1=.5, pi=.5, pm2=.0, pstd2=1e-3):
"""
:param pi:
weight for first Gaussian
pi is in [0, 1]
        :param pm1: float
            prior mean for first Gaussian
        :param pstd1:
            prior std for first Gaussian
        :param pm2:
            prior mean for second Gaussian
        :param pstd2:
            prior std for second Gaussian
"""
        assert .0 <= pi <= 1., 'Weight %s not in [0, 1]' % pi
self.pi = pi
self.pm1 = pm1
self.pstd1 = pstd1
self.pm2 = pm2
self.pstd2 = pstd2
def log_prior(self, x):
return self.pi * log_normal(x, self.pm1, self.pstd1) + \
(1 - self.pi) * log_normal(x, self.pm2, self.pstd2)
def bbpwrap(approximation=NormalApproximation()):
def decorator(cls):
def add_param_wrap(add_param):
@wraps(add_param)
def wrapped(self, spec, shape, name=None, **tags):
# we should take care about some user specification
# to avoid bbp hook just set tags['variational'] = True
if not tags.get('trainable', True) or tags.get('variational', False):
return add_param(self, spec, shape, name, **tags)
else:
# they don't need to be regularized, strictly
tags['regularizable'] = False
param = self.approximation(self, spec, shape, **tags)
return param
return wrapped
def init_wrap(__init__):
@wraps(__init__)
def wrapped(self, acc, *args, **kwargs):
self.acc = acc # type: Accumulator
__init__(self, *args, **kwargs)
return wrapped
cls.approximation = approximation
cls.add_param = add_param_wrap(cls.add_param)
cls.__init__ = init_wrap(cls.__init__)
return cls
return decorator
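# Illustrative usage sketch (BayesDenseLayer is hypothetical; any layer class
# whose add_param should be intercepted works the same way):
#
#   @bbpwrap(NormalApproximation())
#   class BayesDenseLayer(lasagne.layers.DenseLayer):
#       pass
#
#   acc = Accumulator()
#   layer = BayesDenseLayer(acc, incoming, num_units=100)
#   kl = acc.get_cost()   # variational cost to add to the training objective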
|
1662948
|
import re
import random
import requests
import table
import user_agent_list
from bs4 import BeautifulSoup
class HtmlPage:
user_agent_number = 7345
def __init__(self, url):
self.url = url
def get_html(self, creds, proxy_pass):
have_a_try = 3
if not proxy_pass:
while have_a_try:
t = table.Table('proxy_list', creds=creds)
user_agent = user_agent_list.get_user_agent(int(random.random() * self.user_agent_number))
user_agent_dict = {'user-agent': user_agent}
table_exist = t.table_check()
if not table_exist:
print("Proxy table corrupted.")
return False
tab_length = t.table_len()
try:
proxy = t.table_read(int(random.random() * (tab_length[0] - 1)) + 1)
proxy_dict = {proxy[1]: proxy[2]}
except TypeError:
print("Fatal error in proxy list.")
return False
try:
result = requests.get(str.rstrip(self.url), headers=user_agent_dict, proxies=proxy_dict)
result.raise_for_status()
return result.text
except(requests.RequestException, ValueError):
print("Bad proxy. One more try.")
have_a_try -= 1
print("Network error. Update proxy list.")
else:
while have_a_try:
try:
result = requests.get(str.rstrip(self.url))
result.raise_for_status()
return result.text
except(requests.RequestException, ValueError):
have_a_try -= 1
print("Network error. Can't get html.")
return False
def get_wb_page(self, creds, proxy_pass):
html = self.get_html(creds, proxy_pass)
if html:
soup = BeautifulSoup(html, 'html.parser')
articles = {}
for index in soup.findAll('div', class_="dtList i-dtList j-card-item"):
article_number = re.search(r'\d+', index.get('data-catalogercod1s'))
articles[article_number[0]] = index.find('a')['href']
return articles
return False
|
1662978
|
import functools
import warnings
from pylearn2.models.mlp import MLP, CompositeLayer
from pylearn2.space import CompositeSpace, VectorSpace
# block_gradient is assumed to live in pylearn2.utils, as in the upstream
# adversarial code; it is needed by get_samples_and_objectives below.
from pylearn2.utils import block_gradient
import theano
from theano import tensor as T
from theano.compat import OrderedDict
from theano.sandbox.rng_mrg import MRG_RandomStreams
from adversarial import AdversaryPair, AdversaryCost2, Generator, theano_parzen
class ConditionalAdversaryPair(AdversaryPair):
def __init__(self, generator, discriminator, data_space, condition_space,
inferer=None,
inference_monitoring_batch_size=128,
monitor_generator=True,
monitor_discriminator=True,
monitor_inference=True,
shrink_d=0.):
super(ConditionalAdversaryPair, self).__init__(generator, discriminator, inferer,
inference_monitoring_batch_size, monitor_generator, monitor_discriminator,
monitor_inference, shrink_d)
self.data_space = data_space
self.condition_space = condition_space
self.input_source = self.discriminator.get_input_source()
self.output_space = self.discriminator.get_output_space()
def get_monitoring_channels(self, data):
rval = OrderedDict()
g_ch = self.generator.get_monitoring_channels(data)
d_ch = self.discriminator.get_monitoring_channels((data, None))
samples, _, conditional_data, _ = self.generator.sample_and_noise(100)
d_samp_ch = self.discriminator.get_monitoring_channels(((samples, conditional_data), None))
i_ch = OrderedDict()
if self.inferer is not None:
batch_size = self.inference_monitoring_batch_size
sample, noise, conditional_data, _ = self.generator.sample_and_noise(batch_size)
i_ch.update(self.inferer.get_monitoring_channels(((sample, conditional_data), noise)))
if self.monitor_generator:
for key in g_ch:
rval['gen_' + key] = g_ch[key]
        if self.monitor_discriminator:
            # d_ch was computed on real data, d_samp_ch on generated samples.
            for key in d_ch:
                rval['dis_on_data_' + key] = d_ch[key]
            for key in d_samp_ch:
                rval['dis_on_samp_' + key] = d_samp_ch[key]
if self.monitor_inference:
for key in i_ch:
rval['inf_' + key] = i_ch[key]
return rval
class ConditionalGenerator(Generator):
def __init__(self, mlp, input_condition_space, condition_distribution, noise_dim=100, *args, **kwargs):
super(ConditionalGenerator, self).__init__(mlp, *args, **kwargs)
self.noise_dim = noise_dim
self.noise_space = VectorSpace(dim=self.noise_dim)
self.condition_space = input_condition_space
self.condition_distribution = condition_distribution
self.input_space = CompositeSpace([self.noise_space, self.condition_space])
self.mlp.set_input_space(self.input_space)
def sample_and_noise(self, conditional_data, default_input_include_prob=1., default_input_scale=1.,
all_g_layers=False):
"""
Retrieve a sample (and the noise used to generate the sample)
conditioned on some input data.
Parameters
----------
conditional_data: member of self.condition_space
A minibatch of conditional data to feedforward.
default_input_include_prob: float
WRITEME
default_input_scale: float
WRITEME
all_g_layers: boolean
If true, return all generator layers in `other_layers` slot
of this method's return value. (Otherwise returns `None` in
this slot.)
Returns
-------
net_output: 3-tuple
Tuple of the form `(sample, noise, other_layers)`.
"""
if isinstance(conditional_data, int):
conditional_data = self.condition_distribution.sample(conditional_data)
num_samples = conditional_data.shape[0]
noise = self.get_noise((num_samples, self.noise_dim))
# TODO necessary?
formatted_noise = self.noise_space.format_as(noise, self.noise_space)
# Build inputs: concatenate noise with conditional data
inputs = (formatted_noise, conditional_data)
# Feedforward
# if all_g_layers:
# rval = self.mlp.dropout_fprop(inputs, default_input_include_prob=default_input_include_prob,
# default_input_scale=default_input_scale, return_all=all_g_layers)
# other_layers, rval = rval[:-1], rval[-1]
# else:
rval = self.mlp.dropout_fprop(inputs, default_input_include_prob=default_input_include_prob,
default_input_scale=default_input_scale)
# other_layers = None
return rval, formatted_noise, conditional_data, None# , other_layers
def sample(self, conditional_data, **kwargs):
sample, _, _, _ = self.sample_and_noise(conditional_data, **kwargs)
return sample
def get_monitoring_channels(self, data):
if data is None:
m = 100
conditional_data = self.condition_distribution.sample(m)
else:
_, conditional_data = data
m = conditional_data.shape[0]
noise = self.get_noise((m, self.noise_dim))
rval = OrderedDict()
sampled_data = (noise, conditional_data)
try:
rval.update(self.mlp.get_monitoring_channels((sampled_data, None)))
except Exception:
warnings.warn("something went wrong with generator.mlp's monitoring channels")
if self.monitor_ll:
rval['ll'] = T.cast(self.ll(data, self.ll_n_samples, self.ll_sigma),
theano.config.floatX).mean()
rval['nll'] = -rval['ll']
return rval
def ll(self, data, n_samples, sigma):
real_data, conditional_data = data
sampled_data = self.sample(conditional_data)
output_space = self.mlp.get_output_space()
if 'Conv2D' in str(output_space):
samples = output_space.convert(sampled_data, output_space.axes, ('b', 0, 1, 'c'))
samples = samples.flatten(2)
data = output_space.convert(real_data, output_space.axes, ('b', 0, 1, 'c'))
data = data.flatten(2)
parzen = theano_parzen(data, samples, sigma)
return parzen
class CompositeMLPLayer(CompositeLayer):
"""A CompositeLayer where each of the components are MLPs.
Supports forwarding dropout parameters to each MLP independently."""
def __init__(self, layers, *args, **kwargs):
for layer in layers:
assert isinstance(layer, MLP), "CompositeMLPLayer only supports MLP component layers"
super(CompositeMLPLayer, self).__init__(layers=layers, *args, **kwargs)
def _collect_mlp_layer_names(self):
"""Collect the layer names of the MLPs nested within this
layer."""
return [[sub_layer.layer_name for sub_layer in mlp.layers] for mlp in self.layers]
def validate_layer_names(self, req_names):
all_names = []
for sub_names in self._collect_mlp_layer_names():
all_names.extend(sub_names)
if any(req_name not in all_names for req_name in req_names):
unknown_names = [req_name for req_name in req_names
if req_name not in all_names]
raise ValueError("No MLPs in this CompositeMLPLayer have layer(s) named %s" %
", ".join(unknown_names))
def dropout_fprop(self, state_below, input_include_probs=None, input_scales=None,
*args, **kwargs):
"""Extension of Layer#fprop which forwards on dropout parameters
to MLP sub-layers."""
if input_include_probs is None:
input_include_probs = {}
if input_scales is None:
input_scales = {}
# Use to determine which args should be routed to which places
mlp_layer_names = self._collect_mlp_layer_names()
rvals = []
for i, mlp in enumerate(self.layers):
if self.routing_needed and i in self.layers_to_inputs:
cur_state_below = [state_below[j]
for j in self.layers_to_inputs[i]]
# This is to mimic the behavior of CompositeSpace's restrict
# method, which only returns a CompositeSpace when the number
# of components is greater than 1
if len(cur_state_below) == 1:
cur_state_below, = cur_state_below
else:
cur_state_below = state_below
# Get dropout params for relevant layers
relevant_keys_include = set(mlp_layer_names[i]) & set(input_include_probs)
relevant_keys_scale = set(mlp_layer_names[i]) & set(input_scales)
relevant_include = dict((k, input_include_probs[k]) for k in relevant_keys_include)
relevant_scale = dict((k, input_scales[k]) for k in relevant_keys_scale)
rvals.append(mlp.dropout_fprop(cur_state_below,
input_include_probs=relevant_include,
input_scales=relevant_scale,
*args, **kwargs))
return tuple(rvals)
class ConditionalDiscriminator(MLP):
def __init__(self, data_mlp, condition_mlp, joint_mlp,
input_data_space, input_condition_space, input_source=('features', 'condition'),
*args, **kwargs):
"""
A discriminator acting within a cGAN which may "condition" on
extra information.
Parameters
----------
data_mlp: pylearn2.models.mlp.MLP
MLP which processes the data-space information. Must output
a `VectorSpace` of some sort.
condition_mlp: pylearn2.models.mlp.MLP
MLP which processes the condition-space information. Must
output a `VectorSpace` of some sort.
joint_mlp: pylearn2.models.mlp.MLP
MLP which processes the combination of the outputs of the
data MLP and the condition MLP.
input_data_space : pylearn2.space.CompositeSpace
Space which contains the empirical / model-generated data
input_condition_space : pylearn2.space.CompositeSpace
Space which contains the extra data being conditioned on
kwargs : dict
Passed on to MLP superclass.
"""
# Make sure user isn't trying to override any fixed keys
for illegal_key in ['input_source', 'input_space', 'layers']:
assert illegal_key not in kwargs
# First feed forward in parallel along the data and condition
# MLPs; then feed the composite output to the joint MLP
layers = [
CompositeMLPLayer(layer_name='discriminator_composite',
layers=[data_mlp, condition_mlp],
inputs_to_layers={0: [0], 1: [1]}),
joint_mlp
]
super(ConditionalDiscriminator, self).__init__(
layers=layers,
input_space=CompositeSpace([input_data_space, input_condition_space]),
input_source=input_source,
*args, **kwargs)
@functools.wraps(MLP.dropout_fprop)
def dropout_fprop(self, state_below, default_input_include_prob=0.5,
input_include_probs=None, default_input_scale=2.,
input_scales=None, per_example=True):
"""Extended version of MLP#dropout_fprop which supports passing
on dropout parameters to nested MLPs within this MLP.
Coupled with `CompositeMLPLayer`, which is a core part of the
ConditionalDiscriminator setup.
"""
if input_include_probs is None:
input_include_probs = {}
if input_scales is None:
input_scales = {}
layer_name_set = set(input_include_probs.keys())
layer_name_set.update(input_scales.keys())
# Remove layers from the outer net
layer_name_set.difference_update(set(layer.layer_name for layer in self.layers))
# Make sure remaining layers are contained within sub-MLPs
# NOTE: Assumes composite layer is only at position zero
self.layers[0].validate_layer_names(list(input_include_probs.keys()))
self.layers[0].validate_layer_names(list(input_scales.keys()))
theano_rng = MRG_RandomStreams(max(self.rng.randint(2 ** 15), 1))
for layer in self.layers:
layer_name = layer.layer_name
if layer_name in input_include_probs:
include_prob = input_include_probs[layer_name]
else:
include_prob = default_input_include_prob
if layer_name in input_scales:
scale = input_scales[layer_name]
else:
scale = default_input_scale
# Forward propagate
if isinstance(layer, CompositeMLPLayer):
# This is a composite MLP layer -- forward on the
# dropout parameters
state_below = layer.dropout_fprop(state_below,
default_input_include_prob=default_input_include_prob,
input_include_probs=input_include_probs,
default_input_scale=default_input_scale,
input_scales=input_scales,
per_example=per_example)
else:
state_below = self.apply_dropout(
state=state_below,
include_prob=include_prob,
theano_rng=theano_rng,
scale=scale,
mask_value=layer.dropout_input_mask_value,
input_space=layer.get_input_space(),
per_example=per_example
)
state_below = layer.fprop(state_below)
return state_below
class ConditionalAdversaryCost(AdversaryCost2):
"""
Defines the cost expression for a cGAN.
"""
supervised = False
def __init__(self, condition_distribution, **kwargs):
self.condition_distribution = condition_distribution
super(ConditionalAdversaryCost, self).__init__(**kwargs)
def get_samples_and_objectives(self, model, data):
space, sources = self.get_data_specs(model)
space.validate(data)
assert isinstance(model, ConditionalAdversaryPair)
G, D = model.generator, model.discriminator
# X_data: empirical data to be sent to the discriminator. We'll
# make an equal amount of generated data and send this to the
# discriminator as well.
#
# X_condition: Conditional data for each empirical sample.
X_data, X_condition = data
m = X_data.shape[3]
# TODO get_batch_axis is wrong here.. probably a dataset issue?
# Expected discriminator output: 1 for real data, 0 for
# generated samples
y1 = T.alloc(1, m, 1)
y0 = T.alloc(0, m, 1)
# Generate conditional data for the generator
G_conditional_data = self.condition_distribution.sample(m)
S, z, _, other_layers = G.sample_and_noise(G_conditional_data,
default_input_include_prob=self.generator_default_input_include_prob,
default_input_scale=self.generator_default_input_scale,
all_g_layers=(self.infer_layer is not None))
if self.noise_both != 0.:
rng = MRG_RandomStreams(2014 / 6 + 2)
S = S + rng.normal(size=S.shape, dtype=S.dtype) * self.noise_both
X_data = X_data + rng.normal(size=X_data.shape, dtype=X_data.dtype) * self.noise_both
fprop_args = [self.discriminator_default_input_include_prob,
self.discriminator_input_include_probs,
self.discriminator_default_input_scale,
self.discriminator_input_scales]
# Run discriminator on empirical data (1 expected)
y_hat1 = D.dropout_fprop((X_data, X_condition), *fprop_args)
# Run discriminator on generated data (0 expected)
y_hat0 = D.dropout_fprop((S, G_conditional_data), *fprop_args)
# Compute discriminator objective
d_obj = 0.5 * (D.layers[-1].cost(y1, y_hat1) + D.layers[-1].cost(y0, y_hat0))
# Compute generator objective
if self.no_drop_in_d_for_g:
y_hat0_no_drop = D.dropout_fprop(S)
g_obj = D.layers[-1].cost(y1, y_hat0_no_drop)
else:
g_obj = D.layers[-1].cost(y1, y_hat0)
if self.blend_obj:
g_obj = (self.zurich_coeff * g_obj - self.minimax_coeff * d_obj) / (self.zurich_coeff + self.minimax_coeff)
if model.inferer is not None:
# Change this if we ever switch to using dropout in the
# construction of S.
S_nograd = block_gradient(S) # Redundant as long as we have custom get_gradients
pred = model.inferer.dropout_fprop(S_nograd, *fprop_args)
if self.infer_layer is None:
target = z
else:
target = other_layers[self.infer_layer]
i_obj = model.inferer.layers[-1].cost(target, pred)
else:
i_obj = 0
return S, d_obj, g_obj, i_obj
def get_monitoring_channels(self, model, data, **kwargs):
rval = OrderedDict()
space, sources = self.get_data_specs(model)
X_data, X_condition = data
m = X_data.shape[space.get_batch_axis()]
G, D = model.generator, model.discriminator
# Compute false negatives w/ empirical samples
y_hat = D.fprop((X_data, X_condition))
rval['false_negatives'] = T.cast((y_hat < 0.5).mean(), 'float32')
# Compute false positives w/ generated sample
G_conditional_data = self.condition_distribution.sample(m)
samples = G.sample(G_conditional_data)
y_hat = D.fprop((samples, G_conditional_data))
rval['false_positives'] = T.cast((y_hat > 0.5).mean(), 'float32')
# y = T.alloc(0., m, 1)
cost = D.cost_from_X(((samples, G_conditional_data), y_hat))
sample_grad = T.grad(-cost, samples)
rval['sample_grad_norm'] = T.sqrt(T.sqr(sample_grad).sum())
_S, d_obj, g_obj, i_obj = self.get_samples_and_objectives(model, data)
if model.monitor_inference and i_obj != 0:
rval['objective_i'] = i_obj
if model.monitor_discriminator:
rval['objective_d'] = d_obj
if model.monitor_generator:
rval['objective_g'] = g_obj
rval['now_train_generator'] = self.now_train_generator
return rval
|
1662994
|
import ai_flow as af
from ai_flow_plugins.job_plugins.bash import BashProcessor
# Initialize the project and workflow environment.
af.init_ai_flow_context()
# Define 2 bash jobs with simple commands.
with af.job_config('job_1'):
af.user_define_operation(processor=BashProcessor("echo job_1"))
with af.job_config('job_2'):
af.user_define_operation(processor=BashProcessor("echo job_2"))
# Define the relation between the 2 jobs: job_2 starts after job_1 finishes.
af.action_on_job_status(job_name='job_2', upstream_job_name='job_1')
|
1662995
|
import json
import sys
def load_from_file():
    try:
        with open("plugin_options.sav") as fh:
            line = fh.readline()
        return json.loads(line)
    except (IOError, ValueError):
        return []
def store_to_file(options):
with open("plugin_options.sav", "w") as fh:
fh.write(json.dumps(options))
def get_index_of_option_in_options(options, option_name):
i = 0
for option in options:
if option['name'] == option_name:
return i
i = i + 1
return -1
def make_generic_int_option(name, default_value, max_value, min_value):
option = {'name':name, 'type':'int', 'defaultValue':default_value, 'maxValue':max_value, 'minValue':min_value}
return option
def make_generic_double_option(name, default_value, max_value, min_value):
option = {'name':name, 'type':'double', 'defaultValue':default_value, 'maxValue':max_value, 'minValue':min_value}
return option
def make_generic_string_option(name, default_value, strings):
option = {'name':name, 'type':'string', 'defaultValue':default_value, 'strings':strings}
return option
def make_generic_bool_option(name, default_value):
option = {'name':name, 'type':'bool', 'defaultValue':default_value}
return option
def make_transtime(default_value, max_value=600, min_value=1):
option = make_generic_int_option('transTime', default_value, max_value, min_value)
return option
def make_loop(default_value):
option = make_generic_bool_option('loop', default_value)
return option
def make_lindirection(default_value, enabled_strings=['left', 'right', 'up', 'down']):
option = make_generic_string_option('linDirection', default_value, enabled_strings)
return option
def make_rotdirection(default_value):
strings = ['in', 'out']
option = make_generic_string_option('rotDirection', default_value, strings)
return option
def make_raddirection(default_value):
strings = ['cw', 'ccw']
option = make_generic_string_option('radDirection', default_value, strings)
return option
def make_delaytime(default_value, max_value=600, min_value=0):
option = make_generic_int_option('delayTime', default_value, max_value, min_value)
return option
def make_ncolorsperframe(default_value, max_value=50, min_value=0):
option = make_generic_int_option('nColorsPerFrame', default_value, max_value, min_value)
return option
def option_exists(options, name):
index = get_index_of_option_in_options(options, name)
if index >= 0:
return True
else:
return False
def add_option(options):
print("Please choose one from the list of the following plugin options:\n" + \
"1. TransTime\n" + \
"2. loop\n" + \
"3. linDirection\n" + \
"4. radDirection\n" + \
"5. rotDirection\n" + \
"6. delayTime\n" + \
"7. nColorsPerFrame")
    while True:
        try:
            option_choice = int(input(">>>"))
        except ValueError:
            print("please input a number corresponding to a plugin option")
            continue
        if option_choice > 7 or option_choice < 1:
            continue
        break
    chosen_option = {}
    if option_choice == 1:
        print("please enter a default value for the 'transTime' option (int, 1-600): ")
        default_value = int(input(">>>"))
        chosen_option = make_transtime(default_value)
    elif option_choice == 2:
        print("please enter a default value for the 'loop' option (boolean, please enter 1 for true and 0 for false): ")
        default_value = bool(int(input(">>>")))
        chosen_option = make_loop(default_value)
    elif option_choice == 3:
        print("please enter a default value for the 'linDirection' option (choose one of the following: left, right, up, down): ")
        default_value = input(">>>")
        chosen_option = make_lindirection(default_value)
    elif option_choice == 4:
        print("please enter a default value for the 'radDirection' option (choose one of the following: in, out): ")
        default_value = input(">>>")
        chosen_option = make_raddirection(default_value)
    elif option_choice == 5:
        print("please enter a default value for the 'rotDirection' option (choose one of the following: cw, ccw): ")
        default_value = input(">>>")
        chosen_option = make_rotdirection(default_value)
    elif option_choice == 6:
        print("please enter a default value for the 'delayTime' option (int, 0-600): ")
        default_value = int(input(">>>"))
        chosen_option = make_delaytime(default_value)
    elif option_choice == 7:
        print("please enter a default value for the 'nColorsPerFrame' option (int): ")
        default_value = int(input(">>>"))
        chosen_option = make_ncolorsperframe(default_value)
    if not option_exists(options, chosen_option['name']):
        options.append(chosen_option)
if not option_exists(options, chosen_option['name']):
options.append(chosen_option)
def remove_option(options):
print("please input name of option you wish to remove")
    name = input(">>>")
index = get_index_of_option_in_options(options, name)
if index < 0:
print("could not find option : " + name)
return
del options[index]
def load_from_header_file(file_path):
    try:
        with open(file_path) as fh:
            payload = None
            for line in fh:
                if "const char* pluginOptionsJsonString" in line:
                    payload = line
                    break
            if not payload:
                return []
            payload = payload.replace(" ", "")
            payload = payload.replace("constchar*pluginOptionsJsonString=\"", "")
            payload = payload.replace("\\\"", "\"")
            payload = payload.replace("\";", "")
            payload = payload.replace("{\"options\":", "")
            payload = payload.replace("}]}", "}]")
            return json.loads(payload)
    except (IOError, ValueError):
        return []
def write_to_header_file(json_str_escaped_quotes, file_path):
with open(file_path, "w") as fh:
fh.write("#ifndef PLUGIN_OPTIONS_H\n#define PLUGIN_OPTIONS_H\n\n")
fh.write("const char* pluginOptionsJsonString = \"" + json_str_escaped_quotes + "\";\n\n")
fh.write("#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n")
fh.write("const char* getPluginOptionsJsonString(){\n\treturn pluginOptionsJsonString;\n}\n\n")
fh.write("#ifdef __cplusplus\n}\n#endif\n\n#endif\n")
def write_options_to_header_file(options, file_path):
optionsJson = {'options':options}
json_str = json.dumps(optionsJson)
json_str_escaped_quotes = ""
for c in json_str:
if c == "\"":
json_str_escaped_quotes += "\\"
json_str_escaped_quotes += c
write_to_header_file(json_str_escaped_quotes, file_path)
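# For example, a single 'loop' option serializes (roughly) to this header line:
#   const char* pluginOptionsJsonString = "{\"options\": [{\"name\": \"loop\", \"type\": \"bool\", \"defaultValue\": true}]}";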
def print_options(options):
if len(options) == 0:
return
print("##################################")
for option in options:
print(option['name'])
print("\tType: " + option['type'])
print("\tDefault Value: " + str(option['defaultValue']))
if option['type'] == "int" or option['type'] == "double":
print("\tMax Value: " + str(option['maxValue']))
print("\tMin Value: " + str(option['minValue']))
if option['type'] == "string":
print("\tStrings: " + str(option['strings']))
print("##################################")
if __name__ == "__main__":
header_file_path = ""
quit = False
options = []
if len(sys.argv) >= 2:
if sys.argv[1] == "--help":
print("Usage: python sdk_json_builder.py <absolute file path to top level plugin directory>")
exit(0)
file_path = sys.argv[1]
if file_path[-1] == "/":
header_file_path = file_path + "inc/PluginOptions.h"
else:
header_file_path = file_path + "/inc/PluginOptions.h"
else:
print("Usage: python sdk_json_builder.py <absolute file path to top level plugin directory>")
exit(0)
options = load_from_file()
print("************************************* PLUGIN OPTIONS JSON BUILDER *************************************\n")
while (not quit):
print("Choose one of the following:\n" + \
"1. Add option\n" + \
"2. Remove option\n" + \
"3. List Options\n" + \
"4. Write options to header file\n" + \
"5. Quit")
try:
choice = input(">>>")
except:
print("please input a number\n")
continue
if choice == 1:
add_option(options)
store_to_file(options)
elif choice == 2:
remove_option(options)
store_to_file(options)
elif choice == 3:
print_options(options)
elif choice == 4:
write_options_to_header_file(options, header_file_path)
elif choice == 5:
quit = True
else:
print("please input a valid number corresponding to a menu\n")
continue
|
1663008
|
from floyd.model.experiment_config import ExperimentConfig
def mock_exp(exp_id):
class Experiment:
id = exp_id
state = 'success'
name = 'test_name'
task_instances = []
return Experiment()
def mock_task_inst(exp_id):
class TaskInstance:
module_id = '999999'
return TaskInstance()
def mock_experiment_config():
return ExperimentConfig(name="name", family_id="family_id")
def mock_data_config():
class DataConfig:
name = 'my_dataset'
namespace = None
return DataConfig()
def mock_access_token():
class AccessToken:
username = 'username'
token = 'token'
return AccessToken()
|
1663011
|
import torch
from torch.autograd import Function
from . import _roi_pooling as roi_pooling
class RoIPoolFunction(Function):
pooled_width = 0
pooled_height = 0
spatial_scale = 0
    def __init__(self, pooled_height, pooled_width, spatial_scale):
        RoIPoolFunction.static_init(pooled_height, pooled_width, spatial_scale)
    @staticmethod
    def static_init(pooled_height, pooled_width, spatial_scale):
        RoIPoolFunction.pooled_width = pooled_width
        RoIPoolFunction.pooled_height = pooled_height
        RoIPoolFunction.spatial_scale = spatial_scale
@staticmethod
def forward(ctx, features, rois):
feature_size = features.size()
batch_size, num_channels, data_height, data_width = feature_size
num_rois = rois.size(0)
output = features.new(num_rois,
num_channels,
RoIPoolFunction.pooled_height,
RoIPoolFunction.pooled_width).zero_()
argmax = features.new(num_rois, num_channels,
RoIPoolFunction.pooled_height,
RoIPoolFunction.pooled_width).zero_().int()
ctx.save_for_backward(features, rois, argmax)
if not features.is_cuda:
_features = features.permute(0, 2, 3, 1)
roi_pooling.roi_pooling_forward(RoIPoolFunction.pooled_height,
RoIPoolFunction.pooled_width,
RoIPoolFunction.spatial_scale,
_features,
rois,
output)
else:
roi_pooling.roi_pooling_forward_cuda(RoIPoolFunction.pooled_height,
RoIPoolFunction.pooled_width,
RoIPoolFunction.spatial_scale,
features,
rois,
output,
argmax)
return output
@staticmethod
def backward(ctx, grad_output):
features, rois, argmax = ctx.saved_tensors
feature_size = features.size()
assert(feature_size is not None and grad_output.is_cuda)
batch_size, num_channels, data_height, data_width = feature_size
grad_input = grad_output.new(
batch_size, num_channels, data_height, data_width).zero_()
roi_pooling.roi_pooling_backward_cuda(RoIPoolFunction.pooled_height,
RoIPoolFunction.pooled_width,
RoIPoolFunction.spatial_scale,
grad_output,
rois,
grad_input,
argmax)
return grad_input, None
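# Shape-level usage sketch (hypothetical values; the ROI row layout
# (batch_idx, x1, y1, x2, y2) is assumed from typical roi_pooling extensions
# and is not confirmed by the C source here):
#   RoIPoolFunction.static_init(7, 7, 1.0 / 16)
#   feats = torch.rand(1, 256, 38, 50).cuda()
#   rois = torch.tensor([[0., 16., 16., 128., 128.]]).cuda()
#   pooled = RoIPoolFunction.apply(feats, rois)   # -> (1, 256, 7, 7)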
|
1663070
|
import torch.nn as nn
import torchvision
import torch, os
from skimage import morphology as morph
import numpy as np
from src.modules.eprop import eprop
import torch.utils.model_zoo as model_zoo
from scripts.SEAM.network import resnet38_SEAM, resnet38_aff
#----------- LC-FCN8
class FCN8VGG16(nn.Module):
def __init__(self, n_classes, with_attention=False, with_affinity=False,
with_affinity_average=False, shared=False, exp_dict=None):
super().__init__()
self.n_classes = n_classes
self.shared = shared
# PREDEFINE LAYERS
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
self.relu = nn.ReLU(inplace=True)
# VGG16 PART
self.conv1_1 = conv3x3(3, 64, stride=1, padding=100)
self.conv1_2 = conv3x3(64, 64)
self.conv2_1 = conv3x3(64, 128)
self.conv2_2 = conv3x3(128, 128)
self.conv3_1 = conv3x3(128, 256)
self.conv3_2 = conv3x3(256, 256)
self.conv3_3 = conv3x3(256, 256)
self.conv4_1 = conv3x3(256, 512)
self.conv4_2 = conv3x3(512, 512)
self.conv4_3 = conv3x3(512, 512)
self.conv5_1 = conv3x3(512, 512)
self.conv5_2 = conv3x3(512, 512)
self.conv5_3 = conv3x3(512, 512)
self.fc6 = nn.Conv2d(512, 4096, kernel_size=7, stride=1, padding=0)
self.dropout_f6 = nn.Dropout()
self.fc7 = nn.Conv2d(4096, 4096, kernel_size=1, stride=1, padding=0)
self.dropout_f7 = nn.Dropout()
        # SEMANTIC SEGMENTATION PART
self.scoring_layer = nn.Conv2d(4096, self.n_classes, kernel_size=1,
stride=1, padding=0)
self.upscore2 = nn.ConvTranspose2d(self.n_classes, self.n_classes,
kernel_size=4, stride=2, bias=False)
self.upscore_pool4 = nn.ConvTranspose2d(self.n_classes, self.n_classes,
kernel_size=4, stride=2, bias=False)
self.upscore8 = nn.ConvTranspose2d(self.n_classes, self.n_classes,
kernel_size=16, stride=8, bias=False)
        # Initialize Weights
self.scoring_layer.weight.data.zero_()
self.scoring_layer.bias.data.zero_()
self.score_pool3 = nn.Conv2d(256, self.n_classes, kernel_size=1)
self.score_pool4 = nn.Conv2d(512, self.n_classes, kernel_size=1)
self.score_pool3.weight.data.zero_()
self.score_pool3.bias.data.zero_()
self.score_pool4.weight.data.zero_()
self.score_pool4.bias.data.zero_()
self.upscore2.weight.data.copy_(get_upsampling_weight(self.n_classes, self.n_classes, 4))
self.upscore_pool4.weight.data.copy_(get_upsampling_weight(self.n_classes, self.n_classes, 4))
self.upscore8.weight.data.copy_(get_upsampling_weight(self.n_classes, self.n_classes, 16))
self.eprop = eprop.EmbeddingPropagation()
# Pretrained layers
pth_url = 'https://download.pytorch.org/models/vgg16-397923af.pth' # download from model zoo
state_dict = model_zoo.load_url(pth_url)
layer_names = [layer_name for layer_name in state_dict]
counter = 0
for p in self.parameters():
if counter < 26: # conv1_1 to pool5
p.data = state_dict[ layer_names[counter] ]
elif counter == 26: # fc6 weight
p.data = state_dict[ layer_names[counter] ].view(4096, 512, 7, 7)
elif counter == 27: # fc6 bias
p.data = state_dict[ layer_names[counter] ]
elif counter == 28: # fc7 weight
p.data = state_dict[ layer_names[counter] ].view(4096, 4096, 1, 1)
elif counter == 29: # fc7 bias
p.data = state_dict[ layer_names[counter] ]
counter += 1
self.with_attention = with_attention
if with_attention:
self.att1 = Attention_block(self.n_classes,
self.n_classes,
self.n_classes).cuda()
self.att2 = Attention_block(self.n_classes,
self.n_classes,
self.n_classes).cuda()
self.with_affinity = with_affinity
if with_affinity or self.shared:
self.model_aff = resnet38_aff.Net(self.n_classes, exp_dict).cuda()
self.model_aff.load_state_dict(torch.load(os.path.join('/mnt/public/weights', 'resnet38_aff_SEAM.pth')), strict=False)
self.with_affinity_average = with_affinity_average
# siamese
# self.siamese_network = Siamese()
def forward(self, x, return_features=False, return_cam=False, crf=False):
n,c,h,w = x.size()
# VGG16 PART
conv1_1 = self.relu( self.conv1_1(x) )
conv1_2 = self.relu( self.conv1_2(conv1_1) )
pool1 = self.pool(conv1_2)
conv2_1 = self.relu( self.conv2_1(pool1) )
conv2_2 = self.relu( self.conv2_2(conv2_1) )
pool2 = self.pool(conv2_2)
# pool2 = self.eprop(pool2)
conv3_1 = self.relu( self.conv3_1(pool2) )
conv3_2 = self.relu( self.conv3_2(conv3_1) )
conv3_3 = self.relu( self.conv3_3(conv3_2) )
pool3 = self.pool(conv3_3)
conv4_1 = self.relu( self.conv4_1(pool3) )
conv4_2 = self.relu( self.conv4_2(conv4_1) )
conv4_3 = self.relu( self.conv4_3(conv4_2) )
pool4 = self.pool(conv4_3)
conv5_1 = self.relu( self.conv5_1(pool4) )
conv5_2 = self.relu( self.conv5_2(conv5_1) )
conv5_3 = self.relu( self.conv5_3(conv5_2) )
pool5 = self.pool(conv5_3)
fc6 = self.dropout_f6( self.relu( self.fc6(pool5) ) )
fc7 = self.dropout_f7( self.relu( self.fc7(fc6) ) )
# SEMANTIC SEGMENTATION PART
# first
scores = self.scoring_layer( fc7 )
upscore2 = self.upscore2(scores)
# second
score_pool4 = self.score_pool4(pool4)
score_pool4c = score_pool4[:, :, 5:5+upscore2.size(2),
5:5+upscore2.size(3)]
if self.with_attention:
score_pool4c = self.att1(g=upscore2, x=score_pool4c)
upscore_pool4 = self.upscore_pool4(score_pool4c + upscore2)
# third
score_pool3 = self.score_pool3(pool3)
score_pool3c = score_pool3[:, :, 9:9+upscore_pool4.size(2),
9:9+upscore_pool4.size(3)]
if self.with_attention:
score_pool3c = self.att2(g=upscore_pool4, x=score_pool3c)
output = self.upscore8(score_pool3c + upscore_pool4)
logits = output[:, :, 31: (31 + h), 31: (31 + w)].contiguous()
if self.shared:
logits = cam = self.model_aff.output_logits(x)
if self.with_affinity:
logits_aff = self.model_aff.apply_affinity(x, logits, crf=crf)
if self.with_affinity_average:
logits = (logits_aff + logits) / 2.
else:
logits = logits_aff
if return_features:
return logits, upscore_pool4, fc7
        if return_cam:
            # note: this path assumes shared=True (defines cam) and with_affinity=True (defines logits_aff)
            return cam, logits_aff
return logits
# ===========================================================
# helpers
def get_upsampling_weight(in_channels, out_channels, kernel_size):
"""Make a 2D bilinear kernel suitable for upsampling"""
factor = (kernel_size + 1) // 2
if kernel_size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:kernel_size, :kernel_size]
filt = (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor)
weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size),
dtype=np.float64)
weight[range(in_channels), range(out_channels), :, :] = filt
return torch.from_numpy(weight).float()
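# Minimal usage sketch mirroring how the deconv layers above are initialized
# (21 classes chosen arbitrarily for illustration):
#   up2x = nn.ConvTranspose2d(21, 21, kernel_size=4, stride=2, bias=False)
#   up2x.weight.data.copy_(get_upsampling_weight(21, 21, 4))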
def conv3x3(in_planes, out_planes, stride=1, padding=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=(3,3), stride=(stride,stride),
padding=(padding,padding))
def conv1x1(in_planes, out_planes, stride=1):
"1x1 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0)
class Attention_block(nn.Module):
def __init__(self,F_g,F_l,F_int):
super(Attention_block,self).__init__()
self.W_g = nn.Sequential(
nn.Conv2d(F_g, F_int, kernel_size=1,stride=1,padding=0,bias=True),
# nn.BatchNorm2d(F_int)
)
self.W_x = nn.Sequential(
nn.Conv2d(F_l, F_int, kernel_size=1,stride=1,padding=0,bias=True),
# nn.BatchNorm2d(F_int)
)
self.psi = nn.Sequential(
nn.Conv2d(F_int, 1, kernel_size=1,stride=1,padding=0,bias=True),
# nn.BatchNorm2d(1),
nn.Sigmoid()
)
self.relu = nn.ReLU(inplace=True)
def forward(self,g,x):
g1 = self.W_g(g)
x1 = self.W_x(x)
psi = self.relu(g1+x1)
psi = self.psi(psi)
return x*psi
class Siamese(nn.Module):
def __init__(self):
super(Siamese, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(2, 64, 10), # 64@96*96
nn.ReLU(inplace=True),
nn.MaxPool2d(2), # 64@48*48
nn.Conv2d(64, 128, 7),
nn.ReLU(), # 128@42*42
nn.MaxPool2d(2), # 128@21*21
nn.Conv2d(128, 128, 4),
nn.ReLU(), # 128@18*18
nn.MaxPool2d(2), # 128@9*9
nn.Conv2d(128, 256, 4),
nn.ReLU(), # 256@6*6
)
        self.linear = nn.Sequential(nn.Linear(256 * 6 * 6, 4096), nn.Sigmoid())  # 256@6*6 flattened, per the annotations above
self.out = nn.Linear(4096, 1)
def forward_one(self, x):
x = self.conv(x)
x = x.view(x.size()[0], -1)
        x = self.linear(x)
return x
def forward(self, x1, x2):
out1 = self.forward_one(x1)
out2 = self.forward_one(x2)
dis = torch.abs(out1 - out2)
out = self.out(dis)
# return self.sigmoid(out)
return out
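# Usage sketch (input size assumed from the layer annotations above:
# two-channel 105x105 pairs yield the 256@6x6 feature map):
#   net = Siamese()
#   score = net(torch.rand(4, 2, 105, 105), torch.rand(4, 2, 105, 105))  # -> (4, 1)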
|
1663109
|
import pytest
from django.urls import reverse
@pytest.mark.django_db
def test_xliff_more_buttons(admin_client, page):
resp = admin_client.get(reverse("wagtailadmin_explore_root"))
    # assert that the page's "more" buttons include the XLIFF download and upload actions
assert set(["Download XLIFF", "Upload XLIFF"]) <= set(
[button.label for button in resp.context[63].get("buttons")]
)
|
1663119
|
import struct
import sys
import io
import wave
import flac
from pathlib import Path
from bitstring import Bits
MAGIC = Bits('0xbe0498c88')
def twos_complement(n, bits):
mask = 2 ** (bits - 1)
return -(n & mask) + (n & ~mask)
def iter_i24_as_i32(data):
for l, h in struct.iter_unpack('<BH', data):
yield twos_complement(h << 8 | l, 24) << 8
def iter_i16_as_i32(data):
for x, in struct.iter_unpack('<h', data):
yield x << 16
def peek(f, n):
o = f.tell()
r = f.read(n)
f.seek(o)
return r
def main(path):
with open(str(path), 'rb') as f:
magic = peek(f, 4)
if magic == b'fLaC':
with flac.BitInputStream(f) as bf:
f = io.BytesIO()
flac.decode_file(bf, f, seconds=1)
f.seek(0)
with wave.open(f) as wf:
nchannels, sampwidth, framerate, *_ = wf.getparams()
if nchannels != 2:
raise ValueError('Input must be stereo')
if sampwidth == 3:
iter_data = iter_i24_as_i32
elif sampwidth == 2:
iter_data = iter_i16_as_i32
else:
raise ValueError('Input must be 16- or 24-bit')
sound_data = wf.readframes(framerate)
samples = list(iter_data(sound_data))
streams = (Bits((x ^ y) >> p & 1
for x, y in zip(samples[::2], samples[1::2]))
for p in range(16, 24))
if any(s.find(MAGIC) for s in streams):
print('\x1b[1;31m MQA syncword present. [{}] \x1b[0m'.format(str(path)))
else:
print('\x1b[1;32m Didn\'t find an MQA syncword. [{}] \x1b[0m'.format(path.parts[-1]))
if __name__ == '__main__':
args = sys.argv[1:]
flacpaths = []
for path in args:
path = Path(path)
if Path.is_dir(path):
flacpaths += sorted(Path(path).glob('**/*.flac'))
elif str(path).endswith('.flac') and path.is_file():
flacpaths.append(path)
print('\x1b[1;33m Found {} flac file(s). Decoding now... \x1b[0m'.format(len(flacpaths)))
for fpath in flacpaths:
try:
main(fpath)
except Exception as ex:
print(ex)
|
1663142
|
import tensorflow as tf
import sys
import os
import tf_util
import multi_model as resnet_model
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
def placeholder_inputs(batch_size, num_point, resolution, views, devices):
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size * views * len(devices), resolution, resolution, 1))
labels_pl = tf.placeholder(tf.int32, shape=batch_size * len(devices))
return pointclouds_pl, labels_pl
def _get_block_sizes(resnet_size):
"""Retrieve the size of each block_layer in the ResNet model.
The number of block layers used for the Resnet model varies according
to the size of the model. This helper grabs the layer set we want, throwing
an error if a non-standard size has been selected.
Args:
resnet_size: The number of convolutional layers needed in the model.
Returns:
A list of block sizes to use in building the model.
Raises:
KeyError: if invalid resnet_size is received.
"""
choices = {
9: [2, 2],
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
200: [3, 24, 36, 3]
}
try:
return choices[resnet_size]
except KeyError:
err = (
'Could not find layers for selected Resnet size.\n'
'Size received: {}; sizes allowed: {}.'.format(resnet_size, choices.keys())
)
raise ValueError(err)
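# For example, _get_block_sizes(18) returns [2, 2, 2, 2] and _get_block_sizes(9)
# returns [2, 2]; an unlisted size such as 20 raises ValueError.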
def get_model(images, batch, views, is_training, bn_decay, num_classes=15, bn=True, resnet_size=18, kernel_size=7,
conv_stride=2, first_pool_size=3, first_pool_stride=2):
""" Classification Resnet, input is (BxV)XRXRX1, output Bx40 """
resnet_version = 2
data_format = None
dtype = resnet_model.DEFAULT_DTYPE
print(resnet_size)
block_strides = [1, 2] if resnet_size == 9 else [1, 2, 2, 2]
model = resnet_model.Model(
resnet_size=resnet_size,
bottleneck=False,
num_classes=num_classes,
num_filters=16,
kernel_size=kernel_size,
conv_stride=conv_stride,
first_pool_size=first_pool_size,
first_pool_stride=first_pool_stride,
block_sizes=_get_block_sizes(resnet_size),
block_strides=block_strides,
bn_decay=bn_decay,
resnet_version=resnet_version,
data_format=data_format,
dtype=dtype,
bn=bn
)
print(f"kernel_size: {kernel_size}")
print(f"conv_stride: {conv_stride}")
print(f"first_pool_size: {first_pool_size}")
print(f"first_pool_stride: {first_pool_stride}")
features = model(images, training=is_training) # (BXV)XF
with tf.compat.v1.variable_scope("extract", reuse=tf.compat.v1.AUTO_REUSE):
if views != 1:
out = tf.reshape(features, [batch * views, -1, 1, 1])
out = tf_util.batch_norm_for_conv2d(out, is_training, bn_decay, scope="bn1")
out = tf.reshape(out, [batch, views, -1])
out = tf_util.dropout(out, is_training, scope="dp1")
out = tf.reshape(out, [batch, -1])
out = tf_util.fully_connected(out, 128, scope="fc1", is_training=is_training, bn=bn, bn_decay=bn_decay)
out = tf_util.dropout(out, is_training, scope="dp2")
else:
out = features
out = tf_util.fully_connected(out, num_classes, activation_fn=None, scope='fc3') # BXnum_classes
end_points = {}
return out, end_points
def get_loss(pred, label, weight_decay, end_points, loss_filter_fn=None, num_classes=15):
""" pred: B*NUM_CLASSES,
label: B, """
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
# If no loss_filter_fn is passed, assume we want the default behavior,
# which is that batch_normalization variables are excluded from loss.
def exclude_batch_norm(v):
tf.summary.scalar(v.name, tf.nn.l2_loss(tf.cast(v, tf.float32)))
return 'batch_normalization' not in v.name
loss_filter_fn = loss_filter_fn or exclude_batch_norm
print(loss_filter_fn)
# Add weight decay to the loss.
l2_loss = weight_decay * tf.add_n(
# loss is computed using fp32 for numerical stability.
[
tf.nn.l2_loss(tf.cast(v, tf.float32))
for v in tf.compat.v1.trainable_variables()
if loss_filter_fn(v)
])
tf.summary.scalar('l2_loss', l2_loss)
loss = classify_loss + l2_loss
return loss
def parameters():
total_parameters = 0
for variable in tf.trainable_variables():
# shape is an array of tf.Dimension
shape = variable.get_shape()
print(shape)
print(len(shape))
variable_parameters = 1
for dim in shape:
print(dim)
variable_parameters *= dim.value
print(variable_parameters)
total_parameters += variable_parameters
print(total_parameters)
return total_parameters
if __name__ == '__main__':
with tf.Graph().as_default():
        # smoke test (shapes assumed): get_model expects (B*V)xRxRx1 images;
        # bn_decay=None is assumed to fall back to a default inside the model
        inputs = tf.zeros((32, 64, 64, 1))
        outputs = get_model(inputs, batch=32, views=1, is_training=tf.constant(True), bn_decay=None)
print(outputs)
|
1663187
|
from pygears.lib import sdp, check, drv, delay
from pygears.typing import Uint, Tuple
wr_addr_data = drv(t=Tuple[Uint[2], Uint[3]],
seq=[(0, 0), (1, 2), (2, 4), (3, 6)])
rd_addr = drv(t=Uint[2], seq=[0, 1, 2, 3]) | delay(1)
rd_addr \
| sdp(wr_addr_data) \
| check(ref=[0, 2, 4, 6])
|
1663217
|
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator
import numpy as np
class Regressor(BaseEstimator):
def __init__(self):
self.n_components = 10
self.n_estimators = 40
self.learning_rate = 0.2
self.list_molecule = ['A', 'B', 'Q', 'R']
self.dict_reg = {}
for mol in self.list_molecule:
self.dict_reg[mol] = Pipeline([
('pca', PCA(n_components=self.n_components)),
('reg', GradientBoostingRegressor(
n_estimators=self.n_estimators,
learning_rate=self.learning_rate,
random_state=42))
])
def fit(self, X, y):
for i, mol in enumerate(self.list_molecule):
ind_mol = np.where(np.argmax(X[:, -4:], axis=1) == i)[0]
X_mol = X[ind_mol]
y_mol = y[ind_mol]
            self.dict_reg[mol].fit(X_mol, np.log(y_mol))
        return self  # sklearn convention: fit returns the estimator
def predict(self, X):
y_pred = np.zeros(X.shape[0])
for i, mol in enumerate(self.list_molecule):
ind_mol = np.where(np.argmax(X[:, -4:], axis=1) == i)[0]
X_mol = X[ind_mol]
y_pred[ind_mol] = np.exp(self.dict_reg[mol].predict(X_mol))
return y_pred
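# Minimal usage sketch on synthetic data (shapes assumed: the last 4 columns
# one-hot encode the molecule A/B/Q/R, and y must be positive since fit()
# models log(y)):
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = np.hstack([rng.rand(80, 20), np.eye(4)[rng.randint(0, 4, 80)]])
    y = rng.rand(80) + 1.0
    reg = Regressor()
    reg.fit(X, y)
    print(reg.predict(X)[:5])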
|
1663218
|
from pathlib import Path
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import argparse
'''
How to run
python assemble_timer_output.py -b folder_before -a folder_after -d folder_output -o file_name_prefix
'''
plt.rcParams['figure.figsize'] = [10, 6]
plt.rcParams.update({'font.size': 18, 'figure.dpi': 150})
sns.set(rc={"lines.linewidth": 0.7})
# https://github.com/mwaskom/seaborn/issues/915
def fixed_boxplot(x, y, *args, label=None, **kwargs):
sns.boxplot(x=x, y=y, *args, **kwargs, labels=[label])
def plot_micro_timing_min_cag_distributions(df_mcmc_and_kde, measurement='Wall Clock Time (ns)', hue='Sample Type',
line=True, separate=True, summary=False, file_name_prefix=''):
min_cag = df_mcmc_and_kde['Edges'] == (df_mcmc_and_kde['Nodes'] - 1)
df_min_cag = df_mcmc_and_kde[min_cag]
df_min_cag.to_csv('min_cag.csv', index=False)
if line:
sns.lineplot(data=df_min_cag, x='Nodes', y=measurement, hue=hue, marker='o', linewidth=2)
if summary:
plt.title('Percentage Speedup for a Single MCMC Iteration (# Edges = # Nodes - 1)', size=16)
else:
plt.title('Timing for a Single MCMC Iteration (# Edges = # Nodes - 1)', size=16)
plt.tight_layout()
else:
if separate:
g = sns.FacetGrid(df_min_cag, col=hue, row='Nodes', hue=hue, sharex='col', margin_titles=True)
else:
g = sns.FacetGrid(df_min_cag, row='Nodes', hue=hue, sharex='col', margin_titles=True)
# g = sns.FacetGrid(df_min_cag, row='Nodes', hue='Sample Type', sharex=False, sharey=False, margin_titles=True)
g.map(sns.histplot, measurement)
# g.map(fixed_boxplot, 'Sample Type', measurement)
g.fig.set_figwidth(24)
g.fig.set_figheight(11)
g.set_titles(col_template='{col_name}', row_template='{row_name} Nodes')
g.fig.suptitle('Micro Timing for Minimum Size CAGs', size=16)
g.fig.subplots_adjust(top=.9)
        # Iterate through each axis
for ax in g.axes.flat:
ax.set_ylabel('Number of Samples')
g.add_legend()
if file_name_prefix:
plt.savefig(file_name_prefix)
else:
plt.show()
plt.close()
def plot_micro_timing_distributions(df_mcmc_and_kde, measurement='Wall Clock Time (ns)', separate=True):
df_nodes = df_mcmc_and_kde.groupby(by=['Nodes'], as_index=False)
plot_no = 1
for nodes, df_node in df_nodes:
if separate:
g = sns.FacetGrid(df_node, col='Sample Type', row='Edges', hue='Sample Type', sharex='col', sharey='row', margin_titles=True)
file_name_modifier = 'sep'
else:
g = sns.FacetGrid(df_node, row='Edges', hue='Sample Type', sharex='col', sharey='row', margin_titles=True)
file_name_modifier = 'comb'
g.map(sns.histplot, measurement)
g.fig.set_figwidth(24)
g.fig.set_figheight(11)
g.set_titles(col_template='{col_name}', row_template='{row_name} Edges')
g.fig.suptitle(f'Micro Timing for CAGs with {nodes} Nodes', size=16)
g.fig.subplots_adjust(top=.9)
        # Iterate through each axis
for ax in g.axes.flat:
ax.set_ylabel('Number of Samples')
g.add_legend()
# plt.tight_layout()
# plt.savefig(f'{out_dir}{plot_no}_{file_name_modifier}_{nodes}.png')
plot_no += 1
plt.show()
plt.close()
def plot_micro_timing_summary_per_cag_size(df_mcmc_and_kde, measurement='Wall Clock Time (ns)', separate=True,
title_specifier='', y_label='', file_name_prefix='', timing_type=''):
def edges_to_label(row):
if row['Edges'] == row['Nodes'] - 1:
return '$Nodes - 1$'
elif row['Edges'] == int((row['Nodes'] - 1) * 5 / 4):
return '$\\frac{5(Nodes - 1)}{4}$'
elif row['Edges'] == int((row['Nodes'] - 1) * 6 / 4):
return '$\\frac{6(Nodes - 1)}{4}$'
elif row['Edges'] == int((row['Nodes'] - 1) * 7 / 4):
return '$\\frac{7(Nodes - 1)}{4}$'
elif row['Edges'] == (row['Nodes'] - 1) * 2:
return '$2(Nodes - 1)$'
order = ['$Nodes - 1$', '$\\frac{5(Nodes - 1)}{4}$', '$\\frac{6(Nodes - 1)}{4}$',
'$\\frac{7(Nodes - 1)}{4}$', '$2(Nodes - 1)$']
if separate:
df_nodes = df_mcmc_and_kde.groupby(by=['Nodes'], as_index=False)
for nodes, df_node in df_nodes:
sns.lineplot(data=df_node, x='Edges', y=measurement, hue='Sample Type', marker='o', linewidth=2)
plt.title(f'Variation of the {title_specifier} for a Single MCMC Iteration\nwith the Number of Edges for {nodes} Nodes', size=16)
if file_name_prefix:
plt.savefig(f'{file_name_prefix}with_num_edges_for_{nodes}_nodes.png')
else:
plt.show()
plt.close()
else:
df_mcmc_and_kde = df_mcmc_and_kde.copy()
df_mcmc_and_kde['x label'] = df_mcmc_and_kde.apply(edges_to_label, axis=1)
# set categorical order
df_mcmc_and_kde['x label'] = pd.Categorical(df_mcmc_and_kde['x label'], categories=order, ordered=True)
df_mcmc_and_kde['Nodes'] = df_mcmc_and_kde['Nodes'].apply(lambda nodes: str(nodes))
sns.lineplot(data=df_mcmc_and_kde[((df_mcmc_and_kde['Sample Type'] == timing_type) & (df_mcmc_and_kde[measurement] >= 0))],
x='x label', y=measurement, hue='Nodes', linewidth=2)
plt.xlabel('Edges (as a function of Nodes)')
plt.ylabel(y_label)
plt.title(f'Variation of the {title_specifier} for a Single MCMC Iteration\nwith the Number of Edges', size=16)
if file_name_prefix:
plt.savefig(f'{file_name_prefix}with_num_edges.png')
else:
plt.show()
plt.close()
def plot_prediction_timing_min_cag(df_prediction, measurement='Wall Clock Time (ns)', line=False, separate=True):
min_cag = df_prediction['Edges'] == (df_prediction['Nodes'] - 1)
df_min_cag = df_prediction[min_cag]
if line:
sns.lineplot(data=df_min_cag, x='Nodes', y=measurement, marker='o', linewidth=2)
plt.title('Prediction Timing for Minimum Size CAGs', size=16)
plt.tight_layout()
else:
if separate:
g = sns.FacetGrid(df_min_cag, row='Nodes', margin_titles=True)
else:
g = sns.FacetGrid(df_min_cag, row='Nodes', hue='Sample Type', sharex='col', margin_titles=True)
g.map(sns.histplot, measurement)
g.fig.set_figwidth(24)
g.fig.set_figheight(11)
g.set_titles(col_template='{col_name}', row_template='{row_name} Nodes')
        g.fig.suptitle('Prediction Timing for Minimum Size CAGs', size=16)
g.fig.subplots_adjust(top=.9)
        # Iterate through each axis
for ax in g.axes.flat:
ax.set_ylabel('Number of Samples')
g.add_legend()
plt.show()
plt.close()
def plot_prediction_timing_distributions(df_prediction, measurement='Wall Clock Time (ns)', separate=True):
df_prediction = df_prediction.groupby(by=['Nodes'], as_index=False)
for nodes, df_node in df_prediction:
if separate:
g = sns.FacetGrid(df_node, col='Nodes', row='Edges', hue='Nodes', sharex='col', margin_titles=True)
else:
g = sns.FacetGrid(df_node, row='Edges', hue='Nodes', sharex='col', margin_titles=True)
g.map(sns.histplot, measurement)
g.fig.set_figwidth(24)
g.fig.set_figheight(11)
g.set_titles(row_template='{row_name} Edges')
g.fig.suptitle(f'Prediction Timing Distributions for CAGs with {nodes} Nodes', size=16)
g.fig.subplots_adjust(top=.9)
        # Iterate through each axis
for ax in g.axes.flat:
ax.set_ylabel('Number of Samples')
g.add_legend()
# plt.tight_layout()
plt.show()
plt.close()
def analyze_micro_timing_data(df, mcmc_timing=False):
# df_summerry = df.groupby(by=['Nodes', 'Edges', 'Sample Type'], as_index=False).agg(['mean', 'median', 'std'])
# print(df_summerry)
# return
df_node_edge = df.groupby(by=['Nodes'], as_index=False)
for ne, df_ne in df_node_edge:
# print(ne)
# print(df_ne.columns)
# fig, ax = plt.subplots(dpi=250, figsize=(24, 6.75))
g = sns.FacetGrid(df_ne, col='Sample Type', row='Edges', sharex='col', margin_titles=True)
g.map(sns.histplot, 'Time Wall')
plt.show()
plt.close()
        continue  # NOTE: short-circuits each iteration; the mcmc_timing block below is currently dead code
if mcmc_timing:
df_sample_type = df_ne.groupby(by=['Sample Type'], as_index=False)
for st, df_st in df_sample_type:
min_cag = df_st['Edges'] == (df_st['Nodes'] - 1)
df_min_cag = df_st[min_cag]
# print(st)
# print(df_st.columns)
# continue
# sns.lineplot(data=df_min_cag, x='Nodes', y='MCMC Wall', marker='o', linewidth=2)
sns.histplot(df_min_cag, x='MCMC Wall', element='step',
color=(0.9375, 0.5, 0.5), stat='probability')
title = 'Sampling $\\theta$ ' if st == 1 else 'Sampling derivative '
plt.title(title + f'{ne}')
plt.tight_layout()
# plt.savefig(f'{out_dir}{plot_no}_{title} - line.png')
plt.show()
plt.close()
def assemble_micro_timing_output_files_into_df(folder, file_name_filter, ns_to_ms=True):
csv_files = Path(folder).glob(f'*{file_name_filter}*.csv')
df = pd.concat(map(pd.read_csv, csv_files), ignore_index=True)
df.drop(['Run', 'KDE Kernels'], axis=1, inplace=True, errors='ignore')
if ns_to_ms:
df['CPU Time (ns)'] = df['CPU Time (ns)'].apply(lambda ns: ns / 1000000.0)
df['Wall Clock Time (ns)'] = df['Wall Clock Time (ns)'].apply(lambda ns: ns / 1000000.0)
df.rename(columns={'Wall Clock Time (ns)': 'Wall Clock Time (ms)', 'CPU Time (ns)': 'CPU Time (ms)'},
inplace=True)
return df
def combine_before_and_after_dfs(df_bf, df_af):
def add_percentage_speedup_columns(df, timing_type, col_before, col_after):
df[f'{timing_type} Diff (ms)'] = df.apply(lambda row: row[col_before] - row[col_after], axis=1)
df[f'% Speedup ({timing_type})'] = df\
.apply(lambda row: row[f'{timing_type} Diff (ms)'] * 100 / row[col_before], axis=1)
df[f'Fold Speedup ({timing_type}) - $f$'] = df.apply(lambda row: row[col_before] / row[col_after], axis=1)
df_summary_bf = df_bf.groupby(by=['Nodes', 'Edges', 'Sample Type'], as_index=False)\
.agg(wall_before_mean=('Wall Clock Time (ms)', 'mean'),
wall_before_median=('Wall Clock Time (ms)', 'median'),
wall_before_std=('Wall Clock Time (ms)', 'std'),
wall_before_count=('Wall Clock Time (ms)', 'count'),
cpu_before_mean=('CPU Time (ms)', 'mean'),
cpu_before_median=('CPU Time (ms)', 'median'),
cpu_before_std=('CPU Time (ms)', 'std'),
cpu_before_count=('CPU Time (ms)', 'count')
).round(2)
df_summary_af = df_af.groupby(by=['Nodes', 'Edges', 'Sample Type'], as_index=False)\
.agg(wall_after_mean=('Wall Clock Time (ms)', 'mean'),
wall_after_median=('Wall Clock Time (ms)', 'median'),
wall_after_std=('Wall Clock Time (ms)', 'std'),
wall_after_count=('Wall Clock Time (ms)', 'count'),
cpu_after_mean=('CPU Time (ms)', 'mean'),
cpu_after_median=('CPU Time (ms)', 'median'),
cpu_after_std=('CPU Time (ms)', 'std'),
cpu_after_count=('CPU Time (ms)', 'count')
).round(2)
df_both = pd.merge(left=df_summary_bf, right=df_summary_af, on=['Nodes', 'Edges', 'Sample Type'])
df_theta = df_both[df_both['Sample Type'] == 1].copy()
df_deri = df_both[df_both['Sample Type'] == 0].copy()
df_theta.rename(columns={'wall_before_mean': 'theta_wall_before_mean',
'wall_before_median': 'theta_wall_before_median',
'wall_before_std': 'theta_wall_before_std',
'wall_before_count': 'theta_wall_before_count',
'cpu_before_mean': 'theta_cpu_before_mean',
'cpu_before_median': 'theta_cpu_before_median',
'cpu_before_std': 'theta_cpu_before_std',
'cpu_before_count': 'theta_cpu_before_count',
'wall_after_mean': 'theta_wall_after_mean',
'wall_after_median': 'theta_wall_after_median',
'wall_after_std': 'theta_wall_after_std',
'wall_after_count': 'theta_wall_after_count',
'cpu_after_mean': 'theta_cpu_after_mean',
'cpu_after_median': 'theta_cpu_after_median',
'cpu_after_std': 'theta_cpu_after_std',
'cpu_after_count': 'theta_cpu_after_count',
}, inplace=True)
df_deri.rename(columns={'wall_before_mean': 'deri_wall_before_mean',
'wall_before_median': 'deri_wall_before_median',
'wall_before_std': 'deri_wall_before_std',
'wall_before_count': 'deri_wall_before_count',
'cpu_before_mean': 'deri_cpu_before_mean',
'cpu_before_median': 'deri_cpu_before_median',
'cpu_before_std': 'deri_cpu_before_std',
'cpu_before_count': 'deri_cpu_before_count',
'wall_after_mean': 'deri_wall_after_mean',
'wall_after_median': 'deri_wall_after_median',
'wall_after_std': 'deri_wall_after_std',
'wall_after_count': 'deri_wall_after_count',
'cpu_after_mean': 'deri_cpu_after_mean',
'cpu_after_median': 'deri_cpu_after_median',
'cpu_after_std': 'deri_cpu_after_std',
'cpu_after_count': 'deri_cpu_after_count',
}, inplace=True)
df_theta.drop(['Sample Type'], axis=1, inplace=True)
df_deri.drop(['Sample Type'], axis=1, inplace=True)
df_average = pd.merge(left=df_theta, right=df_deri, on=['Nodes', 'Edges'])
df_average['Average of Mean Wall Times (ms) - Before'] = df_average \
.apply(lambda row: (row['theta_wall_before_mean'] + row['deri_wall_before_mean']) / 2, axis=1)
df_average['Average of Median Wall Times (ms) - Before'] = df_average \
.apply(lambda row: (row['theta_wall_before_median'] + row['deri_wall_before_median']) / 2, axis=1)
df_average['Average of Mean CPU Times (ms) - Before'] = df_average \
.apply(lambda row: (row['theta_cpu_before_mean'] + row['deri_cpu_before_mean']) / 2, axis=1)
df_average['Average of Median CPU Times (ms) - Before'] = df_average \
.apply(lambda row: (row['theta_cpu_before_median'] + row['deri_cpu_before_median']) / 2, axis=1)
df_average['Average of Mean Wall Times (ms) - After'] = df_average \
.apply(lambda row: (row['theta_wall_after_mean'] + row['deri_wall_after_mean']) / 2, axis=1)
df_average['Average of Median Wall Times (ms) - After'] = df_average \
.apply(lambda row: (row['theta_wall_after_median'] + row['deri_wall_after_median']) / 2, axis=1)
df_average['Average of Mean CPU Times (ms) - After'] = df_average \
.apply(lambda row: (row['theta_cpu_after_mean'] + row['deri_cpu_after_mean']) / 2, axis=1)
df_average['Average of Median CPU Times (ms) - After'] = df_average \
.apply(lambda row: (row['theta_cpu_after_median'] + row['deri_cpu_after_median']) / 2, axis=1)
add_percentage_speedup_columns(df_average, 'Average of Mean Wall Times',
'Average of Mean Wall Times (ms) - Before',
'Average of Mean Wall Times (ms) - After')
add_percentage_speedup_columns(df_average, 'Average of Median Wall Times',
'Average of Median Wall Times (ms) - Before',
'Average of Median Wall Times (ms) - After')
add_percentage_speedup_columns(df_average, 'Average of Mean CPU Times',
'Average of Mean CPU Times (ms) - Before',
'Average of Mean CPU Times (ms) - After')
add_percentage_speedup_columns(df_average, 'Average of Median CPU Times',
'Average of Median CPU Times (ms) - Before',
'Average of Median CPU Times (ms) - After')
value_vars = ['Average of Mean Wall Times (ms) - Before', 'Average of Mean Wall Times (ms) - After',
'Average of Median Wall Times (ms) - Before', 'Average of Median Wall Times (ms) - After',
'Average of Mean CPU Times (ms) - Before', 'Average of Mean CPU Times (ms) - After',
'Average of Median CPU Times (ms) - Before', 'Average of Median CPU Times (ms) - After',
'Average of Mean CPU Times Diff (ms)', '% Speedup (Average of Mean CPU Times)',
'Average of Median CPU Times Diff (ms)', '% Speedup (Average of Median CPU Times)',
'Average of Mean Wall Times Diff (ms)', '% Speedup (Average of Mean Wall Times)',
'Average of Median Wall Times Diff (ms)', '% Speedup (Average of Median Wall Times)',
'Fold Speedup (Average of Mean CPU Times) - $f$', 'Fold Speedup (Average of Median CPU Times) - $f$',
'Fold Speedup (Average of Mean Wall Times) - $f$', 'Fold Speedup (Average of Median Wall Times) - $f$']
df_average = pd.melt(df_average, id_vars=['Nodes', 'Edges'],
value_vars=value_vars, value_name='Average Timing', var_name='Timing Type')
add_percentage_speedup_columns(df_both, 'Mean CPU Time', 'cpu_before_mean', 'cpu_after_mean')
add_percentage_speedup_columns(df_both, 'Median CPU Time', 'cpu_before_median', 'cpu_after_median')
add_percentage_speedup_columns(df_both, 'Mean Wall Time', 'wall_before_mean', 'wall_after_mean')
add_percentage_speedup_columns(df_both, 'Median Wall Time', 'wall_before_median', 'wall_after_median')
return df_both, df_average
def plot_micro_timing_min_cag_averages(df_average, timing_type, speedup=True, fold=False, save=False, file_name_prefix=''):
min_cag = df_average['Edges'] == (df_average['Nodes'] - 1)
df_min_cag = df_average[min_cag]
    if speedup:
        if fold:
            row_mask = df_min_cag['Timing Type'] == f'Fold Speedup ({timing_type}) - $f$'
            title = 'Average Fold Speedup for a Single MCMC Iteration (# Edges = # Nodes - 1)\n' \
                    'Optimized version is $f$-fold faster than before'
            y_label = 'Fold-Speedup'
            file_name = f'{file_name_prefix}avg_fold_speedup.png'
        else:
            row_mask = df_min_cag['Timing Type'] == f'% Speedup ({timing_type})'
            title = 'Average Percentage Speedup for a Single MCMC Iteration (# Edges = # Nodes - 1)'
            y_label = 'Average Percentage Speedup'
            file_name = f'{file_name_prefix}avg_percent_speedup.png'
    else:
        row_mask = (df_min_cag['Timing Type'] == f'{timing_type} (ms) - Before') | \
                   (df_min_cag['Timing Type'] == f'{timing_type} (ms) - After')
        title = 'Average Timing for a Single MCMC Iteration (# Edges = # Nodes - 1)'
        y_label = 'Average Timing (ms)'
        file_name = f'{file_name_prefix}avg_timing_before_after.png'
    df_min_cag = df_min_cag[row_mask]
sns.lineplot(data=df_min_cag, x='Nodes', y='Average Timing', hue='Timing Type', marker='o', linewidth=2)
plt.title(title)
plt.ylabel(y_label)
plt.tight_layout()
if save:
plt.savefig(file_name)
else:
plt.show()
plt.close()
def rename_sample_type(df):
timing_type_remap = {0: 'Derivative',
1: '$\\theta$',
10: 'KDE',
11: 'Mat Exp',
12: 'Update TM',
13: 'LL Calc'}
df['Sample Type'] = df['Sample Type'] \
.apply(lambda timing_type: timing_type_remap.get(timing_type, timing_type))
parser = argparse.ArgumentParser(description='Plot Delphi speedup timing results before and after an optimization')
parser.add_argument('-b', metavar='Before timing results directory', type=str, default='before',
help='Directory where timing results before the optimization are kept')
parser.add_argument('-a', metavar='After timing results directory', type=str, default='after',
help='Directory where timing results after the optimization are kept')
parser.add_argument('-fb', metavar='Before file name filter', type=str, default='mcmc',
help='File name specifier to filter files in the before directory')
parser.add_argument('-fa', metavar='After file name filter', type=str, default='mcmc',
help='File name specifier to filter files in the after directory')
parser.add_argument('-d', metavar='Output directory name', type=str, default='timing_plots',
help='Directory where output plots are saved')
parser.add_argument('-o', metavar='Output file name specifier', type=str, default='timing',
help='This specifier will be prefixed to all the output files')
args = parser.parse_args()
timing_folder_before = args.b + '/'
timing_folder_after = args.a + '/'
file_name_filter_before = args.fb
file_name_filter_after = args.fa
out_dir = args.d + '/'
out_file_name_prefix = args.o
if out_dir:
out_path = Path(out_dir)
if not out_path.is_dir():
print(f'\nMaking output directory: {out_dir}')
out_path.mkdir(parents=True, exist_ok=True)
df_before = assemble_micro_timing_output_files_into_df(timing_folder_before, file_name_filter_before)
df_after = assemble_micro_timing_output_files_into_df(timing_folder_after, file_name_filter_after)
df_after.to_csv(f'{out_dir}{out_file_name_prefix}_after.csv', index=False)
df_before.to_csv(f'{out_dir}{out_file_name_prefix}_before.csv', index=False)
df_all, df_avg = combine_before_and_after_dfs(df_before, df_after)
rename_sample_type(df_all)
df_all.to_csv(f'{out_dir}{out_file_name_prefix}_timing_summary.csv', index=False)
df_avg.to_csv(f'{out_dir}{out_file_name_prefix}_timing_average.csv', index=False)
plot_micro_timing_min_cag_distributions(df_all, measurement='% Speedup (Median CPU Time)', line=True, separate=False,
summary=True,
file_name_prefix=f'{out_dir}{out_file_name_prefix}_theta_vs_derivative')
plot_micro_timing_min_cag_averages(df_avg, 'Average of Median CPU Times', speedup=False, fold=False, save=True,
file_name_prefix=f'{out_dir}{out_file_name_prefix}_median_')
plot_micro_timing_min_cag_averages(df_avg, 'Average of Median CPU Times', speedup=True, fold=False, save=True,
file_name_prefix=f'{out_dir}{out_file_name_prefix}_median_')
plot_micro_timing_min_cag_averages(df_avg, 'Average of Median CPU Times', speedup=True, fold=True, save=True,
file_name_prefix=f'{out_dir}{out_file_name_prefix}_median_')
plot_micro_timing_min_cag_averages(df_avg, 'Average of Mean CPU Times', speedup=False, fold=False, save=True,
file_name_prefix=f'{out_dir}{out_file_name_prefix}_mean_')
plot_micro_timing_min_cag_averages(df_avg, 'Average of Mean CPU Times', speedup=True, fold=False, save=True,
file_name_prefix=f'{out_dir}{out_file_name_prefix}_mean_')
plot_micro_timing_min_cag_averages(df_avg, 'Average of Mean CPU Times', speedup=True, fold=True, save=True,
file_name_prefix=f'{out_dir}{out_file_name_prefix}_mean_')
df_after_embedded = assemble_micro_timing_output_files_into_df(timing_folder_after, file_name_filter='embeded')
df_kde = assemble_micro_timing_output_files_into_df(timing_folder_after, file_name_filter='kde')
rename_sample_type(df_after_embedded)
rename_sample_type(df_kde)
rename_sample_type(df_after)
plot_micro_timing_summary_per_cag_size(df_all, measurement='cpu_after_median', separate=False,
title_specifier='Median Duration', y_label='Median Duration (ms)',
file_name_prefix=f'{out_dir}{out_file_name_prefix}_', timing_type='$\\theta$')
for component in ['KDE', 'Mat Exp', 'Update TM', 'LL Calc']:
    plot_micro_timing_summary_per_cag_size(df_after_embedded, measurement='CPU Time (ms)', separate=False,
title_specifier=f'{component} Duration', y_label='CPU Time (ms)',
file_name_prefix=f'{out_dir}{out_file_name_prefix}_{component}_', timing_type=component)
plot_micro_timing_summary_per_cag_size(df_after_embedded, measurement='CPU Time (ms)', separate=True,
title_specifier='Median Duration', y_label='Median Duration (ms)',
file_name_prefix=f'{out_dir}{out_file_name_prefix}_')
df_after_embedded.rename(columns={'Sample Type': 'Timing Type'}, inplace=True)
df_kde.rename(columns={'Sample Type': 'Timing Type'}, inplace=True)
df_after.rename(columns={'Sample Type': 'Timing Type'}, inplace=True)
timing_type_remap = {'$\\theta$': '$\\theta$ = KDE + Mat Exp + Update TM + LL Calc'}
df_after_embedded['Timing Type'] = df_after_embedded['Timing Type'] \
.apply(lambda timing_type: timing_type_remap.get(timing_type, timing_type))
df_after['Timing Type'] = df_after['Timing Type'] \
.apply(lambda timing_type: timing_type_remap.get(timing_type, timing_type))
plot_micro_timing_min_cag_distributions(pd.concat([df_kde, df_after]), measurement='CPU Time (ms)', line=True,
separate=False, summary=False, hue='Timing Type',
file_name_prefix=f'{out_dir}{out_file_name_prefix}_mcmc_components_profiler')
plot_micro_timing_min_cag_distributions(pd.concat([df_after_embedded, df_after]), measurement='CPU Time (ms)', line=True,
separate=False, summary=False, hue='Timing Type',
file_name_prefix=f'{out_dir}{out_file_name_prefix}_mcmc_components_embedded')
# plot_micro_timing_min_cag_distributions(df_after_embedded[df_after_embedded['Sample Type'] == 'Update TM'], measurement='CPU Time (ms)',
# line=True, separate=False, summary=False, hue='Timing Type')
# file_name=f'{out_dir}min_cag_percent_speedups.png')
# plot_micro_timing_summary_per_cag_size(df_all, measurement='% Speedup (Median CPU Time)', separate=False)
|
1663224
|
from splitcli.ux import menu
from splitcli.accounts import user
from splitcli.split_apis import users_api
def sign_in():
email = menu.text_input("Enter your email")
menu.info_message("To find your Admin API Key, follow the directions here:")
menu.info_message("https://www.youtube.com/watch?v=80Bz2ZcZUrs")
split_apikey = menu.password_input("Enter your Split Admin API Key")
new_user = user.User(split_apikey, "", "", "", "", email)
user.set_user(new_user)
# Check user
active_user = users_api.get_user_by_email(email)
    if active_user is not None:
new_user.firstname = active_user['name']
else:
new_user.firstname = email
menu.warn_message("Email does not exist in organization")
new_user.write()
return new_user
|
1663229
|
notify_spec = {
'type': 'object',
'required': ['message'],
'properties': {
'message': {
'description': 'Message to send to administrators.',
'type': 'string',
},
'sendAsFile': {
'description': 'Whether to send message as a file. (0 or 1)',
'type': 'integer',
},
},
}
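# Example payload that should validate against this spec (illustrative):
#   {"message": "Maintenance window at 02:00 UTC", "sendAsFile": 0}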
|
1663241
|
from __future__ import absolute_import, division, print_function
import torch
from torch import nn
from utils.interpolation import interpolate2d, my_grid_sample, get_coordgrid
def post_processing(l_disp, r_disp):
b, _, h, w = l_disp.shape
m_disp = 0.5 * (l_disp + r_disp)
grid_l = torch.linspace(0.0, 1.0, w).view(1, 1, 1, w).expand(1, 1, h, w).to(device=l_disp.device, dtype=l_disp.dtype).requires_grad_(False)
l_mask = 1.0 - torch.clamp(20 * (grid_l - 0.05), 0, 1)
r_mask = torch.flip(l_mask, [3])
return r_mask * l_disp + l_mask * r_disp + (1.0 - l_mask - r_mask) * m_disp
def flow_horizontal_flip(flow_input):
flow_flip = torch.flip(flow_input, [3])
flow_flip[:, 0:1, :, :] *= -1
return flow_flip.contiguous()
def disp2depth_kitti(pred_disp, k_value, depth_clamp=True):
pred_depth = k_value.unsqueeze(1).unsqueeze(1).unsqueeze(1) * 0.54 / (pred_disp + 1e-4)
if depth_clamp:
pred_depth = torch.clamp(pred_depth, 1e-3, 80)
return pred_depth
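# Worked example (KITTI-style numbers, assumed for illustration): with
# k_value = fx = 721.0 and a 72.1 px disparity,
# depth = 721.0 * 0.54 / 72.1 ≈ 5.4 m.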
def depth2disp_kitti(pred_depth, k_value, depth_clamp=True):
if depth_clamp:
pred_depth = torch.clamp(pred_depth, 1e-3, 80)
pred_disp = k_value.unsqueeze(1).unsqueeze(1).unsqueeze(1) * 0.54 / (pred_depth)
return pred_disp
def pixel2pts(intrinsics, depth):
b, _, h, w = depth.size()
pixelgrid = get_coordgrid(depth)
depth_mat = depth.view(b, 1, -1)
pixel_mat = pixelgrid.view(b, 3, -1)
# doing torch.inverse on CPU is way faster than that on GPU
pts_mat = torch.matmul(torch.inverse(intrinsics.float().cpu()).to(device=depth.device, dtype=depth.dtype), pixel_mat) * depth_mat
pts = pts_mat.view(b, -1, h, w)
return pts, pixelgrid
def pts2pixel(pts, intrinsics):
b, _, h, w = pts.size()
proj_pts = torch.matmul(intrinsics, pts.view(b, 3, -1))
pixels_mat = proj_pts.div(proj_pts[:, 2:3, :] + 1e-8)[:, 0:2, :]
pixels_mat = torch.clamp(pixels_mat, -w * 1.5, w * 1.5)
return pixels_mat.view(b, 2, h, w)
def intrinsic_scale(intrinsic, scale_y, scale_x):
b, h, w = intrinsic.size()
fx = intrinsic[:, 0, 0] * scale_x
fy = intrinsic[:, 1, 1] * scale_y
cx = intrinsic[:, 0, 2] * scale_x
cy = intrinsic[:, 1, 2] * scale_y
zeros = torch.zeros_like(fx)
r1 = torch.stack([fx, zeros, cx], dim=1)
r2 = torch.stack([zeros, fy, cy], dim=1)
r3 = torch.tensor([0., 0., 1.], device=intrinsic.device, dtype=intrinsic.dtype, requires_grad=False).unsqueeze(0).expand(b, -1)
intrinsic_s = torch.stack([r1, r2, r3], dim=1)
return intrinsic_s
def pixel2pts_ms(intrinsic, output_disp, rel_scale, depth_clamp=True):
# pixel2pts
intrinsic_dp_s = intrinsic_scale(intrinsic, rel_scale[:,0], rel_scale[:,1])
output_depth = disp2depth_kitti(output_disp, intrinsic_dp_s[:, 0, 0], depth_clamp)
pts, _ = pixel2pts(intrinsic_dp_s, output_depth)
return pts, intrinsic_dp_s
def pts2pixel_ms(intrinsic, pts, output_sf, disp_size):
# +sceneflow and reprojection
sf_s = interpolate2d(output_sf, disp_size, mode="bilinear")
pts_tform = pts + sf_s
coord = pts2pixel(pts_tform, intrinsic)
norm_coord_w = coord[:, 0:1, :, :] / (disp_size[1] - 1) * 2 - 1
norm_coord_h = coord[:, 1:2, :, :] / (disp_size[0] - 1) * 2 - 1
norm_coord = torch.cat((norm_coord_w, norm_coord_h), dim=1)
return sf_s, pts_tform, norm_coord
def reconstructImg(coord, img):
grid = coord.transpose(1, 2).transpose(2, 3)
img_warp = my_grid_sample(img, grid)
mask = torch.ones_like(img, requires_grad=False)
mask = my_grid_sample(mask, grid)
mask = (mask >= 1.0).to(dtype=img.dtype)
return img_warp * mask
def reconstructPts(coord, pts):
grid = coord.transpose(1, 2).transpose(2, 3)
pts_warp = my_grid_sample(pts, grid)
mask = torch.ones_like(pts, requires_grad=False)
mask = my_grid_sample(mask, grid)
mask = (mask >= 1.0).to(dtype=pts.dtype)
return pts_warp * mask
def projectSceneFlow2Flow(intrinsic, sceneflow, disp, input_size=None, depth_clamp=True):
_, _, h, w = disp.size()
    if input_size is None:
output_depth = disp2depth_kitti(disp, intrinsic[:, 0, 0], depth_clamp)
pts, pixelgrid = pixel2pts(intrinsic, output_depth)
else: ## if intrinsic is not adjusted to the "input_size"
local_scale = torch.zeros_like(input_size)
local_scale[:, 0] = h
local_scale[:, 1] = w
pts, intrinsic = pixel2pts_ms(intrinsic, disp, local_scale / input_size)
pixelgrid = get_coordgrid(disp)
sf_s = interpolate2d(sceneflow, [h, w], mode="bilinear")
pts_tform = pts + sf_s
coord = pts2pixel(pts_tform, intrinsic)
flow = coord - pixelgrid[:, 0:2, :, :]
return flow
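# Shape-level usage sketch (synthetic tensors, values assumed):
#   k = torch.eye(3).unsqueeze(0).repeat(2, 1, 1)   # batch of 3x3 intrinsics
#   disp = torch.rand(2, 1, 64, 128)
#   sf = torch.zeros(2, 3, 64, 128)                 # zero scene flow
#   flow = projectSceneFlow2Flow(k, sf, disp)       # -> (2, 2, 64, 128)
# With zero scene flow the projected optical flow is (numerically) zero.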
|
1663243
|
from __future__ import absolute_import, unicode_literals
import os
CWD = os.path.dirname(os.path.realpath(__file__))
|
1663263
|
import pickle
import os
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
def main():
directory_string = '~/Desktop/DEAP/data_preprocessed_python'
directory = os.path.expanduser(directory_string)
os.makedirs('data', exist_ok=True)
print("Importing data...")
for file in tqdm(sorted(os.listdir(directory))):
filename = os.fsdecode(file)
data_file_path = os.path.join(directory, filename)
if filename.endswith(".dat"):
data_file = open(data_file_path, 'rb')
pickle_file = pickle.load(data_file, encoding='latin1')
text_file = open(os.path.join('data/', os.path.splitext(filename)[0]) + ".txt", 'wb')
pickle.dump(pickle_file, text_file)
data_file.close()
text_file.close()
def change_label_values_to_class(all_labels):
    temp_labels = np.empty((40, 4), dtype=object)
    for i in range(0, len(all_labels)):
        for j in range(0, np.size(all_labels, 1)):
            if all_labels[i][j] <= 5:
                temp_labels[i][j] = 'L'
            else:
                temp_labels[i][j] = 'H'
emotions_label = np.array([['V', 'A', 'D', 'L']] * 40)
return temp_labels + emotions_label
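# Worked example for one trial's [valence, arousal, dominance, liking] ratings
# (values assumed): [3.2, 7.5, 5.0, 9.1] maps to ['LV', 'HA', 'LD', 'HL'].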
if __name__ == "__main__":
main()
|
1663277
|
import re
import os
import sys
import json
import scrapy
import argparse
from glob import glob
from datetime import datetime
from w3lib.url import is_url
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import inside_project, get_project_settings
from scrapy.utils.python import to_unicode
from scrapy.utils.reqser import request_from_dict
from scrapy.commands.genspider import sanitize_module_name
from scrapy_testmaster.utils import (
add_sample,
auto_import,
unpickle_data,
decompress_data,
get_or_create_test_dir,
get_project_dirs,
parse_callback_result,
prepare_callback_replay,
process_result,
erase_special_metakeys
)
from scrapy_testmaster.utils_novel import (
cascade_fixtures,
get_callbacks,
get_cb_settings,
get_test_paths,
write_config,
get_homepage_cookies,
trigger_requests,
get_reqs_to_add,
get_reqs_multiple,
validate_results
)
from .parse import (
process_options,
run_command
)
class CommandLine:
def __init__(self, parser):
self.parser = parser
self.args = parser.parse_args()
if not inside_project():
self.error("No active Scrapy project")
self.command = self.args.command
self.spider = sanitize_module_name(self.args.spider) if \
self.args.spider else None
try:
self.callback = self.args.callback
except AttributeError:
self.callback = None
try:
self.fixture = self.args.fixture
except AttributeError:
self.fixture = None
if self.command == 'update':
try:
self.new = self.args.new
except AttributeError:
self.new = None
try:
self.dynamic = self.args.dynamic
except AttributeError:
self.dynamic = None
if self.command == 'clear':
self.fixtures = self.args.fixtures.split(',')
if self.fixture and not self.callback:
self.error("Can't specify a fixture without a callback")
self.project_dir, self.project_name = get_project_dirs()
sys.path.append(self.project_dir)
self.settings = get_project_settings()
if self.command == "parse":
url_list = [url.strip() for url in self.args.urls.split('|')]
for url in url_list:
if not is_url(url):
self.error("Something went wrong with your urls arg! "
"Note that as of version 1.0, the character for separating "
"multiple urls is '|', as opposed to ','")
self.args = process_options(self.args)
crawler_process = CrawlerProcess(self.settings)
run_command(crawler_process, url_list, self.args)
else:
self.base_path = self.settings.get(
'TESTMASTER_BASE_PATH',
default=os.path.join(self.project_dir, 'testmaster'))
self.tests_dir = os.path.join(self.base_path, 'tests')
self.spider_dir = os.path.join(self.tests_dir, self.spider)
if not os.path.isdir(self.spider_dir) and self.command != "establish":
self.error(
"No recorded data found "
"for spider '{}'".format(self.spider))
self.extra_path = self.settings.get('TESTMASTER_EXTRA_PATH') or ''
if self.callback:
self.callback_dir = os.path.join(
self.spider_dir, self.extra_path, self.callback)
if self.command == 'establish':
if os.path.isdir(self.callback_dir):
self.error(
"Can't use 'establish' with callback arg "
"if callback dir for spider '{}' "
"exists already".format(self.spider))
else:
if self.command == 'inspect':
self.error(
"No recorded data found for callback "
"'{}' from '{}' spider".format(self.callback, self.spider))
if self.fixture:
self.fixture_path = os.path.join(self.callback_dir,
self.parse_fixture_arg())
if not os.path.isfile(self.fixture_path):
self.error("Fixture '{}' not found".format(self.fixture_path))
def error(self, msg):
print(msg)
sys.exit(1)
def parse_fixture_arg(self):
try:
int(self.fixture)
return 'fixture{}.bin'.format(self.fixture)
except ValueError:
pass
if not self.fixture.endswith('.bin'):
return '{}.bin'.format(self.fixture)
return self.fixture
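    # Examples of the normalisation above (illustrative):
    #   '3'       -> 'fixture3.bin'
    #   'foo'     -> 'foo.bin'
    #   'foo.bin' -> 'foo.bin'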
def parse_data(self, data):
if isinstance(data, (dict, scrapy.Item)):
return {
self.parse_data(k): self.parse_data(v)
for k, v in data.items()
}
elif isinstance(data, list):
return [self.parse_data(x) for x in data]
elif isinstance(data, bytes):
return to_unicode(data)
elif isinstance(data, datetime):
return data.isoformat()
elif isinstance(data, (int, float)):
return data
return str(data)
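    # For example (illustrative):
    #   parse_data({b'key': datetime(2020, 1, 1)}) -> {'key': '2020-01-01T00:00:00'}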
def get_fixture_data(self):
with open(self.fixture_path, 'rb') as f:
raw_data = f.read()
fixture_info = unpickle_data(decompress_data(raw_data), 'utf-8')
if 'fixture_version' in fixture_info:
encoding = fixture_info['encoding']
data = unpickle_data(fixture_info['data'], encoding)
else:
data = fixture_info # legacy tests (not all will work, just utf-8)
return data
def inspect(self):
data = self.parse_data(self.get_fixture_data())
print(json.dumps(data))
def update(self):
to_update = []
if self.fixture:
to_update.append(self.fixture_path)
elif not self.fixture and self.callback:
target = os.path.join(self.callback_dir, "*.bin")
to_update = glob(target)
        # neither a fixture nor a callback was given: update every callback of the spider
else:
spider_path = os.path.join(self.project_dir, self.project_name,
'spiders/' + self.spider + '.py')
to_update = get_test_paths(self.spider_dir, spider_path, self.extra_path, True)
req_list = []
homepage_cookies = {}
i = 0
for path in to_update:
data, _, spider, _ = prepare_callback_replay(path)
if (self.dynamic or self.new) and i == 0:
homepage_cookies = get_homepage_cookies(spider)
i += 1
request = request_from_dict(data['request'], spider)
if homepage_cookies:
request.cookies = homepage_cookies
fixture_dir, filename = os.path.split(path)
fixture_index = re.search(r"\d+", filename).group()
if self.dynamic:
request = erase_special_metakeys(request)
request.meta['_update'] = 1
request.meta['_fixture'] = fixture_index
req_list.append(request)
else:
response_cls = auto_import(
data['response'].pop('cls', 'scrapy.http.HtmlResponse')
)
response = response_cls(
request=request, **data['response'])
cb_settings = get_cb_settings(fixture_dir)
data['result'], _ = parse_callback_result(
request.callback(response), spider, cb_settings
)
items_out, requests_out = process_result(
data['result'], spider.settings, cb_settings)
validate_results(fixture_dir, spider.settings, items_out,
requests_out, data['request']['url'])
add_sample(fixture_index, fixture_dir, filename, data)
print("Fixture '{}' successfully updated.".format(
os.path.relpath(path)))
if self.dynamic or self.new:
crawler_process = CrawlerProcess(self.settings)
if self.callback:
# add any requests specified in REQUESTS_TO_ADD in config.py
req_list += get_reqs_to_add(self.callback_dir, spider)
trigger_requests(crawler_process, spider, req_list)
else:
# finds all paths to all config.py files for the spider
# potentially adding a whole lot of requests from the REQUESTS_TO_ADD fields in these
to_add = get_test_paths(self.spider_dir, spider_path, self.extra_path)
req_list += get_reqs_multiple(to_add, spider)
trigger_requests(crawler_process, spider, req_list)
def establish(self):
did_something = False
if self.callback:
if not os.path.exists(self.callback_dir):
get_or_create_test_dir(self.base_path, self.spider, self.callback, self.extra_path)
write_config(self.callback_dir)
did_something = True
else:
spider_path = os.path.join(self.project_dir, self.project_name,
'spiders/' + self.spider + '.py')
for callback in get_callbacks(spider_path):
callback_dir = os.path.join(
self.spider_dir, self.extra_path, callback)
cb_exists = False
if os.path.exists(callback_dir):
cb_exists = True
get_or_create_test_dir(self.base_path, self.spider, callback, self.extra_path)
if not cb_exists:
write_config(callback_dir)
did_something = True
if did_something:
print("Command successful! Now you can tweak callback-specific "
"settings in the config.py file/s generated.")
else:
print("Command did nothing because a dir exists for callback/s "
"indicated already.")
def clear(self):
min_fixture = min(int(f) for f in self.fixtures)
for f in self.fixtures:
dead_path = os.path.join(self.callback_dir, f'fixture{f}.bin')
os.remove(dead_path)
cascade_fixtures(self.callback_dir, min_fixture)
def parse_command(self):
if self.command == "inspect":
self.inspect()
elif self.command == "update":
self.update()
elif self.command == "establish":
self.establish()
elif self.command == "clear":
self.clear()
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help='Action commands', dest='command')
subparsers.required = True
parse_cmd = subparsers.add_parser(
'parse',
description="Downloads and parses n requests up to depth d with different "
"urls but the same attributes otherwise",
formatter_class=argparse.RawTextHelpFormatter)
parse_cmd.add_argument("urls", help="urls separated by '|'")
parse_cmd.add_argument(
"--spider", dest="spider",
help="use this spider without looking for one")
parse_cmd.add_argument(
"-a", dest="spargs", action="append", default=[], metavar="NAME=VALUE",
help="set spider argument (may be repeated)")
parse_cmd.add_argument(
"--homepage", dest="homepage", action="store_true",
help="choose whether to get cookies from homepage")
parse_cmd.add_argument(
"--pipelines", action="store_true",
help="process items through pipelines")
parse_cmd.add_argument(
"--nolinks", dest="nolinks", action="store_true",
help="don't show links to follow (extracted requests)")
parse_cmd.add_argument(
"--noitems", dest="noitems", action="store_true",
help="don't show scraped items")
parse_cmd.add_argument(
"--nocolour", dest="nocolour", action="store_true",
help="avoid using pygments to colorize the output")
parse_cmd.add_argument(
"-r", "--rules", dest="rules", action="store_true",
help="use CrawlSpider rules to discover the callback")
parse_cmd.add_argument(
"-c", "--callback", dest="callback",
help="use this callback for parsing, instead looking for a callback")
parse_cmd.add_argument(
"-m", "--meta", dest="meta",
help="inject extra meta into the Request, it must be a valid raw json string")
parse_cmd.add_argument(
"--cbkwargs", dest="cbkwargs",
help="inject extra callback kwargs into the Request, it must be a valid raw json string")
parse_cmd.add_argument(
"-d", "--depth", dest="depth", type=int, default=1,
help="maximum depth for parsing requests [default: %default]")
parse_cmd.add_argument(
"-v", "--verbose", dest="verbose", action="store_true",
help="print each depth level one by one")
parse_cmd.add_argument(
"--headers", dest="headers",
help="inject extra headers, it must be a valid raw json string")
parse_cmd.add_argument(
"--method", dest="method",
help="specify \'post\' to get a POST request")
parse_cmd.add_argument(
"--cookies", dest="cookies",
help="add cookies to send, it must be a raw json string")
inspect_cmd = subparsers.add_parser(
'inspect',
description="Inspects fixtures data returning a JSON object",
formatter_class=argparse.RawTextHelpFormatter)
inspect_cmd.add_argument('spider', help="The spider.")
inspect_cmd.add_argument('callback', help="The callback.")
inspect_cmd.add_argument('fixture', help=(
"The fixture. Can be the fixture number or the fixture name."))
update_cmd = subparsers.add_parser(
'update',
description="Updates fixtures to callback changes",
formatter_class=argparse.RawTextHelpFormatter)
update_cmd.add_argument('spider', help="The spider to update.")
update_cmd.add_argument('-c', '--callback', help="The callback to update.")
update_cmd.add_argument('-f', '--fixture', help=(
"The fixture to update.\n"
"Can be the fixture number or the fixture name.\n"
"If not specified, all fixtures will be updated."))
update_cmd.add_argument('--dynamic', action="store_true",
help=("Include this to re-download the response."))
update_cmd.add_argument('--new', action="store_true",
help=("Downloads requests from REQUESTS_TO_ADD"))
establish_cmd = subparsers.add_parser(
'establish',
description="Sets up test structure without requiring any requests to be made.",
formatter_class=argparse.RawTextHelpFormatter)
establish_cmd.add_argument('spider', help=(
"The spider for which to set up the test environment.\n"
"If no spider specified, nothing will happen.\n"
"If no callback after this, then a directory is created for all callbacks."))
establish_cmd.add_argument('-c', '--callback', help=(
"The callback for which to set up the test structure.\n"
"If not preceded by a spider, this will fail.\n"))
clear_cmd = subparsers.add_parser(
'clear',
description="Deletes specified fixtures and enforces linear ordering to "
"the remaining fixtures in the callback directory",
formatter_class=argparse.RawTextHelpFormatter)
clear_cmd.add_argument('spider', help="The spider.")
clear_cmd.add_argument('callback', help="The callback.")
    clear_cmd.add_argument('fixtures', help=(
        "The fixtures to be cleared, listed by number and "
        "separated by commas."))
cli = CommandLine(parser)
cli.parse_command()
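if __name__ == '__main__':
    # conventional entry point; a sketch, assuming the module is run directly
    # (the original may instead expose main() as a console-script entry)
    main()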
|
1663298
|
import abc
import threading
import six
@six.add_metaclass(abc.ABCMeta)
class ExecutionContext(object):
"""Base abstract execution context class."""
@abc.abstractmethod
def run(self, func, *args, **kwargs):
pass
class GeventExecutionContext(ExecutionContext):
"""Execution context that run background function as a Greenlet.
gevent monkey patching must be done by user.
"""
def run(self, func, *args, **kwargs):
"""Run given function in a Greenlet."""
import gevent
gevent.spawn(func, *args, **kwargs)
gevent.sleep()
class ThreadingExecutionContext(ExecutionContext):
"""Execution context that run background function as a OS Thread."""
def run(self, func, *args, **kwargs):
"""Run given function in a daemon OS thread."""
thread = threading.Thread(target=func, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
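# Minimal usage sketch, assuming only the classes above; the short sleep just
# gives the daemon thread a chance to run before the process exits.
if __name__ == '__main__':
    import time
    def _worker(label):
        print('running in background:', label)
    ThreadingExecutionContext().run(_worker, 'daemon-thread')
    time.sleep(0.1)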
|
1663321
|
import logging
import os
from torch import Tensor
from torch.optim import SGD
from torch.utils.data import TensorDataset
from knodle.data.download import MinioConnector
from knodle.model.logistic_regression_model import (
LogisticRegressionModel,
)
from examples.ImdbDataset.utils import init_logger
from examples.utils import read_train_dev_test
from examples.trainer.preprocessing import get_tfidf_features
from knodle.trainer import TrainerConfig
from knodle.trainer.trainer import BaseTrainer
logger = logging.getLogger(__name__)
OUTPUT_CLASSES = 2
RANDOM_STATE = 123
TARGET_PATH = 'data/imdb'
MAX_FEATURES = 40000
def train_simple_ds_model():
init_logger()
    if not os.path.exists('data/imdb/mapping_rules_labels_t.lib'):
minio_connect = MinioConnector()
minio_connect.download_dir("datasets/imdb/processed/", TARGET_PATH)
    train_df, dev_df, test_df, train_rule_matches_z, dev_rule_matches_z, \
        test_rule_matches_z, imdb_dataset, mapping_rules_labels_t = \
        read_train_dev_test(TARGET_PATH)
logger.info("Train knn tfidf similarity model")
X_train = train_df.reviews_preprocessed
X_dev = dev_df.reviews_preprocessed
X_test = test_df.reviews_preprocessed
tfidf_values = get_tfidf_features(
imdb_dataset.reviews_preprocessed.values, path_to_cache="tutorials/ImdbDataset/tfidf.lib",
max_features=MAX_FEATURES
)
train_dataset = TensorDataset(Tensor(tfidf_values[X_train.index].toarray()))
dev_dataset = TensorDataset(Tensor(tfidf_values[X_dev.index].toarray()))
    model = LogisticRegressionModel(tfidf_values.shape[1], OUTPUT_CLASSES)
custom_model_config = TrainerConfig(
model=model, epochs=35, optimizer_=SGD(model.parameters(), lr=0.1)
)
trainer = BaseTrainer(
model,
mapping_rules_labels_t=mapping_rules_labels_t,
model_input_x=train_dataset,
rule_matches_z=train_rule_matches_z,
trainer_config=custom_model_config,
)
trainer.train()
tfidf_values_sparse = Tensor(tfidf_values[X_test.index].toarray())
tfidf_values_sparse = tfidf_values_sparse.to(custom_model_config.device)
test_tfidf = TensorDataset(tfidf_values_sparse)
y_test = Tensor(imdb_dataset.loc[X_test.index, "label_id"].values)
y_test = y_test.to(custom_model_config.device)
y_test = TensorDataset(y_test)
clf_report, _ = trainer.test(test_tfidf, y_test)
print(clf_report)
if __name__ == "__main__":
train_simple_ds_model()
|
1663332
|
import json
import logging
import os
import re
import tempfile
import m3u8
from tqdm import tqdm
yuu_log = logging.getLogger('yuu.gyao')
class GYAODownloader:
def __init__(self, url, session):
self.url = url
self.session = session
self.merge = True
if os.name == "nt":
self.yuu_folder = os.path.join(os.getenv('LOCALAPPDATA'), 'yuu_data')
sffx = '\\'
else:
self.yuu_folder = os.path.join(os.getenv('HOME'), '.yuu_data')
sffx = '/'
if not os.path.isdir(self.yuu_folder):
os.mkdir(self.yuu_folder)
self.temporary_folder = tempfile.mkdtemp(dir=self.yuu_folder)
self.temporary_folder = self.temporary_folder + sffx
def download_chunk(self, files, key, iv):
self.downloaded_files = []
try:
with tqdm(total=len(files), desc='Downloading', ascii=True, unit='file') as pbar:
for tsf in files:
outputtemp = self.temporary_folder + os.path.basename(tsf)
with open(outputtemp, 'wb') as outf:
try:
vid = self.session.get(tsf)
outf.write(vid.content)
except Exception as err:
                            yuu_log.error('Problem occurred\nreason: {}'.format(err))
return None
pbar.update()
self.downloaded_files.append(outputtemp)
except KeyboardInterrupt:
            yuu_log.warning('User pressed CTRL+C, cleaning up...')
return None
return self.downloaded_files
class GYAO:
def __init__(self, url, session):
self.session = session
self.type = 'GYAO'
self.yuu_logger = logging.getLogger('yuu.gyao.GYAO')
self.url = url
self.m3u8_url = None
self.resolution = None
self.policy_key = None
self.account = None
self.m3u8_url_list = None
self.is_m3u8 = False
self.est_filesize = None # In MiB
self.resolution_data = {
"1080p-0": ["~5000kb/s", "AAC 64kb/s 2ch"],
"720p-0": ["2000kb/s", "AAC 64kb/s 2ch"],
"480p-0": ["900kb/s", "AAC 64kb/s 2ch"],
"360p-0": ["550kb/s", "AAC 64kb/s 2ch"],
"240p-0": ["~200kb/s", "AAC 64kb/s 1ch"],
"1080p-1": ["~5000kb/s", "AAC 128kb/s 2ch"],
"720p-1": ["~2000kb/s", "AAC 128kb/s 2ch"],
"480p-1": ["~900kb/s", "AAC 128kb/s 2ch"],
"360p-1": ["~550kb/s", "AAC 128kb/s 2ch"],
"240p-1": ["~200kb/s", "AAC 128kb/s 2ch"],
}
self.authorization_required = False
self.authorized = False # Ignore for now
self.resumable = True
# Use Chrome UA
self.session.headers.update({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'})
def __repr__(self):
return '<yuu.GYAO: URL={}, Resolution={}, m3u8 URL={}>'.format(self.url, self.resolution, self.m3u8_url)
def get_downloader(self):
"""
Return a :class: of the Downloader
"""
return GYAODownloader(self.url, self.session)
def authorize(self, username, password):
"""
Bypassed since I need an account to test login
"""
return True, None
def get_token(self):
headers = {'X-User-Agent': 'Unknown Pc GYAO!/2.0.0 Web'}
query = '?fields=title%2Cid%2CvideoId'
v_id = re.findall(r'(?isx)http(?:|s)://gyao.yahoo.co.jp/(?:player|title[\w])/(?P<p1>[\w]*.*)', self.url)
if not v_id:
            return None, 'Video URL is not valid'
self.yuu_logger.debug('Fetching data account...')
r_vid = self.session.get('https://gyao.yahoo.co.jp/dam/v1/videos/' + v_id[0].replace('/', ':').rstrip(':') + query, headers=headers)
r_cov = self.session.get("http://players.brightcove.net/4235717419001/default_default/index.html?videoId=" + r_vid.json()['videoId'])
data_account = re.findall(r'<video-js\s+[^>]*\bdata-account\s*=.([\d]*).*>', r_cov.text, re.IGNORECASE | re.DOTALL | re.VERBOSE)
r_pk = self.session.get("http://players.brightcove.net/{}/default_default/index.html".format(data_account[0]))
pkey = re.findall(r'policyKey\s*:\s*(["\'])(?P<pk>.+?)\1', r_pk.text)[0][1]
self.yuu_logger.debug('Account: {}'.format(data_account[0]))
self.yuu_logger.debug('Policy key: {}'.format(pkey))
self.account = data_account[0]
self.policy_key = pkey
return 'SUCCESS', 'SUCCESS'
def parse(self, resolution=None, check_only=False):
"""
Function to parse gyao url
"""
self.yuu_logger.debug('Requesting data to GYAO/Brightcove API')
res_list = [
'240p-0', '360p-0', '480p-0', '720p-0', '1080p-0',
'240p-1', '360p-1', '480p-1', '720p-1', '1080p-1',
'best', 'worst'
]
if resolution not in res_list:
if not check_only:
return None, 'Unknown resolution: {}. (Check it with `-R`)'.format(resolution)
if resolution == 'best':
_resolution = '1080p-0'
elif resolution == 'worst':
_resolution = '240p-1'
else:
_resolution = resolution
v_id = re.findall(r'(?isx)http(?:|s)://gyao.yahoo.co.jp/(?:player|p|title[\w])/(?P<p1>[\w]*.*)', self.url)
if not v_id:
            return None, 'Video URL is not valid'
self.yuu_logger.debug('Video ID: {}'.format(v_id[0]))
headers = {'X-User-Agent': 'Unknown Pc GYAO!/2.0.0 Web'}
r_vid = self.session.get('https://gyao.yahoo.co.jp/dam/v1/videos/' + v_id[0].replace('/', ':').rstrip(':') + '?fields=title%2Cid%2CvideoId%2CshortTitle', headers=headers).json()
title = r_vid['title']
ep_title = r_vid['shortTitle']
output_name = title.replace(ep_title, '').replace('\u3000', ' ') + ' - ' + ep_title
headers_pk = {
'Accept': 'application/json;pk=' + self.policy_key,
}
error_bc = {
'CLIENT_GEO': 'This video is geo-locked for Japan only.'
}
self.yuu_logger.debug('Requesting HLS and video info')
req_bc = self.session.get('https://edge.api.brightcove.com/playback/v1/accounts/{}/videos/{}'.format(self.account, r_vid['videoId']), headers=headers_pk)
self.yuu_logger.debug('Data requested')
if req_bc.status_code == 403:
            error_reason = req_bc.json()[0]['error_subcode']
return None, error_bc[error_reason]
self.yuu_logger.debug('Parsing json API')
jsdata = req_bc.json()
hls_list = jsdata['sources'][2]['src'] # Use EXT-V4 http version as the base
hls_list2 = jsdata['sources'][0]['src'] # Use EXT-V3 http version as the one that will be sent over
self.yuu_logger.debug('M3U8 Link: {}'.format(hls_list))
self.yuu_logger.debug('Title: {}'.format(output_name))
self.m3u8_url_list = hls_list
self.yuu_logger.debug('Requesting m3u8 list')
r = self.session.get(hls_list)
r2 = self.session.get(hls_list2)
self.yuu_logger.debug('m3u8 requested')
if r.status_code == 403:
return None, 'This video is geo-locked for Japan only.'
self.yuu_logger.debug('Parsing m3u8')
r_all = m3u8.loads(r.text)
r2_all = m3u8.loads(r2.text)
band_list_v4 = []
for v4d in r_all.playlists:
s_info = v4d.stream_info
audio_inf = s_info.audio.strip('audio')
if _resolution[-2:] == audio_inf:
band_list_v4.append((s_info.bandwidth, str(s_info.resolution[1]) + audio_inf))
for v3d in r2_all.playlists:
bw = v3d.stream_info.bandwidth
for v4d in band_list_v4:
bwv4, resv4 = v4d
if bw == bwv4:
self.m3u8_url = v3d.uri
self.resolution = resv4
self.est_filesize = round(bw / 1024 / 5, 2)
break
if not self.m3u8_url:
if resolution == 'worst':
need_band = sorted(band_list_v4)[0]
elif resolution == 'best':
need_band = sorted(band_list_v4, reverse=True)[0]
else:
                return None, 'Resolution {} does not exist in this video.'.format(resolution)
for v3 in r2_all.playlists:
bw = v3.stream_info.bandwidth
                if bw == need_band[0]:
self.m3u8_url = v3.uri
self.resolution = _resolution
self.est_filesize = round(bw / 1024 / 5, 2)
break
return output_name, None
def parse_m3u8(self, m3u8_url):
self.yuu_logger.debug('Requesting m3u8')
r = self.session.get(m3u8_url)
self.yuu_logger.debug('m3u8 requested')
if r.status_code == 403:
            return None, None, None, 'This video is geo-locked for Japan only.'
self.yuu_logger.debug('Parsing m3u8')
x = m3u8.loads(r.text)
files = x.files
self.yuu_logger.debug('Total files: {}'.format(len(files)))
return files, None, None, 'Success'
def resolutions(self):
self.yuu_logger.debug('Requesting data to API')
r_all = m3u8.loads(self.session.get(self.m3u8_url_list).text)
ava_reso = []
for r_p in r_all.playlists:
temp_ = []
res = r_p.stream_info.resolution
aud_d = r_p.stream_info.audio.strip('audio')
r_c = '{h}p{a}'.format(h=res[1], a=aud_d)
res_name = '{w}x{h}'.format(w=res[0], h=res[1])
temp_.append(r_c)
temp_.append(res_name)
ava_reso.append(temp_)
if ava_reso:
reso = [r[0] for r in ava_reso]
self.yuu_logger.debug('Resolution list: {}'.format(', '.join(reso)))
return ava_reso
def get_video_key(self, ticket):
"""
        Return True since there's no key decryption in GYAO
"""
return True, None
def check_output(self, output=None, output_name=None):
if output:
fn_, ext_ = os.path.splitext(output)
            if ext_ != '.ts':
output = fn_ + '.ts'
else:
output = '{x} ({m} {r}).ts'.format(x=output_name, m=self.type, r=self.resolution[:-2])
return output
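# Rough end-to-end sketch (hypothetical URL; every call below performs real
# network requests against the GYAO/Brightcove APIs):
#
#   import requests
#   gyao = GYAO('https://gyao.yahoo.co.jp/player/xxxxx/', requests.Session())
#   gyao.get_token()
#   output_name, reason = gyao.parse(resolution='720p-0')
#   files, _, _, status = gyao.parse_m3u8(gyao.m3u8_url)
#   gyao.get_downloader().download_chunk(files, None, None)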
|
1663354
|
import falcon
from zappa.async import task
from mashape import fetch_quote
class RandomQuoteResource:
def on_get(self, req, resp):
"""Handles GET requests"""
try:
resp.media = fetch_quote()
except Exception as e:
raise falcon.HTTPError(falcon.HTTP_500, str(e))
@task
def async_task():
raise ValueError("Async Failure Exception")
class AsyncTaskResource:
def on_get(self, req, resp):
"""Handles GET requests"""
try:
async_task()
resp.media = 'Called async task'
except Exception as e:
raise falcon.HTTPError(falcon.HTTP_500, str(e))
api = falcon.API()
api.add_route('/', RandomQuoteResource())
api.add_route('/async-failure', AsyncTaskResource())
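# Local smoke-test sketch: falcon.API is a plain WSGI callable, so the stdlib
# server is enough to try it out (Zappa handles the real deployment):
#
#   from wsgiref.simple_server import make_server
#   make_server('localhost', 8000, api).serve_forever()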
|
1663402
|
from spanet.network.jet_reconstruction import JetReconstructionModel
from spanet.dataset import JetReconstructionDataset
from spanet.options import Options
|
1663435
|
from hpe3parclient import exceptions
import test.hpe_docker_unit_test as hpeunittest
from oslo_config import cfg
CONF = cfg.CONF
class EnablePluginUnitTest(hpeunittest.HpeDockerUnitTestExecutor):
def _get_plugin_api(self):
return 'plugin_activate'
def check_response(self, resp):
expected_resp = {u"Implements": [u"VolumeDriver"]}
self._test_case.assertEqual(resp, expected_resp)
class TestEnablePlugin(EnablePluginUnitTest):
pass
class InitializePluginUnitTest(hpeunittest.HpeDockerUnitTestExecutor):
def _get_plugin_api(self):
return ""
class TestPluginInitializationFails(InitializePluginUnitTest):
def setup_mock_objects(self):
mock_3parclient = self.mock_objects['mock_3parclient']
# Add as many side_effect as the number of backends
side_effect = []
for backend in self._all_configs:
side_effect.append(exceptions.UnsupportedVersion)
mock_3parclient.getWsApiVersion.side_effect = side_effect
def check_response(self, resp):
self._test_case.assertEqual(resp, {u"Err": 'GOT RESPONSE'})
|
1663436
|
import torch
import torchelie as tch
import torchelie.utils as tu
from torchelie.recipes.gan import GANRecipe
import torchvision.transforms as TF
import torchelie.loss.gan.standard as gan_loss
from torchelie.loss.gan.penalty import zero_gp, R1
from torchelie.datasets.pix2pix import UnlabeledImages
from torchelie.models import *
import torch.nn as nn
import torch.nn.functional as F
import torchelie.nn as tnn
class Matcher(nn.Module):
@tu.experimental
def __init__(self, n_scales=3):
super().__init__()
proj_size = 256
self.n_scales = n_scales
self.nets = nn.ModuleDict()
self.projs = nn.ModuleDict()
for i in range(n_scales):
net = patch34().remove_batchnorm()
net.classifier = nn.Sequential()
net.to_equal_lr()
proj = nn.Sequential(
nn.LeakyReLU(0.2, False),
tu.kaiming(tnn.Conv1x1(proj_size, proj_size), dynamic=True))
self.nets[str(i)] = net
self.projs[str(i)] = proj
def barlow(self, src, proj):
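        # Per-spatial-location batch similarity: for each (h, w) cell, build an
        # (n, n) cosine matrix across the batch and pull it toward the identity,
        # so a source patch should match only its own projection.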
src = F.normalize(src, dim=1)
proj = F.normalize(proj, dim=1)
n, c, h, w = src.shape
out = torch.bmm(
src.permute(2, 3, 0, 1).reshape(-1, n, c),
proj.permute(2, 3, 0, 1).reshape(-1, n, c).permute(0, 2, 1))
out = out.view(h, w, n, n).permute(2, 3, 0, 1)
labels = torch.eye(n, device=out.device)
labels = labels.view(n, n, 1, 1).expand(n, n, h, w)
return {
'cosine': out,
'loss': F.smooth_l1_loss(out, labels, beta=0.1),
'src_feats': src,
'proj_feats': proj
}
def forward(self, src, dst):
outs = []
for scale_order in range(self.n_scales):
scale = 2**scale_order
src_scale = F.interpolate(src,
scale_factor=1 / scale,
mode='bilinear')
dst_scale = F.interpolate(dst,
scale_factor=1 / scale,
mode='bilinear')
src_feats = self.nets[str(scale_order)](src_scale)
proj_feats = self.projs[str(scale_order)](
self.nets[str(scale_order)](dst_scale))
N, c, h, w = src_feats.shape
labels = torch.arange(N, device=src.device)
labels = labels.view(N, 1, 1).expand(N, h, w)
outs.append(self.barlow(src_feats, proj_feats))
outs[-1]['labels'] = labels
total_loss = sum(out['loss'] for out in outs)
matches = torch.cat([
out['cosine'].view(out['cosine'].shape[0], out['cosine'].shape[1],
-1) for out in outs
],
dim=2)
all_labels = torch.cat(
[out['labels'].view(out['labels'].shape[0], -1) for out in outs],
dim=1)
return {
'matches': matches,
'loss': total_loss,
'labels': all_labels,
'src_feats': [out['src_feats'] for out in outs],
'proj_feats': [out['proj_feats'] for out in outs],
}
def get_dataset(typ: str, path: str, train: bool, size: int):
if typ == 'images':
return UnlabeledImages(
path,
TF.Compose([
TF.Resize(size),
TF.CenterCrop(size),
TF.RandomHorizontalFlip(),
TF.ToTensor(),
]))
if typ == 'celeba':
return celeba(
path, train,
TF.Compose([
TF.Resize(size),
TF.CenterCrop(size),
TF.RandomHorizontalFlip(),
TF.ToTensor(),
]))
@tu.experimental
def celeba(path, train: bool, tfm=None):
from torchvision.datasets import CelebA
positive = True
if path[:4] == 'not-':
positive = False
path = path[4:]
celeba = CelebA('~/.torch/celeba',
download=True,
target_type=[],
split='train' if train else 'test')
male_idx = celeba.attr_names.index(path)
files = [
f'~/.torch/celeba/celeba/img_align_celeba/{celeba.filename[i]}'
for i in range(len(celeba))
if celeba.attr[i, male_idx] == (1 if positive else 0)
]
return tch.datasets.pix2pix.ImagesPaths(files, tfm)
def big_patch34() -> PatchDiscriminator:
"""
Patch Discriminator from pix2pix
"""
return PatchDiscriminator([256, 512, 512])
@tu.experimental
def train(rank, world_size, opts):
def make_G():
G = pix2pix_128()
G.to_instance_norm()
def to_adain(m):
if isinstance(m, nn.InstanceNorm2d):
# return tnn.AdaIN2d(m.num_features, 256)
return tnn.FiLM2d(m.num_features, 256)
return m
tnn.edit_model(G, to_adain)
tnn.utils.net_to_equal_lr(G, leak=0.2)
return G
Gy = make_G()
Gx = make_G()
def make_D(inputs):
D = big_patch34()
D.set_input_specs(inputs)
D.remove_batchnorm()
tnn.utils.net_to_equal_lr(D, leak=0.2)
D = MultiScaleDiscriminator(D)
return D
D = make_D(6)
if rank == 0:
print(Gy)
print(D)
Gy = torch.nn.parallel.DistributedDataParallel(Gy.to(rank), [rank], rank)
Gx = torch.nn.parallel.DistributedDataParallel(Gx.to(rank), [rank], rank)
D = torch.nn.parallel.DistributedDataParallel(D.to(rank), [rank], rank)
SIZE = 128
ds_A = get_dataset(opts.data_A[0], opts.data_A[1], True, SIZE)
ds_B = get_dataset(opts.data_B[0], opts.data_B[1], True, SIZE)
ds = tch.datasets.RandomPairsDataset(ds_A, ds_B)
ds_test_A = get_dataset(opts.data_test[0], opts.data_test[1], False, SIZE)
ds_test_B = get_dataset(opts.data_B[0], opts.data_B[1], False, SIZE)
ds_test = tch.datasets.RandomPairsDataset(ds_test_A, ds_test_B)
if rank == 0:
print(ds)
print(ds_test)
ds = torch.utils.data.DataLoader(ds,
8,
num_workers=4,
drop_last=True,
shuffle=True,
pin_memory=True)
ds_test = torch.utils.data.DataLoader(ds_test,
128,
num_workers=4,
drop_last=True,
shuffle=True,
pin_memory=True)
def dpo(val, p=0):
if torch.rand(1).item() < p:
return torch.zeros_like(val)
else:
return val
def G_fun(batch) -> dict:
x, y = batch
out = Gy(x * 2 - 1, torch.randn(x.shape[0], 256, device=x.device))
with D.no_sync():
loss = gan_loss.real(D(torch.cat([out * 2 - 1, x * 2 - 1], dim=1)))
loss.backward()
out = Gx(y * 2 - 1, torch.randn(x.shape[0], 256, device=x.device))
with D.no_sync():
loss = gan_loss.fake(D(torch.cat([y * 2 - 1, out * 2 - 1], dim=1)))
loss.backward()
return {'G_loss': loss.item()}
class GradientPenalty:
def __init__(self, gamma):
self.gamma = gamma
self.iters = 0
self.last_norm = float('nan')
def __call__(self, model, real, fake):
if self.iters < 100 or self.iters % 4 == 0:
real = real.detach()
fake = fake.detach()
gp, g_norm = zero_gp(model, real, fake)
# gp, g_norm = R1(model, real, fake)
# Sync the gradient on the next backward
if torch.any(torch.isnan(gp)):
gp.detach_()
else:
(4 * self.gamma * gp).backward()
self.last_norm = g_norm
self.iters += 1
return self.last_norm
gradient_penalty_x = GradientPenalty(opts.r0_D)
def D_fun(batch) -> dict:
x, y = batch
x = x * 2 - 1
y = y * 2 - 1
with torch.no_grad():
with Gy.no_sync():
out = Gy(x, torch.randn(x.shape[0], 256, device=x.device))
y_ = out * 2 - 1
with torch.no_grad():
with Gx.no_sync():
out = Gx(y, torch.randn(x.shape[0], 256, device=x.device))
x_ = out * 2 - 1
neg = torch.cat([y_, x], dim=1)
pos = torch.cat([y, x_], dim=1)
with D.no_sync():
prob_fake = D(neg, flatten=False)
# fake_correct = prob_fake.detach().lt(0).int().eq(1).sum()
fake_loss = gan_loss.fake(prob_fake)
fake_loss.backward()
with D.no_sync():
g_norm = gradient_penalty_x(D, pos, neg)
prob_real = D(pos, flatten=False)
# real_correct = prob_real.detach().gt(0).int().eq(1).sum()
real_loss = gan_loss.real(prob_real)
real_loss.backward()
return {
'out': torch.cat([y_, x_], dim=0),
'fake_loss': fake_loss.item(),
# 'prob_fake': torch.sigmoid(prob_fake).mean().item(),
# 'prob_real': torch.sigmoid(prob_real).mean().item(),
'real_loss': real_loss.item(),
'g_norm': g_norm,
# 'D-correct': (fake_correct + real_correct) / (2 * prob_fake.numel()),
}
def test_fun(batch):
x, y = batch
with Gy.no_sync():
out_y = torch.cat([
Gy(
xx * 2 - 1,
tch.distributions.sample_truncated_normal(
xx.shape[0], 256).to(xx.device))
for xx in torch.split(x, 32)
],
dim=0)
return {'out': out_y}
recipe = GANRecipe(nn.ModuleList([Gy, Gx]),
D,
G_fun,
D_fun,
test_fun,
ds,
test_loader=ds_test,
test_every=5000,
log_every=100,
checkpoint='main_adain' if rank == 0 else None,
visdom_env='main_adain' if rank == 0 else None)
recipe.callbacks.add_callbacks([
tch.callbacks.Optimizer(
tch.optim.Lookahead(
tch.optim.RAdamW(D.parameters(),
lr=2e-3,
betas=(0., 0.99),
weight_decay=0))),
tch.callbacks.Log('out', 'out'),
tch.callbacks.Log('batch.0', 'x'),
tch.callbacks.Log('batch.1', 'y'),
# tch.callbacks.Log('batch.1', 'y'),
# tch.callbacks.Log('batch.0.1', 'y'),
# tch.callbacks.WindowedMetricAvg('fake_loss', 'fake_loss'),
# tch.callbacks.WindowedMetricAvg('real_loss', 'real_loss'),
# tch.callbacks.WindowedMetricAvg('prob_fake', 'prob_fake'),
# tch.callbacks.WindowedMetricAvg('prob_real', 'prob_real'),
        # disabled: the 'D-correct' metric computation is commented out in D_fun
        # tch.callbacks.WindowedMetricAvg('D-correct', 'D-correct'),
tch.callbacks.Log('g_norm', 'g_norm'),
])
recipe.G_loop.callbacks.add_callbacks([
tch.callbacks.Optimizer(
tch.optim.Lookahead(
tch.optim.RAdamW(Gy.parameters(),
lr=2e-3,
betas=(0., 0.99),
weight_decay=0))),
tch.callbacks.Optimizer(
tch.optim.Lookahead(
tch.optim.RAdamW(Gx.parameters(),
lr=2e-3,
betas=(0., 0.99),
weight_decay=0))),
])
recipe.test_loop.callbacks.add_callbacks([
tch.callbacks.GANMetrics('batch.1', 'out', device=rank),
tch.callbacks.Log('kid', 'kid'),
tch.callbacks.Log('fid', 'fid'),
tch.callbacks.Log('precision', 'precision'),
tch.callbacks.Log('recall', 'recall'),
tch.callbacks.Log('out', 'test_out'),
tch.callbacks.Log('batch.0', 'test_x'),
])
recipe.to(rank)
if opts.from_ckpt is not None:
recipe.load_state_dict(torch.load(opts.from_ckpt, map_location='cpu'))
recipe.run(200)
def run(opts):
G = pix2pix_128()
G.to_instance_norm()
tnn.utils.net_to_equal_lr(G, leak=0.2)
G.load_state_dict(torch.load(opts.from_ckpt, map_location='cpu')['G'])
import torchvision.transforms as TF
from PIL import Image
tfm = TF.Compose([
TF.Resize(128),
TF.CenterCrop(128),
TF.ToTensor(),
TF.Normalize([0.5] * 3, [0.5] * 3),
])
img = tfm(Image.open(opts.src).convert('RGB'))
img = torch.stack([img, img], dim=0)
TF.functional.to_pil_image(G(img, torch.randn(2, 256))[0]).save(opts.dst)
def para_run(opts):
return tu.parallel_run(train, opts=opts)
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
subparsers = parser.add_subparsers()
train_parser = subparsers.add_parser('train')
train_parser.add_argument('--data-A',
required=True,
type=lambda x: x.split(':'))
train_parser.add_argument('--data-B',
required=True,
type=lambda x: x.split(':'))
train_parser.add_argument('--data-test',
required=True,
type=lambda x: x.split(':'))
train_parser.add_argument('--r0-D', default=0.0001, type=float)
train_parser.add_argument('--r0-M', default=0.0001, type=float)
train_parser.add_argument('--consistency', default=0.01, type=float)
train_parser.add_argument('--from-ckpt')
train_parser.set_defaults(func=para_run)
run_parser = subparsers.add_parser('run')
run_parser.add_argument('--from-ckpt', required=True)
run_parser.add_argument('--src', required=True)
run_parser.add_argument('--dst', required=True)
run_parser.set_defaults(func=run)
opts = parser.parse_args()
opts.func(opts)
|
1663455
|
from django.db import models
class TransformQuerySet(models.query.QuerySet):
def __init__(self, *args, **kwargs):
super(TransformQuerySet, self).__init__(*args, **kwargs)
self._transform_fns = []
def _clone(self, klass=None, setup=False, **kw):
c = super(TransformQuerySet, self)._clone(klass, setup, **kw)
c._transform_fns = self._transform_fns[:]
return c
def transform(self, fn):
c = self._clone()
c._transform_fns.append(fn)
return c
def iterator(self):
result_iter = super(TransformQuerySet, self).iterator()
if self._transform_fns:
results = list(result_iter)
for fn in self._transform_fns:
fn(results)
return iter(results)
return result_iter
class TransformManager(models.Manager):
def get_query_set(self):
return TransformQuerySet(self.model)
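# Usage sketch (hypothetical Author model): transform functions receive the
# fully evaluated result list once, which suits batched post-processing:
#
#   def attach_books(authors):
#       ...  # e.g. one query fetching books for all authors at once
#
#   class Author(models.Model):
#       name = models.CharField(max_length=100)
#       objects = TransformManager()
#
#   for author in Author.objects.all().transform(attach_books):
#       ...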
|
1663462
|
import random
import re
from youtube_related import RateLimited
from youtube_related import preventDuplication as relatedClient
from .config import Config
from .connector import VoiceConnector
from .enums import PlayerState
from .errors import NotPlaying
from .player import Player
from .source import AudioData, AudioSource
from .utils import CallbackList, EventDispatcher
class DiscordVoiceClient(VoiceConnector):
def __init__(self, manager, data=None):
super().__init__(manager, data=data)
self.relatedClient = relatedClient()
self.dispatcher = EventDispatcher()
self.dispatcher.onAny(
lambda event, *args, **kwargs: manager.dispatcher.dispatch(
self.guild_id, *args, event=event, **kwargs
)
)
self.dispatcher.on("REQUIRE_NEXT_SOURCE", self.__fetchAutoPlay)
self.Context = {}
self.Queue = CallbackList()
self.Queue.callback = self.__queueCallback
self.player = None
self.paused = False
self.filter = {}
self.autoplay = Config.DEFAULT_AUTOPLAY
self._volume = Config.DEFAULT_VOLUME
self._crossfade = Config.DEFAULT_CROSSFADE
def __del__(self):
guild_id = self.guild_id if self.guild_id else None
if self.manager.voiceClients.get(guild_id) == self:
self.dispatcher.dispatch("VC_DESTROYED")
del self.manager.voiceClients[guild_id]
super().__del__()
if self.player and self.player.is_alive():
self.player.stop()
for Item in filter(lambda x: isinstance(x, AudioSource), self.Queue):
self.loop.call_soon_threadsafe(Item.cleanup)
def __repr__(self) -> str:
return f"<VoiceClient guild_id={self.guild_id} volume={self.volume} crossfade={self.crossfade} autoplay={self.autoplay}>"
async def __fetchAutoPlay(self, current, **_):
if (
self.autoplay
and not self.Queue
and re.match(
r"^((?:https?:)?\/\/)?((?:www|m)\.)?((?:youtube\.com|youtu.be))(\/(?:[\w\-]+\?v=|embed\/|v\/)?)([\w\-]+)(\S+)?$",
current.webpage_url,
)
):
for _ in range(5):
address = Config.RoutePlanner.get() if Config.RoutePlanner else None
try:
Related = await self.relatedClient.async_get(
current.webpage_url, local_addr=address
)
except RateLimited:
Config.RoutePlanner.mark_failed_address(address)
else:
return await self.loadSource(
"https://www.youtube.com/watch?v=" + Related["id"],
related=True,
)
def __queueCallback(self, name, *args):
self.dispatcher.dispatch("QUEUE_EVENT", name=name, args=args)
@property
def channel_id(self):
return self._channel_id
@channel_id.setter
def channel_id(self, value) -> None:
self._channel_id = value
self.dispatcher.dispatch("VC_CHANNEL_EDITED", channel_id=self._channel_id)
async def createSocket(self, data=None):
await super().createSocket(data=data)
if self.player and self.player.is_alive():
return
self.player = Player(self)
self.player.start()
@property
def state(self):
if not self.player:
return PlayerState.DISCONNECTED
elif not self.Queue and not self.player.current:
return PlayerState.STOPPED
elif self.paused:
return PlayerState.PAUSED
else:
return PlayerState.PLAYING
@property
def volume(self) -> float:
return self._volume
@volume.setter
def volume(self, value: float):
self._volume = round(max(value, 0.0), 2)
@property
def crossfade(self) -> float:
return self._crossfade
@crossfade.setter
def crossfade(self, value: float):
self._crossfade = round(max(value, 0.0), 1)
@property
def current(self):
if not self.player:
return
return self.player.current
@property
def next(self):
if not self.player:
return
return self.player.next
async def getSource(self, query):
return await AudioData.create(query)
def putSource(self, source):
sources = source if isinstance(source, list) else [source]
self.Queue.extend(sources)
self.dispatcher.dispatch("putSource", sources=sources)
return (
self.Queue.index(source)
if not isinstance(source, list)
else list(map(self.Queue.index, sources))
)
async def loadSource(self, query, **kwargs):
source = await self.getSource(query)
if isinstance(kwargs.get("related"), bool):
source.related = kwargs["related"]
self.putSource(source)
self.dispatcher.dispatch("loadSource", source=source, **kwargs)
return source
async def seek(self, offset):
return await self.player.seek(offset)
def skip(self, offset=1):
if not self.player.current:
raise NotPlaying
if len(self.Queue) < (offset - 1):
raise ValueError("`offset` is bigger than `Queue` size.")
if offset > 1:
del self.Queue[0 : (offset - 1)]
self.player.current.skip()
def pause(self):
self.paused = True
return self.paused
def resume(self):
self.paused = False
return self.paused
def shuffle(self):
if not self.Queue:
raise ValueError("`Queue` is empty now.")
random.shuffle(self.Queue)
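# Queue-manipulation sketch, assuming an already-connected client `vc`:
#
#   source = await vc.loadSource('https://www.youtube.com/watch?v=...')
#   vc.volume = 0.5      # clamped to >= 0.0, rounded to 2 decimals
#   vc.crossfade = 5.0   # seconds, rounded to 1 decimal
#   vc.skip()            # raises NotPlaying if nothing is playing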
|
1663486
|
import time
from kombu import Connection, Queue, Exchange
from kombu.common import maybe_declare
from kombu.mixins import ConsumerProducerMixin
from kombu.pools import producers
from spylunking.log.setup_logging import build_colorized_logger
from celery_connectors.utils import SUCCESS
from celery_connectors.utils import FAILED
from celery_connectors.utils import ERROR
from celery_connectors.utils import ev
from celery_connectors.utils import build_sample_msgs
from celery_connectors.utils import calc_backoff_timer
from celery_connectors.build_ssl_options import build_ssl_options
# Credits and inspirations from these great sources:
#
# https://github.com/celery/kombu/blob/master/examples/rpc-tut6/rpc_server.py
# https://gist.github.com/oubiwann/3843016
# https://gist.github.com/eavictor/ee7856581619ac60643b57987b7ed580#file-mq_kombu_rpc_server-py
# https://github.com/Skablam/kombu-examples
# https://gist.github.com/mlavin/6671079
name = ev("APP_NAME", "robopubsub")
log = build_colorized_logger(
name=name)
broker_url = ev("PUB_BROKER_URL", "pyamqp://rabbitmq:rabbitmq@localhost:5672//")
exchange_name = ev("PUBLISH_EXCHANGE", "ecomm.api")
exchange_type = ev("PUBLISH_EXCHANGE_TYPE", "topic")
routing_key = ev("PUBLISH_ROUTING_KEY", "ecomm.api.west")
queue_name = ev("PUBLISH_QUEUE", "ecomm.api.west")
prefetch_count = int(ev("PREFETCH_COUNT", "1"))
priority_routing = {"high": queue_name,
"low": queue_name}
use_exchange = Exchange(exchange_name, type=exchange_type)
use_routing_key = routing_key
use_queue = Queue(queue_name, exchange=use_exchange, routing_key=routing_key)
task_queues = [
use_queue
]
ssl_options = build_ssl_options()
transport_options = {}
def send_task_msg(conn=None,
data={},
exchange=None, # kombu.Exchange object
routing_key=None, # string
priority="high",
priority_routing={},
serializer="json",
**kwargs):
res = {"status": ERROR, # non-zero is failure
"error": ""}
use_routing_key = routing_key
if not use_routing_key:
if priority in priority_routing:
use_routing_key = priority_routing[priority]
# end of finding the routing key
payload = data
if len(payload) == 0:
res["status"] = ERROR
res["error"] = "Please set a data argument to a dict " + \
"to publish messages"
return res
if not conn:
res["status"] = ERROR
res["error"] = "Please set a valid connection (conn) " + \
"to publish messages"
return res
if not exchange:
res["status"] = ERROR
res["error"] = "Please set an exchange to publish"
return res
if not use_routing_key:
res["status"] = ERROR
res["error"] = "Please set pass in a routing_key " + \
"or a valid priority_routing with an" + \
"entry to a routing_key string to " + \
"send a task message"
return res
log.info(("{} publish - "
"ex={} rk={} sz={}")
.format(name,
exchange,
use_routing_key,
serializer))
last_step = "try"
try:
with producers[conn].acquire(block=True) as producer:
# if you throw here, please pass in a kombu.Exchange
# because the type of Exchange should not be handled in
# the send method
last_step = "Please set an exchange to publish"
last_step = "maybe declare={}".format(exchange.name)
maybe_declare(exchange,
producer.channel)
last_step = "publish rk={}".format(routing_key)
producer.publish(payload,
serializer=serializer,
exchange=exchange,
routing_key=routing_key)
res["status"] = SUCCESS
res["error"] = ""
except Exception as e:
res["status"] = FAILED
res["error"] = ("{} producer threw "
"exception={} ex={} rk={} "
"last_step={}").format(
name,
e,
exchange,
routing_key,
last_step)
log.error(("{} producer threw "
"exception={} ex={} rk={} "
"last_step={}")
.format(name,
e,
exchange,
routing_key,
last_step))
# end of try to send
return res
# end of send_task_msg
def run_publisher(broker_url,
exchange=None, # kombu.Exchange object
routing_key=None, # string
msgs=[],
num_per_batch=-1,
priority="high",
serializer="json",
ssl_options={},
transport_options={},
*args,
**kwargs):
log.info("connecting")
with Connection(broker_url,
ssl=ssl_options,
transport_options=transport_options) as conn:
num_to_send = len(msgs)
if num_to_send == 0:
log.info(("no msgs={} to publish")
.format(num_to_send))
return
log.info(("publishing ex={} rk={} "
"msgs={}")
.format(exchange,
routing_key,
num_to_send))
num_sent = 0
not_done = True
num_fails = 0
while not_done:
cur_msg = msgs[num_sent]
send_res = send_task_msg(conn=conn,
data=cur_msg,
exchange=exchange,
routing_key=routing_key,
priority=priority,
priority_routing=priority_routing,
serializer=serializer)
if send_res["status"] == SUCCESS:
num_fails = 0
num_sent += 1
if num_sent >= num_to_send:
not_done = False
else:
num_fails += 1
sleep_duration = calc_backoff_timer(num_fails)
log.info(("publish failed - {} - exch={} rk={}"
"sleep={} seconds retry={}")
.format(send_res["error"],
exchange,
routing_key,
sleep_duration,
num_fails))
if num_fails > 100000:
num_fails = 1
time.sleep(sleep_duration)
# end of if done
# end of sending all messages
# end of with kombu.Connection
# end of run_publisher
class WorkerProducerConsumerMixin(ConsumerProducerMixin):
def __init__(self,
conn=None,
callback=None,
task_queues=[],
prefetch_count=1):
self.name = "pubsub-mix"
self.connection = conn
self.task_queues = task_queues
self.prefetch_count = prefetch_count
self.use_callback = self.handle_message
if callback:
self.use_callback = callback
# end of __init__
def set_callback(self, callback):
self.use_callback = callback
    # end of set_callback
def set_task_queues(self, task_queues=[]):
self.task_queues = task_queues
# end of set_task_queues
def get_consumers(self, Consumer, channel): # noqa F811
if len(self.task_queues) == 0:
log.error(("There are no task_queues={} "
"to consume")
.format(len(self.task_queues)))
return []
return [Consumer(queues=self.task_queues,
prefetch_count=self.prefetch_count,
callbacks=[self.use_callback])]
# end of get_consumers
def handle_message(self, body, message):
log.info(("default handle_message - "
"acking - msg={}")
.format(body))
message.ack()
# end of handle_message
# end of WorkerProducerConsumerMixin
def run_consumer(broker_url,
ssl_options={},
transport_options={},
task_queues=[],
callback=None,
prefetch_count=1,
*args,
**kwargs):
if len(broker_url) == 0:
log.error(("Please pass in a valid broker_url "
"to consume"))
return
if len(task_queues) == 0:
log.error(("Please pass in a list of task_queues to "
"consume"))
return
with Connection(broker_url,
ssl=ssl_options,
transport_options=transport_options) as conn:
try:
log.info(("consuming queues={}")
.format(task_queues))
WorkerProducerConsumerMixin(
conn=conn,
task_queues=task_queues,
callback=callback,
prefetch_count=prefetch_count).run()
except KeyboardInterrupt:
log.info("Received Interrupt - Shutting down")
# end of with kombu.Connection
# end of run_consumer
num_msgs_to_send = 10
log.info(("Generating messages={}")
.format(num_msgs_to_send))
msgs = build_sample_msgs(num=num_msgs_to_send,
data={"simulated_lag": 1.0})
log.info(("Publishing messages={}")
.format(len(msgs)))
run_publisher(broker_url=broker_url,
exchange=use_exchange, # kombu.Exchange object
routing_key=use_routing_key, # string
msgs=msgs,
ssl_options=ssl_options,
transport_options=transport_options,
priority="high")
log.info("Done Publishing")
log.info(("Consuming queues={}")
.format(len(task_queues)))
run_consumer(broker_url=broker_url,
ssl_options=ssl_options,
transport_options=transport_options,
task_queues=task_queues,
prefetch_count=prefetch_count)
log.info("Done")
|
1663510
|
import torch
import torch.nn as nn
import torch.nn.functional as F
d1 = 64
d2 = 128
d3 = 256
d4 = 512
class AttnContentStrategy(nn.Module):
def __init__(self, n_labels):
super(AttnContentStrategy, self).__init__()
self.linearStrategy = nn.Linear(n_labels, d1)
self.linearStrategy2 = nn.Linear(d1, d1)
self.linearContent = nn.Linear(d1, d1)
self.linearContent2 = nn.Linear(d1, d1)
self.strategyContentContext = nn.Parameter(torch.randn([d1, 1]).float())
self.content_proj = nn.Linear(d1, d1)
self.strat_proj = nn.Linear(d1, d1)
self.linear1 = nn.Linear(d2, d3)
self.linear2 = nn.Linear(d3, d4)
self.lstm = nn.LSTM(d4, d4)
self.s_proj = nn.Linear(d4, d4)
self.softmax = nn.Softmax(dim = 1)
self.s_context_vector = nn.Parameter(torch.randn([d4, 1]).float())
self.sent_linear1 = nn.Linear(d4, d3)
self.sent_linear2 = nn.Linear(d3, d2)
self.sent_linear3 = nn.Linear(d2, 1)
def forward(self, content, strategy):
linearContent = self.linearContent(content)
linearContent = F.relu(self.linearContent2(linearContent))
linearStrategy = self.linearStrategy(strategy)
linearStrategy = F.relu(self.linearStrategy2(linearStrategy))
out = torch.cat((linearContent, linearStrategy), axis=2)
Hcontent = torch.tanh(self.content_proj(linearContent))
Hstrategy = torch.tanh(self.strat_proj(linearStrategy))
Wcontent = Hcontent.matmul(self.strategyContentContext)
Wstrategy = Hstrategy.matmul(self.strategyContentContext)
temp = torch.cat((Wcontent, Wstrategy), dim=2)
temp = torch.softmax(temp, dim = 2)
out = torch.cat((temp[:,:,0].unsqueeze(2) * linearContent, temp[:,:,1].unsqueeze(2) * linearStrategy), axis=2)
out = F.relu(self.linear1(out))
out = F.relu(self.linear2(out))
out, _ = self.lstm(out)
Hs = torch.tanh(self.s_proj(out))
s_score = self.softmax(Hs.matmul(self.s_context_vector))
out = out.mul(s_score)
out = torch.sum(out, dim = 1)
out = F.relu(self.sent_linear1(out))
out = F.relu(self.sent_linear2(out))
out = self.sent_linear3(out)
        out = torch.sigmoid(out)
return out, temp[:,:,0].unsqueeze(2), temp[:,:,1].unsqueeze(2), s_score
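# Shape sanity-check sketch with synthetic tensors (n_labels chosen arbitrarily):
if __name__ == '__main__':
    model = AttnContentStrategy(n_labels=10)
    content = torch.randn(4, 7, d1)     # (batch, seq, d1)
    strategy = torch.randn(4, 7, 10)    # (batch, seq, n_labels)
    score, w_content, w_strategy, attn = model(content, strategy)
    print(score.shape)                  # torch.Size([4, 1])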
|
1663512
|
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
digits = datasets.load_digits()
print(digits.data)
print(digits.target)
# digits.target is the actual label we've assigned to the digits data.
# Now that we've got the data ready, we're ready to do the machine learning.
# First, we specify the classifier:
# If you want, you can just leave parameters blank and use the defaults, like this:
# clf = svm.SVC()
# clf = svm.SVC(gamma=0.001, C=100)
# clf = svm.SVC(gamma=0.01, C=100)
clf = svm.SVC(gamma=0.0001, C=100)
X,y = digits.data[:-10], digits.target[:-10]
clf.fit(X,y)
# predict expects a 2D array, so reshape the single sample
print(clf.predict(digits.data[-5].reshape(1, -1)))
plt.imshow(digits.images[-5], cmap=plt.cm.gray_r, interpolation='nearest')
plt.show()
|
1663513
|
import tensorflow as tf
import numpy as np
import cv2
import matplotlib.pyplot as plt
from tensorflow_graphics.math.interpolation import bspline
def get_trajectories(dataset):
trajectories = []
avails = []
object_types = []
for i, batch in enumerate(dataset):
future_states = tf.squeeze(batch['gt_future_states'], axis = 1)[:, 11:, :2]
future_is_valid = tf.squeeze(batch['gt_future_is_valid'], axis = 1)[:, 11:]
x = batch['x']
y = batch['y']
yaw = batch['yaw']
x = tf.squeeze(x, axis = 1)
y = tf.squeeze(y, axis = 1)
yaw = tf.squeeze(yaw, axis = 1)
c = tf.math.cos(yaw)
s = tf.math.sin(yaw)
object_type = tf.squeeze(batch['object_type'], axis = 1)
future_x = future_states[:, :, 0] # (B, 80)
future_y = future_states[:, :, 1] # (B, 80)
future_x_hat = future_x - x # (B, 80)
future_y_hat = future_y - y # (B, 80)
future_ego_x = c * future_x_hat + s * future_y_hat # (B, 80)
future_ego_y = -s * future_x_hat + c * future_y_hat # (B, 80)
future_states = tf.stack([future_ego_x, future_ego_y], axis = -1)
trajectories.append(future_states)
avails.append(future_is_valid)
object_types.append(object_type)
if i % 1000 == 0:
print(i)
trajectories = tf.concat(trajectories, axis = 0)
avails = tf.concat(avails, axis = 0)
object_types = tf.concat(object_types, axis = 0)
trajectories = trajectories.numpy()
avails = avails.numpy()
object_types = object_types.numpy()
np.save("drive/MyDrive/Motion/trajectories.npy", trajectories)
np.save("drive/MyDrive/Motion/avails.npy", avails)
np.save("drive/MyDrive/Motion/object_types.npy", object_types)
return trajectories, avails, object_types
def cluster(trajectories, avails, K = 8, num_iters = 30):
num = trajectories.shape[1]
trajectories = trajectories.copy().reshape([-1, 2*num])
avails = avails.reshape([-1, num, 1])
avails = np.concatenate((avails, avails), axis = 2)
avails = avails.reshape([-1, 2*num])
centroids = trajectories.copy()[0:K*17:17,:] # (8, 160)
for iteration in range(num_iters):
assignments = m_step(trajectories, avails, centroids)
e_step(trajectories, avails, centroids, assignments)
return assignments, centroids, trajectories, avails
def chunked_cluster(trajectories, avails, initial_centroids = None, K = 8, num_iters = 30, chunk_size=250000):
num = int(trajectories.shape[1])
trajectories = trajectories.copy().reshape([-1, 2*num])
avails = avails.reshape([-1, num, 1])
avails = np.concatenate((avails, avails), axis = 2)
avails = avails.reshape([-1, 2*num])
if initial_centroids is not None:
centroids = initial_centroids.copy()
else:
centroids = trajectories.copy()[0:K*17:17,:] # (8, 160)
N = len(trajectories)
for iteration in range(num_iters):
print(iteration)
assignments_list = []
for i in range(0, N, chunk_size):
j = min(i + chunk_size, N)
assignments_list.append(m_step(trajectories[i:j], avails[i:j], centroids))
assignments = np.concatenate(assignments_list, axis = 0)
e_step(trajectories, avails, centroids, assignments)
return assignments, centroids, trajectories, avails
def m_step(trajectories, avails, centroids):
"""
Parameters:
trajectories: nparray of shape (B, 160)
avails: nparray of shape (B, 160)
centroids: nparray of shape (8, 160)
Returns:
assignments: nparray of shape(B,)(Each trajectory has an assignment to a cluster)
"""
K = len(centroids)
num = trajectories.shape[1]//2
assert num != 160, "num is 160"
a = trajectories.reshape([-1, 1, 2*num])
b = centroids.reshape([1, K, 2*num])
reshaped_avails = avails.reshape([-1, 1, 2*num])
distance = ((a-b)**2)*reshaped_avails # (B, 8, 160)
distance = np.sum(distance, axis = 2) # (B, 8)
assignments = np.argmin(distance, axis = 1) # (B,)
print('total cost:', np.sum(np.min(distance, axis = 1).astype(np.float64)))
return assignments
def e_step(trajectories, avails, centroids, assignments, K = 8):
"""
Parameters:
trajectories: nparray of shape (B, 160)
avails: nparray of shape (B, 160)
centroids: nparray of shape (8, 160)
assignments: nparray of shape(B,)(Each trajectory has an assignment to a cluster)
Returns:
None: centroids are changed in place.
"""
K = len(centroids)
for i in range(K):
members = np.where(assignments == i)
member_trajectories = trajectories[members] # (C, 160)
member_avails = avails[members] # (C, 160)
sum_trajectory = np.sum(member_trajectories*member_avails, axis = 0) # (160,)
sum_avails = np.sum(member_avails, axis = 0) + 1e-6 # (160,)
centroids[i] = sum_trajectory/sum_avails
def visualize_clusters(assignments, centroids, avails):
colors = [(255, 0, 0),
(255, 255, 0),
(255, 255, 255),
(0,255, 255),
(0,255,0),
(0,0,255),
(255,0,255),
(255, 255, 100)]
for i in range(len(centroids)):
indices = np.where(assignments == i)[0]
centroid_avails = np.any(avails[indices], axis = 0)
print(f"the {i}th cluster has this many members:{len(indices)}")
trajectory = centroids[i][centroid_avails].reshape([-1, 2]).astype(np.int64)*2 + 112
image = np.zeros((224, 448, 3))
cv2.polylines(image, [trajectory], False, color = colors[i%8])
plt.imshow(image/255)
plt.show()
def visualize_trajectories(trajectories, avails, indices): # trajectories has shape (B, 160)
colors = [(255, 0, 0),
(255, 255, 0),
(255, 255, 255),
(0,255, 255),
(0,255,0),
(0,0,255),
(255,0,255),
(255, 255, 100)]
image = np.zeros((224,448,3))
for index in indices:
track_trajectory = trajectories[index]
track_avail = avails[index]
track_trajectory = track_trajectory[track_avail]
track_trajectory = 2*track_trajectory.reshape([1,-1,2]).astype(np.int64) + 112
cv2.polylines(image, track_trajectory, False, color = colors[index % 8])
plt.imshow(image/255)
plt.show()
def inspect_trajectory(dataset, index, batch_size = 32):
batch_index = index//batch_size
index_within_batch = index % batch_size
for i, batch in enumerate(dataset):
if i < batch_index:
continue
image = batch['image'][index_within_batch]
future_states = tf.squeeze(batch['gt_future_states'], axis = 1)[:, 11:, :2]
future_is_valid = tf.squeeze(batch['gt_future_is_valid'], axis = 1)[:, 11:] # (B, 80)
x = batch['x']
y = batch['y']
yaw = batch['yaw']
x = tf.squeeze(x, axis = 1)
y = tf.squeeze(y, axis = 1)
yaw = tf.squeeze(yaw, axis = 1)
c = tf.math.cos(yaw)
s = tf.math.sin(yaw)
future_x = future_states[:, :, 0] # (B, 80)
future_y = future_states[:, :, 1] # (B, 80)
future_x_hat = future_x - x # (B, 80)
future_y_hat = future_y - y # (B, 80)
future_ego_x = c * future_x_hat + s * future_y_hat # (B, 80)
future_ego_y = -s * future_x_hat + c * future_y_hat # (B, 80)
future_states = tf.stack([future_ego_x, future_ego_y], axis = -1) # (B, 80, 2)
trajectory = future_states[index_within_batch].numpy()
avails = future_is_valid[index_within_batch].numpy()
trajectory = (2.5*trajectory[avails]).astype(np.int64) + 112
image = image.numpy()
image = np.zeros((224,448,3))
cv2.polylines(image, [trajectory], False, color = (0,255,0))
plt.imshow(image/255)
plt.show()
break
def smooth(trajectories, avails, centroids, assignments):
"""
Arguments:
trajectories: nparray of shape (X, 80, 2)
avails: nparray of shape (X, 80)
centroids: nparray of shape (n, 160)
assignments: nparray of shape (X,)
Returns:
new_centroids: nparray of shape (n, 160)
"""
n = len(centroids)
new_centroids = np.zeros((n, 160))
histories = []
for i in range(n):
print(i)
initial_knots = tf.convert_to_tensor(centroids[i].reshape([80, 2])[9::10])
model = get_cluster_model(initial_knots)
opt = tf.keras.optimizers.SGD(learning_rate=10)
model.compile(opt, loss=cluster_loss)
current_trajectories = trajectories[assignments==i]
current_avails = avails[assignments==i]
output = np.stack([current_trajectories, np.stack([current_avails, current_avails], axis=-1)], axis=1)
num_examples = len(current_trajectories)
history = model.fit(x = np.zeros((num_examples,)), y=output, batch_size=num_examples, epochs=100, verbose=0)
histories.append(history)
print("loss", history.history["loss"][-1])
current_centroid = model(np.array([0])).numpy()
new_centroids[i] = current_centroid.reshape([160,])
visualize_centroids(new_centroids)
return new_centroids
def cluster_and_get_all_avails(filtered_trajectories, filtered_avails, K, num_iters, chunk_size):
assignments_K, centroids_K, trajectories_K, avails_K = chunked_cluster(filtered_trajectories, filtered_avails, K=K, num_iters=num_iters, chunk_size=chunk_size)
np.save("drive/MyDrive/Motion/clusters/filtered_veh_64.npy", centroids_K)
np.save("drive/MyDrive/Motion/clusters/filtered_assignments_64.npy", assignments_K)
all_avails_K = []
for i in range(K):
all_avails_K.append(avails_K[np.where(assignments_K==i)])
    return assignments_K, centroids_K, trajectories_K, avails_K, all_avails_K
def visualize_centroids(centroids, all_avails=None):
"""
Call Arguments:
centroids: (K, 160)
all_avails: python list of nparrays of shape (B, 80)
"""
K = len(centroids)
num = centroids.shape[1]//2
for i in range(K):
        if all_avails is not None:
avails = np.any(all_avails[i], axis=0)
centroid = (2.5*centroids[i].reshape([num, 2])[avails]).astype(np.int32) + 112
else:
centroid = (2.5*centroids[i].reshape([num, 2])).astype(np.int32) + 112
print(i)
image = np.zeros((224, 448, 3))
cv2.polylines(image, [centroid], False, (255, 255, 255))
for pt in centroid[::4]:
cv2.circle(image, (pt[0], pt[1]), 1, (255, 0, 0))
plt.figure(figsize=(10, 20))
plt.imshow(image/255)
plt.show()
def chunked_m_step(trajectories, avails, centroids, chunk_size=125000):
N = len(trajectories)
trajectories = trajectories.copy().reshape([-1, 160])
avails = avails.reshape([-1, 80, 1])
avails = np.concatenate((avails, avails), axis = 2)
avails = avails.reshape([-1, 160])
assignments_list = []
for i in range(0, N, chunk_size):
j = min(i + chunk_size, N)
assignments_list.append(m_step(trajectories[i:j], avails[i:j], centroids))
assignments = np.concatenate(assignments_list, axis = 0)
return assignments
def get_cluster_model(initial_knots):
"""
initial_knots: tensor of shape (8, 2)
"""
dummy_input = tf.keras.layers.Input(shape = (1,))
knots = tf.keras.layers.Dense(16)(dummy_input)
knots = tf.reshape(knots, (-1, 1, 2, 8)) # (B, 1, 2, 8)
initial_knots = initial_knots[tf.newaxis, tf.newaxis, :, :]
knots = knots + tf.transpose(initial_knots, [0, 1, 3, 2])
max_pos = 8 - 3
positions = tf.expand_dims(tf.range(start = 0.0, limit = max_pos, delta = max_pos/80, dtype= knots.dtype), axis = -1)
spline = bspline.interpolate(knots, positions, 3, False)
spline = tf.squeeze(spline, axis = 1)
pred = tf.transpose(spline, perm = [1,2,0,3]) # (B, K, 80, 2)
pred = tf.reshape(pred, [-1, 80, 2])
model = tf.keras.Model(inputs=[dummy_input], outputs =[pred])
return model
def cluster_loss(y_true, y_pred):
return tf.reduce_mean(((y_true[:, 0] - y_pred)**2) * y_true[:, 1])
def show_trajectory(trajectory):
"""
trajectory: of shape (80, 2) or (160) or (1, 80, 2)
"""
image = np.zeros((224, 448, 3))
pts = (2.5*trajectory.reshape([80, 2])).astype(np.int32) + 112
    cv2.polylines(image, [pts], False, (255, 255, 255))
for pt in pts:
cv2.circle(image, (pt[0], pt[1]), 1, (255, 0, 0))
plt.figure(figsize=(10, 20))
plt.imshow(image)
plt.show()
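# Tiny synthetic round trip for m_step/e_step above: random "trajectories"
# flattened to 160 values (80 steps x 2 coords), all marked available.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    demo_trajs = rng.normal(size=(100, 160))
    demo_avails = np.ones((100, 160))
    demo_centroids = demo_trajs[:8].copy()
    for _ in range(5):
        demo_assignments = m_step(demo_trajs, demo_avails, demo_centroids)
        e_step(demo_trajs, demo_avails, demo_centroids, demo_assignments)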
|
1663529
|
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
_DEFAULT_PY_BUILD_FILE = """
py_library(
name = "lib",
srcs = glob(["**/*.py"]),
visibility = ["//visibility:public"],
)
"""
_PANDOC_BUILD_FILE = """
filegroup(
name = "pandoc",
srcs = ["bin/pandoc"],
visibility = ["//visibility:public"],
)"""
def protoc_docs_plugin_repositories():
_maybe(
http_archive,
name = "com_google_protobuf",
strip_prefix = "protobuf-c60aaf79e63b911b2c04c04e1eacb4f3c36ef790", # this is 3.9.1 with fixes
urls = ["https://github.com/protocolbuffers/protobuf/archive/c60aaf79e63b911b2c04c04e1eacb4f3c36ef790.zip"],
)
_maybe(
http_archive,
name = "pypi_pypandoc",
url = "https://files.pythonhosted.org/packages/71/81/00184643e5a10a456b4118fc12c96780823adb8ed974eb2289f29703b29b/pypandoc-1.4.tar.gz",
strip_prefix = "pypandoc-1.4",
build_file_content = _DEFAULT_PY_BUILD_FILE,
)
_maybe(
http_archive,
name = "pandoc_linux",
build_file_content = _PANDOC_BUILD_FILE,
strip_prefix = "pandoc-2.2.1",
url = "https://github.com/jgm/pandoc/releases/download/2.2.1/pandoc-2.2.1-linux.tar.gz",
)
_maybe(
http_archive,
name = "pandoc_macOS",
build_file_content = _PANDOC_BUILD_FILE,
strip_prefix = "pandoc-2.2.1",
url = "https://github.com/jgm/pandoc/releases/download/2.2.1/pandoc-2.2.1-macOS.zip",
)
def protoc_docs_plugin_register_toolchains():
native.register_toolchains(
"@protoc_docs_plugin//:pandoc_toolchain_linux",
"@protoc_docs_plugin//:pandoc_toolchain_macOS",
)
def _maybe(repo_rule, name, strip_repo_prefix = "", **kwargs):
if not name.startswith(strip_repo_prefix):
return
repo_name = name[len(strip_repo_prefix):]
if repo_name in native.existing_rules():
return
repo_rule(name = repo_name, **kwargs)
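# WORKSPACE usage sketch (the .bzl label below is an assumption about where
# this file lives in the repository):
#
#   load("@protoc_docs_plugin//:repositories.bzl",
#        "protoc_docs_plugin_repositories",
#        "protoc_docs_plugin_register_toolchains")
#   protoc_docs_plugin_repositories()
#   protoc_docs_plugin_register_toolchains()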
|
1663569
|
from geoalchemy2 import Geography, Geometry
from pytz import timezone
from shapely import wkb
from sqlalchemy import (
Column,
Integer, BigInteger,
String,
Boolean,
DateTime,
ForeignKey,
UniqueConstraint,
)
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import deferred, relationship
from sqlalchemy.ext.declarative import declarative_base
from typing import List, Optional
import datetime
import numpy
import statistics
Base = declarative_base()
class Source(Base):
"""
A specific source data may come from.
E.g. NEXRAD L2, GFS, NAM, HRRR
"""
__tablename__ = "source"
id = Column(Integer, primary_key=True)
short_name = Column(String(8), unique=True)
name = Column(String(128), unique=True)
src_url = Column(String(1024))
last_updated = Column(DateTime)
# Fields are backref'd
def serialize(self):
return {
"id": self.id,
"short_name": self.short_name,
"name": self.name,
"src_url": self.src_url,
"last_updated": self.last_updated,
}
def __repr__(self):
return f"<Source id={self.id} short_name='{self.short_name}'>"
class Metric(Base):
"""
A metric that various source fields can have values for.
E.g. temperature, precipitation, visibility
"""
__tablename__ = "metric"
id = Column(Integer, primary_key=True)
name = Column(String(128), unique=True)
units = Column(String(16))
# intermediate metrics aren't displayed to the end user, and are only used for deriving other metrics
intermediate = Column(Boolean, nullable=False, default=False)
def serialize(self):
return {
"id": self.id,
"name": self.name,
"units": self.units,
}
def __repr__(self):
return f"<Metric id={self.id} name='{self.name}'>"
class SourceField(Base):
"""
A specific field inside of a source.
E.g. Composite reflectivity @ entire atmosphere, 2m temps, visibility @ ground
"""
__tablename__ = "source_field"
__table_args__ = (
UniqueConstraint('source_id', 'metric_id'),
)
id = Column(Integer, primary_key=True)
source_id = Column(Integer, ForeignKey('source.id'))
metric_id = Column(Integer, ForeignKey('metric.id'))
projection_id = Column(Integer, ForeignKey('projection.id'))
idx_short_name = Column(String(15)) # e.g. TMP, VIS
idx_level = Column(String(255)) # e.g. surface, 2 m above ground
selectors = Column(JSONB) # e.g. {'name': 'Temperature', 'typeOfLevel': 'surface'}. NULL means this field won't be ingested directly
source = relationship('Source', backref='fields', lazy='joined')
projection = relationship('Projection')
metric = relationship('Metric', backref='fields', lazy='joined')
def serialize(self):
return {
"id": self.id,
"source_id": self.source_id,
"metric_id": self.metric_id,
}
def __repr__(self):
return f"<SourceField id={self.id} short_name='{self.idx_short_name}'>"
class Location(Base):
"""
A specific location that we have a lat/lon for.
"""
__tablename__ = "location"
id = Column(Integer, primary_key=True)
location = Column(Geography('Point,4326'))
name = Column(String(512))
population = Column(Integer)
def get_coords(self):
"""
:return: lon, lat
"""
point = wkb.loads(bytes(self.location.data))
return point.x, point.y
def serialize(self):
coords = self.get_coords()
return {
"id": self.id,
"name": self.name,
"lon": coords[0],
"lat": coords[1],
}
def __repr__(self):
return f"<Location id={self.id} name='{self.name}'>"
class Timezone(Base):
"""
A timezone name and associated geometry.
"""
__tablename__ = "timezone"
name = Column(String(512), primary_key=True)
geom = deferred(Column(Geometry('MULTIPOLYGON')))
def utc_offset(self, dt):
return timezone(self.name).utcoffset(dt)
class Projection(Base):
"""
Table that holds data about the projection a given ingested file uses.
"""
__tablename__ = "projection"
id = Column(Integer, primary_key=True)
params = Column(JSONB)
n_x = Column(Integer)
n_y = Column(Integer)
ll_hash = Column(BigInteger)
lats = deferred(Column(JSONB))
lons = deferred(Column(JSONB))
def shape(self):
return (self.n_y, self.n_x)
class FileMeta(Base):
"""
Table that holds metadata about denormalized data in a given file.
Each file can hold any data (different fields, different sources even) as long
as it has a single projection.
"""
__tablename__ = "file_meta"
file_name = Column(String(4096), primary_key=True)
projection_id = Column(Integer, ForeignKey('projection.id'))
ctime = Column(DateTime, default=datetime.datetime.utcnow)
loc_size = Column(Integer, nullable=False)
projection = relationship('Projection')
class FileBandMeta(Base):
"""
Table that holds data about specific runs of denormalized data in the given file.
"""
__tablename__ = "file_band_meta"
# TODO: on delete of file meta, delete these
# PKs
file_name = Column(String, ForeignKey('file_meta.file_name'), primary_key=True)
offset = Column(Integer, primary_key=True) # offset within a (x,y) chunk, _not_ offset in the entire file
# Metadata used to seek into the file
vals_per_loc = Column(Integer)
# Metadata
source_field_id = Column(Integer, ForeignKey('source_field.id'))
valid_time = Column(DateTime)
run_time = Column(DateTime)
file_meta = relationship('FileMeta', backref='bands', lazy='joined')
source_field = relationship('SourceField', lazy='joined')
class DataPointSet(object):
"""
    Non-DB object which holds values and metadata for a given data point (location, time)
"""
values: List[float]
metric_id: int
valid_time: datetime.datetime
source_field_id: Optional[int]
run_time: Optional[datetime.datetime]
derived: bool
synthesized: bool
def __init__(
self,
values: List[float],
metric_id: int,
valid_time: datetime.datetime,
source_field_id: Optional[int] = None,
run_time: Optional[datetime.datetime] = None,
derived: bool = False,
synthesized: bool = False):
self.values = values
self.metric_id = metric_id
self.valid_time = valid_time
# Optional fields
self.source_field_id = source_field_id
self.run_time = run_time
self.derived = derived
self.synthesized = synthesized
def __repr__(self):
return f"<DataPointSet metric_id={self.metric_id} valid_time={self.valid_time} source_field_id={self.source_field_id} derived={self.derived} synthesized={self.synthesized}>"
def min(self) -> float:
return min(self.values)
def max(self) -> float:
return max(self.values)
def median(self) -> float:
return statistics.median(self.values)
def median_confidence(self) -> float:
vals = numpy.array(self.values)
n_within_stddev = (abs(vals - self.median()) < numpy.std(vals)).sum()
return n_within_stddev / len(vals)
def mean(self) -> float:
return statistics.mean(self.values)
def mean_confidence(self) -> float:
vals = numpy.array(self.values)
n_within_stddev = (abs(vals - self.mean()) < numpy.std(vals)).sum()
return n_within_stddev / len(vals)
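# --- Usage sketch (DataPointSet is a plain object, so no database is required) ---
def _demo_data_point_set():
    dps = DataPointSet(
        values=[10.0, 11.0, 12.0, 30.0],
        metric_id=1,
        valid_time=datetime.datetime(2020, 1, 1, 12, 0),
    )
    # median_confidence is the fraction of values within one standard deviation
    # of the median: here 3 of 4 values qualify, so it returns 0.75
    return dps.median(), dps.median_confidence()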
|
1663572
|
import pybedtools as bt
from bcbio.utils import file_exists
from bcbio import utils
def decomment(bed_file, out_file):
"""
    remove comment, browser, and track lines from a BED file
"""
if file_exists(out_file):
return out_file
with utils.open_gzipsafe(bed_file) as in_handle, open(out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("#") or line.startswith("browser") or line.startswith("track"):
continue
else:
out_handle.write(line)
return out_file
def concat(bed_files, catted=None):
"""
recursively concat a set of BED files, returning a
sorted bedtools object of the result
"""
bed_files = [x for x in bed_files if x]
if len(bed_files) == 0:
if catted:
# move to a .bed extension for downstream tools if not already
sorted_bed = catted.sort()
if not sorted_bed.fn.endswith(".bed"):
return sorted_bed.moveto(sorted_bed.fn + ".bed")
else:
return sorted_bed
else:
return catted
if not catted:
bed_files = list(bed_files)
catted = bt.BedTool(bed_files.pop())
else:
catted = catted.cat(bed_files.pop(), postmerge=False,
force_truncate=False)
return concat(bed_files, catted)
def merge(bedfiles):
    """
    given a BED file or list of BED files, merge them and return a bedtools object
    """
    if isinstance(bedfiles, list):
        catted = concat(bedfiles)
    else:
        catted = concat([bedfiles])
    if catted:
        # reuse the already-concatenated result instead of re-running concat,
        # which would mishandle a single filename passed as a string
        return catted.sort().merge()
    else:
        return catted
def minimize(bed_file):
"""
strip a BED file down to its three necessary columns: chrom start end
"""
if not bed_file:
return bed_file
else:
sorted_bed = bt.BedTool(bed_file).cut(range(3)).sort()
if not sorted_bed.fn.endswith(".bed"):
return sorted_bed.moveto(sorted_bed.fn + ".bed")
else:
return sorted_bed
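# --- Usage sketch (assumption: "a.bed" and "b.bed" are existing BED files) ---
# merged = merge(["a.bed", "b.bed"])   # sorted, merged BedTool over both files
# minimal = minimize("a.bed")          # chrom/start/end only, sorted, .bed suffix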
|
1663604
|
from fastapi import APIRouter
from . import ballot
from . import guardian
from . import tally_decrypt
router = APIRouter()
router.include_router(guardian.router, prefix="/guardian")
router.include_router(ballot.router, prefix="/ballot")
router.include_router(tally_decrypt.router, prefix="/tally")
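# Usage sketch (assumption: this package is importable as `api`):
# from fastapi import FastAPI
# from api import router
#
# app = FastAPI()
# app.include_router(router, prefix="/api")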
|
1663607
|
from SPARQLWrapper import SPARQLWrapper, JSON
import json
import requests
def setup_query(person_complete_name: str):
"""
Return the SPARQL query for obtaining gender, birthdate and nationality (if available) of the given person from
DBpedia
:param person_complete_name: person whose metadata are of interest
:return:
"""
query_template = """
SELECT *
WHERE {{
?p foaf:name "{}"@en;
foaf:gender ?gender;
dbo:birthDate ?birthdate.
optional {{ ?p dbp:nationality ?nationality_dbp }}
optional {{ ?p dbo:nationality ?nationality_dbo }}
}}
""".format(person_complete_name)
return query_template
def query_dbpedia_endpoint(person_complete_name, sparql):
"""
    Query the given SPARQL endpoint for obtaining metadata from the person of interest
:param person_complete_name: person of interest
:param sparql: SPARQL Wrapper that acts as an endpoint
:return:
"""
query = setup_query(person_complete_name)
sparql.setQuery(query)
sparql.setReturnFormat(JSON)
query_results = sparql.query().convert()
return query_results
def extract_metadata_from_query_results(query_results):
"""
Given a Sparql query result, extract nationality, gender and birthdate
:param query_results:
:return:
"""
if query_results["results"]["bindings"]:
raw_metadata = query_results["results"]["bindings"][0]
gender = raw_metadata['gender']['value'].lower()
birth_date = raw_metadata['birthdate']['value']
if "nationality_dbp" in raw_metadata.keys():
nationality = raw_metadata['nationality_dbp']['value'].lower()
elif "nationality_dbo" in raw_metadata.keys():
nationality = raw_metadata['nationality_dbo']['value'].lower()
else:
nationality = ""
return birth_date, gender, nationality
else:
raise ValueError
def get_person_metadata(person_complete_name: str, endpoint: str):
"""
Return a dictionary with gender, birth date and nationality of the person of interest
:param person_complete_name: person of interest in the format "Name Surname"
:param endpoint: which service to query
:return:
"""
if endpoint == "dbpedia":
person_metadata = get_metadata_dbpedia(person_complete_name)
elif endpoint == "wikidata":
person_metadata = get_metadata_wikidata(person_complete_name)
else:
raise ValueError("Invalid endpoint")
return person_metadata
def get_metadata_dbpedia(person_complete_name):
"""
Return gender, birth date and nationality of the current person by querying DBpedia
:param person_complete_name:
:return:
"""
sparql = SPARQLWrapper("http://dbpedia.org/sparql")
query_results = query_dbpedia_endpoint(person_complete_name, sparql)
try:
birth_date, gender, nationality = extract_metadata_from_query_results(query_results)
person_metadata = {"complete_name": person_complete_name,
"gender": gender,
"birth_date": birth_date,
"nationality": nationality}
except ValueError:
print("Could not get metadata for {}: is the person's name spelled correctly?".format(person_complete_name))
person_metadata = {}
return person_metadata
def get_wikidata_entities(person_complete_name):
"""
    Return all plausible entity IDs associated with the given person.
IDs are ordered from the most likely to the least likely (according to Wikidata)
:param person_complete_name:
:return:
"""
endpoint = "https://www.wikidata.org/w/api.php?action=wbsearchentities&search={}&language=en&format=json".format(
person_complete_name)
content = json.loads(requests.get(endpoint).content)
entities = content['search']
entities_ids = [entity['id'] for entity in entities]
return entities_ids
def get_wikidata_properties(entity_id):
"""
Return birth date, gender and nationality of the given entity ID
:param entity_id: Wikidata Entity (e.g. Q10490 for <NAME>)
:return:
"""
entity_endpoint = "https://www.wikidata.org/w/api.php?action=wbgetclaims&entity={}&format=json"
url_of_interest = entity_endpoint.format(
entity_id
)
content = requests.get(url_of_interest).content
content = json.loads(content)['claims']
# Birth date
birth_date = None
try:
birth_date = content['P569'][0]['mainsnak']['datavalue']['value']['time']
except KeyError:
print("Birth date not available")
except Exception as ex:
print(ex)
# Sex/gender
gender = None
try:
sex_entity = content['P21'][0]['mainsnak']['datavalue']['value']['id']
sex_entity_id_desc = {
"Q6581097": "male",
"Q6581072": "female",
"Q1097630": "intersex",
"Q1052281": "transgender female",
"Q2449503": "transgender male"
} # Source: https://www.wikidata.org/wiki/Property:P21
gender = sex_entity_id_desc[sex_entity]
except KeyError:
print("Gender not available")
except Exception as ex:
print(ex)
# Citizenship
citizenship = None
try:
country_entity = content['P27'][0]['mainsnak']['datavalue']['value']['id']
country_name_id = "P3417"
url_of_interest = entity_endpoint.format(
country_entity)
country_content = requests.get(url_of_interest).content
country_content = json.loads(country_content)['claims']
citizenship = country_content[country_name_id][0]['mainsnak']['datavalue']['value']
except KeyError:
print("Citizenship not available")
except Exception as ex:
print(ex)
person_metadata = {
"gender": gender,
"birth_date": birth_date,
"nationality": citizenship
}
return person_metadata
def get_metadata_wikidata(person_complete_name):
"""
Get birth date, gender and nationality (expressed with country name) for the given person
:param person_complete_name: Person you are interested in
:return:
"""
entities_ids = get_wikidata_entities(person_complete_name)
person_metadata = {}
for entity_id in entities_ids:
if not person_metadata:
try:
person_metadata = get_wikidata_properties(entity_id)
person_metadata['name'] = person_complete_name
except Exception as ex:
print(ex)
else:
break
if not person_metadata:
print("Could not get metadata for {}: is the person's name spelled correctly?".format(person_complete_name))
return person_metadata
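# --- Usage sketch ---
# Both endpoints are queried live, so results depend on network availability and
# on how the name is spelled in DBpedia/Wikidata:
#
#   metadata = get_person_metadata("Tim Berners-Lee", endpoint="wikidata")
#   print(metadata.get("birth_date"), metadata.get("gender"), metadata.get("nationality"))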
|
1663623
|
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from .util import number_color
from functools import partial
import glob
import math
from ui.util import number_object
from ui.mouse_event import ReferenceDialog, SnapshotDialog
import copy
Lb_width = 100
Lb_height = 40
Lb_row_shift = 25
Lb_col_shift = 5
Lb_x = 100
Lb_y = 690
Tb_width = 100
Tb_height = 40
Tb_row_shift = 50
Tb_col_shift = 5
Tb_x = 100
Tb_y = 60
_translate = QtCore.QCoreApplication.translate
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(1430, 750)
# Form.resize(1980, 1100)
self.graphicsView = QtWidgets.QGraphicsView(Form)
self.graphicsView.setGeometry(QtCore.QRect(100, 140, 518, 518))
self.graphicsView.setObjectName("graphicsView")
self.graphicsView_GT = QtWidgets.QGraphicsView(Form)
self.graphicsView_GT.setGeometry(QtCore.QRect(800, 140, 570, 570))
self.graphicsView_GT.setObjectName("graphicsView_GT")
# self.graphicsView_2 = QtWidgets.QGraphicsView(Form)
# self.graphicsView_2.setGeometry(QtCore.QRect(652, 140, 518, 518))
# self.graphicsView_2.setObjectName("graphicsView_2")
# Label Buttons to change the semantic meanings of the Brush
# First Row
self.add_brush_widgets(Form)
self.add_top_buttons(Form)
self.add_label_buttons_eg3d(Form)
self.add_tool_buttons(Form)
# self.add_checkbox_widgets(Form)
self.add_update_img_button(Form)
# self.referDialog = ReferenceDialog(self)
# self.referDialog.setObjectName('Reference Dialog')
# # self.referDialog.setWindowTitle('Reference Image:')
# self.referDialog.setWindowTitle('Style Image')
# self.referDialogImage = QtWidgets.QLabel(self.referDialog)
# self.referDialogImage.setFixedSize(512, 512)
# self.referDialog.show()
# self.snapshotDialog = SnapshotDialog(self)
# self.snapshotDialog.setObjectName('Snapshot Dialog')
# self.snapshotDialog.setWindowTitle('Reference Image:')
# self.snapshotDialogImage = QtWidgets.QLabel(self.snapshotDialog)
# self.snapshotDialogImage.setFixedSize(512, 512)
# self.add_intermediate_results_button(Form)
self.add_alpha_bar(Form)
self.add_yaw_bar(Form)
self.add_pitch_bar(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
# Form.setWindowTitle(_translate("Form", "Let's Party Face Manipulation v0.2"))
Form.setWindowTitle(_translate("Form", "Let's Party Face Manipulation"))
self.pushButton.setText(_translate("Form", "Open Image"))
        self.pushButton_2.setText(_translate("Form", "StartScreening"))
self.pushButton_3.setText(_translate("Form", "SaveScreening"))
self.pushButton_4.setText(_translate("Form", "Color"))
self.pushButton_5.setText(_translate("Form", "Open Random"))
self.saveImg.setText(_translate("Form", "Save Img"))
def add_alpha_bar(self, Form):
self.alphaLabel = QtWidgets.QLabel(Form)
self.alphaLabel.setObjectName("alphaLabel")
self.alphaLabel.setGeometry(QtCore.QRect(500, 25, 150, 20))
self.alphaLabel.setText('Alpha: 0.5')
font = self.brushsizeLabel.font()
font.setPointSize(10)
font.setBold(True)
self.alphaLabel.setFont(font)
self.alphaSlider = QtWidgets.QSlider(Form)
self.alphaSlider.setOrientation(QtCore.Qt.Horizontal)
self.alphaSlider.setGeometry(QtCore.QRect(500 + 150, 30, 150, 10))
self.alphaSlider.setObjectName("alphaSlider")
self.alphaSlider.setMinimum(0)
self.alphaSlider.setMaximum(20)
self.alphaSlider.setValue(10)
self.alphaSlider.valueChanged.connect(Form.change_alpha_value)
def add_brush_widgets(self, Form):
# self.add_style_imgs_buttons(Form)
self.brushsizeLabel = QtWidgets.QLabel(Form)
self.brushsizeLabel.setObjectName("brushsizeLabel")
self.brushsizeLabel.setGeometry(QtCore.QRect(Tb_x - 1 * Lb_row_shift - 60+10 , 25, 150, 20))
self.brushsizeLabel.setText('Brush size: 6')
font = self.brushsizeLabel.font()
font.setPointSize(10)
font.setBold(True)
self.brushsizeLabel.setFont(font)
self.brushSlider = QtWidgets.QSlider(Form)
self.brushSlider.setOrientation(QtCore.Qt.Horizontal)
self.brushSlider.setGeometry(QtCore.QRect(Tb_x - 1 * Lb_row_shift - 60 + 130+10, 30, 300, 10))
self.brushSlider.setObjectName("brushSlider")
self.brushSlider.setMinimum(1)
self.brushSlider.setMaximum(100)
self.brushSlider.setValue(6)
self.brushSlider.valueChanged.connect(Form.change_brush_size)
def add_yaw_bar(self, Form):
self.yawLabel = QtWidgets.QLabel(Form)
self.yawLabel.setObjectName("yawLabel")
self.yawLabel.setGeometry(QtCore.QRect(500 + 320, 25, 150, 20))
self.yawLabel.setText('Yaw: 0')
font = self.brushsizeLabel.font()
font.setPointSize(10)
font.setBold(True)
self.yawLabel.setFont(font)
self.yawSlider = QtWidgets.QSlider(Form)
self.yawSlider.setOrientation(QtCore.Qt.Horizontal)
self.yawSlider.setGeometry(QtCore.QRect(500 + 470, 30, 150, 10))
self.yawSlider.setObjectName("yawSlider")
self.yawSlider.setMinimum(-50)
self.yawSlider.setMaximum(50)
self.yawSlider.setValue(0)
self.yawSlider.valueChanged.connect(Form.change_yaw_value)
def add_pitch_bar(self, Form):
self.pitchLabel = QtWidgets.QLabel(Form)
self.pitchLabel.setObjectName("pitchLabel")
self.pitchLabel.setGeometry(QtCore.QRect(500 + 630, 25, 150, 20))
self.pitchLabel.setText('Pitch: 0')
font = self.brushsizeLabel.font()
font.setPointSize(10)
font.setBold(True)
self.pitchLabel.setFont(font)
self.pitchSlider = QtWidgets.QSlider(Form)
self.pitchSlider.setOrientation(QtCore.Qt.Horizontal)
self.pitchSlider.setGeometry(QtCore.QRect(500 + 780, 30, 150, 10))
self.pitchSlider.setObjectName("pitchSlider")
self.pitchSlider.setMinimum(-50)
self.pitchSlider.setMaximum(50)
self.pitchSlider.setValue(0)
self.pitchSlider.valueChanged.connect(Form.change_pitch_value)
def add_intermediate_results_button(self, Form):
self.snap_scrollArea = QtWidgets.QScrollArea(Form)
self.snap_scrollArea.setGeometry(QtCore.QRect(100, Lb_y + Lb_height + Lb_col_shift + Lb_height + 30, 1622, 250))
self.snap_scrollArea.setWidgetResizable(True)
self.snap_scrollArea.setObjectName("snap_scrollArea")
self.snap_scrollArea.setAlignment(Qt.AlignCenter)
#self.snap_scrollArea.setStyleSheet("border-color: transparent")
self.snap_scrollArea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.snap_scrollAreaWidgetContents = QtWidgets.QWidget()
self.snap_scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 1622, 250))
self.snap_scrollAreaWidgetContents.setObjectName("snap_scrollAreaWidgetContents")
self.snap_gridlLayout = QtWidgets.QGridLayout(self.snap_scrollAreaWidgetContents)
# # snap_horizontalLayout.setContentsMargins(11, 11, 11, 11)
self.snap_gridlLayout.setSpacing(20)
self.snap_gridlLayout.setAlignment(Qt.AlignLeft)
self.snap_style_button_list = []
self.mask_snap_style_button_list = []
for i in range(15):
snap_style_button = QtWidgets.QPushButton()
snap_style_button.setFixedSize(100, 100)
snap_style_button.setStyleSheet("background-color: transparent")
snap_style_button.setIcon(QIcon())
snap_style_button.setIconSize(QSize(100, 100))
snap_style_button.clicked.connect(partial(self.open, i))
# snap_style_button.snap_shot_name = None
self.snap_style_button_list.append(snap_style_button)
# style_button.hide()
self.snap_gridlLayout.addWidget(snap_style_button, 1, i)
mask_snap_style_button = QtWidgets.QPushButton()
mask_snap_style_button.setFixedSize(100, 100)
mask_snap_style_button.setStyleSheet("background-color: transparent")
mask_snap_style_button.setIcon(QIcon())
mask_snap_style_button.setIconSize(QSize(100, 100))
self.mask_snap_style_button_list.append(mask_snap_style_button)
# mask_snap_style_button.hide()
self.snap_gridlLayout.addWidget(mask_snap_style_button, 0, i)
self.snap_scrollArea.setWidget(self.snap_scrollAreaWidgetContents)
def add_update_img_button(self, Form):
self.updateButton = QtWidgets.QPushButton(Form)
self.updateButton.setGeometry(QtCore.QRect(900, 60, 60, 60))
self.updateButton.setText(_translate("Form", "Render"))
self.updateButton.setStyleSheet("background-color: %s;" % number_color[18]+ " color: white")
self.updateButton.setObjectName("updateImg")
self.updateButton.clicked.connect(Form.run_deep_model)
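        # NOTE: the four buttons created below all reuse the attribute name
        # self.updateStyleButton; each assignment overwrites the previous Python
        # reference, but the widgets stay alive because Form is their parent.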
self.updateStyleButton = QtWidgets.QPushButton(Form)
self.updateStyleButton.setGeometry(QtCore.QRect(980, 60, 90, 60))
self.updateStyleButton.setText(_translate("Form", "Change style"))
self.updateStyleButton.setStyleSheet("background-color: %s;" % number_color[17]+ " color: white")
self.updateStyleButton.setObjectName("change style")
self.updateStyleButton.clicked.connect(Form.change_style)
self.updateStyleButton = QtWidgets.QPushButton(Form)
self.updateStyleButton.setGeometry(QtCore.QRect(1300, 60, 90, 60))
self.updateStyleButton.setText(_translate("Form", "Back style"))
self.updateStyleButton.setStyleSheet("background-color: %s;" % number_color[17]+ " color: white")
self.updateStyleButton.setObjectName("back style")
self.updateStyleButton.clicked.connect(Form.back_style)
self.updateStyleButton = QtWidgets.QPushButton(Form)
self.updateStyleButton.setGeometry(QtCore.QRect(1100, 60, 90, 60))
self.updateStyleButton.setText(_translate("Form", "Free View"))
self.updateStyleButton.setStyleSheet("background-color: %s;" % number_color[16]+ " color: white")
self.updateStyleButton.setObjectName("free view")
self.updateStyleButton.clicked.connect(Form.freeview_render)
self.updateStyleButton = QtWidgets.QPushButton(Form)
self.updateStyleButton.setGeometry(QtCore.QRect(1200, 60, 90, 60))
self.updateStyleButton.setText(_translate("Form", "Reset View"))
self.updateStyleButton.setStyleSheet("background-color: %s;" % number_color[15]+ " color: white")
self.updateStyleButton.setObjectName("reset view")
self.updateStyleButton.clicked.connect(Form.reset_view)
def add_checkbox_widgets(self, Form):
self.checkBoxGroupBox = QtWidgets.QGroupBox("Replace Style of Components", Form)
self.checkBoxGroupBox.setGeometry(QtCore.QRect(920, 10, 800, 100))
layout = QtWidgets.QGridLayout()
self.checkBoxGroup = QtWidgets.QButtonGroup(Form)
self.checkBoxGroup.setExclusive(False)
for i, j in enumerate(number_object):
cb = QtWidgets.QCheckBox(number_object[j])
self.checkBoxGroup.addButton(cb, i)
layout.addWidget(cb, i//10, i%10)
cb = QtWidgets.QCheckBox('ALL')
        self.checkBoxGroup.addButton(cb)
layout.addWidget(cb, (i+1)//10, (i+1)%10)
self.checkBoxGroupBox.setLayout(layout)
for i in range(19):
self.checkBoxGroup.button(i).setChecked(True)
checkbox_status = [cb.isChecked() for cb in self.checkBoxGroup.buttons()]
checkbox_status = checkbox_status[:19]
self.checkbox_status = checkbox_status
self.checkBoxGroup.buttonToggled.connect(self.cb_event)
def add_top_buttons(self, Form):
self.pushButton = QtWidgets.QPushButton(Form)
self.pushButton.setGeometry(QtCore.QRect(Tb_x - 1 * Lb_row_shift - 45, Tb_y, Tb_width, Tb_height))
self.pushButton.setObjectName("pushButton")
self.pushButton.clicked.connect(Form.open)
self.pushButton_2 = QtWidgets.QPushButton(Form)
self.pushButton_2.setGeometry(QtCore.QRect(Tb_x - 1 * Lb_row_shift - 45 + 1 * Tb_row_shift + 1 * Tb_width, Tb_y, Tb_width, Tb_height))
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_2.clicked.connect(Form.startScreening)
self.pushButton_3 = QtWidgets.QPushButton(Form)
self.pushButton_3.setGeometry(QtCore.QRect(Tb_x - 1 * Lb_row_shift - 45+ 2 * Tb_row_shift + 2 * Tb_width, Tb_y, Tb_width, Tb_height))
self.pushButton_3.setObjectName("pushButton_3")
self.pushButton_3.clicked.connect(Form.saveScreening)
self.pushButton_4 = QtWidgets.QPushButton(Form)
self.pushButton_4.setGeometry(QtCore.QRect(Tb_x - 1 * Lb_row_shift - 45+ 3 * Tb_row_shift + 3 * Tb_width, Tb_y, Tb_width, Tb_height))
self.pushButton_4.setObjectName("pushButton_4")
self.saveImg = QtWidgets.QPushButton(Form)
self.saveImg.setGeometry(QtCore.QRect(Tb_x - 1 * Lb_row_shift - 45+ 4 * Tb_row_shift + 4 * Tb_width, Tb_y, Tb_width, Tb_height))
self.saveImg.setObjectName("saveImg")
self.saveImg.clicked.connect(Form.save_img)
self.pushButton_5 = QtWidgets.QPushButton(Form)
self.pushButton_5.setGeometry(QtCore.QRect(Tb_x - 1 * Lb_row_shift - 45 + 4 * Tb_row_shift + 5 * Tb_width, Tb_y, Tb_width, Tb_height))
self.pushButton_5.setObjectName("pushButton_5")
self.pushButton_5.clicked.connect(Form.open_random)
self.retranslateUi(Form)
def add_tool_buttons(self, Form):
self.newButton = QtWidgets.QPushButton(Form)
self.newButton.setGeometry(QtCore.QRect(int(Lb_x - 1 * Lb_row_shift - 60), 140, 60, 60))
        self.newButton.setObjectName("newButton")
self.newButton.setIcon(QIcon('icons/add_new_document.png'))
self.newButton.setIconSize(QSize(60, 60))
self.newButton.clicked.connect(Form.init_screen)
self.openButton = QtWidgets.QPushButton(Form)
self.openButton.setGeometry(QtCore.QRect(int(Lb_x - 1 * Lb_row_shift - 60), 140 + 60*1 + 10*1, 60, 60))
self.openButton.setObjectName("openButton")
self.openButton.setIcon(QIcon('icons/open.png'))
self.openButton.setIconSize(QSize(60, 60))
self.openButton.clicked.connect(Form.open_reference)
self.fillButton = QtWidgets.QPushButton(Form)
self.fillButton.setGeometry(QtCore.QRect(int(Lb_x - 1*Lb_row_shift - 60), 140 + 60*2 + 10*2, 60, 60))
self.fillButton.setObjectName("fillButton")
self.fillButton.setIcon(QIcon('icons/paint_can.png'))
self.fillButton.setIconSize(QSize(60, 60))
self.fillButton.clicked.connect(partial(Form.mode_select, 2))
self.brushButton = QtWidgets.QPushButton(Form)
self.brushButton.setGeometry(QtCore.QRect(int(Lb_x - 1*Lb_row_shift - 60), 140 + 60*3 + 10*3, 60, 60))
self.brushButton.setObjectName("brushButton")
self.brushButton.setIcon(QIcon('icons/paint_brush.png'))
self.brushButton.setIconSize(QSize(60, 60))
self.brushButton.setStyleSheet("background-color: #85adad")
#self.brushButton.setStyleSheet("background-color:")
self.brushButton.clicked.connect(partial(Form.mode_select, 0))
self.recButton = QtWidgets.QPushButton(Form)
self.recButton.setGeometry(QtCore.QRect(int(Lb_x - 1 * Lb_row_shift - 60), 140 + 60 * 4 + 10 * 4, 60, 60))
        self.recButton.setObjectName("recButton")
self.recButton.setIcon(QIcon('icons/brush_square.png'))
self.recButton.setIconSize(QSize(60, 60))
self.recButton.clicked.connect(partial(Form.mode_select, 1))
self.undoButton = QtWidgets.QPushButton(Form)
self.undoButton.setGeometry(QtCore.QRect(int(Lb_x - 1*Lb_row_shift - 60), 140 + 60*5 + 10*5, 60, 60))
        self.undoButton.setObjectName("undoButton")
self.undoButton.setIcon(QIcon('icons/undo.png'))
self.undoButton.setIconSize(QSize(60, 60))
self.undoButton.clicked.connect(Form.undo)
self.saveButton = QtWidgets.QPushButton(Form)
self.saveButton.setGeometry(QtCore.QRect(int(Lb_x - 1 * Lb_row_shift - 60), 140 + 60 * 6 + 10 * 6, 60, 60))
        self.saveButton.setObjectName("cleanForegroundButton")
self.saveButton.setIcon(QIcon('icons/add_new_document.png'))
self.saveButton.setIconSize(QSize(60, 60))
self.saveButton.clicked.connect(Form.cleanForground)
def add_style_imgs_buttons(self, Form):
self.scrollArea = QtWidgets.QScrollArea(Form)
self.scrollArea.setGeometry(QtCore.QRect(1756, 140, 140, 512))
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollArea.setAlignment(Qt.AlignCenter)
# self.scrollArea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.scrollArea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 140, 512))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
verticalLayout = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)
verticalLayout.setContentsMargins(11, 11, 11, 11)
verticalLayout.setSpacing(6)
img_path_list = glob.glob('imgs/style_imgs_test/*.jpg')
img_path_list.sort()
style_button = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
style_button.setFixedSize(100, 100)
style_button.setIcon(QIcon('icons/random.png'))
style_button.setIconSize(QSize(100, 100))
style_button.clicked.connect(Form.open)
verticalLayout.addWidget(style_button)
for img_path in img_path_list:
style_button = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
style_button.setFixedSize(100, 100)
style_button.setIcon(QIcon(img_path))
style_button.setIconSize(QSize(100, 100))
style_button.clicked.connect(partial(Form.open, img_path))
            verticalLayout.addWidget(style_button)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
def add_label_buttons(self, Form):
top_x, top_y = 642, 140
row_shift = 10
self.color_Button = QtWidgets.QPushButton(Form)
self.color_Button.setGeometry(QtCore.QRect(int(Lb_x - 1*Lb_row_shift - 60), Lb_y-50, 60, 60))
self.color_Button.setObjectName("labelButton_0")
self.color_Button.setText(_translate("Form", "%s" % number_object[1]))
self.color_Button.setStyleSheet("background-color: %s;" % number_color[1] + " color: black")
self.labelButton_0 = QtWidgets.QPushButton(Form)
self.labelButton_0.setGeometry(QtCore.QRect(top_x, top_y, Lb_width, Lb_height))
self.labelButton_0.setObjectName("labelButton_0")
self.labelButton_0.setText(_translate("Form", "background"))
self.labelButton_0.setStyleSheet("background-color: %s;" % number_color[0]+ " color: white")
self.labelButton_0.clicked.connect(partial(Form.switch_labels, 0))
self.labelButton_1 = QtWidgets.QPushButton(Form)
self.labelButton_1.setGeometry(QtCore.QRect(top_x, top_y + 1*Lb_height + 1*row_shift, Lb_width, Lb_height))
self.labelButton_1.setObjectName("labelButton_1")
self.labelButton_1.setText(_translate("Form", "%s"%number_object[1]))
self.labelButton_1.setStyleSheet("background-color: %s;" % number_color[1] + " color: black")
self.labelButton_1.clicked.connect(partial(Form.switch_labels, 1))
# eye
self.labelButton_3 = QtWidgets.QPushButton(Form)
self.labelButton_3.setGeometry(QtCore.QRect(top_x, top_y + 2*Lb_height + 2*row_shift, int(0.48*Lb_width), Lb_height))
self.labelButton_3.setObjectName("labelButton_3")
self.labelButton_3.setText(_translate("Form", "%s"%number_object[4]))
self.labelButton_3.setStyleSheet("background-color: %s;" % number_color[4] + " color: black")
self.labelButton_3.clicked.connect(partial(Form.switch_labels, 4))
self.labelButton_17 = QtWidgets.QPushButton(Form)
self.labelButton_17.setGeometry(QtCore.QRect(top_x + int(0.54*Lb_width), top_y + 2*Lb_height + 2*row_shift, int(0.48*Lb_width), Lb_height))
self.labelButton_17.setObjectName("labelButton_17")
self.labelButton_17.setText(_translate("Form", "%s"%number_object[5]))
self.labelButton_17.setStyleSheet("background-color: %s;" % number_color[5] + " color: black")
self.labelButton_17.clicked.connect(partial(Form.switch_labels, 5))
# eyebrow
self.labelButton_2 = QtWidgets.QPushButton(Form)
self.labelButton_2.setGeometry(QtCore.QRect(top_x, top_y + 3*Lb_height + 3*row_shift, int(0.48*Lb_width), Lb_height))
self.labelButton_2.setObjectName("labelButton_2")
self.labelButton_2.setText(_translate("Form", "%s"%number_object[2]))
self.labelButton_2.setStyleSheet("background-color: %s;" % number_color[2] + " color: black")
self.labelButton_2.clicked.connect(partial(Form.switch_labels, 2))
self.labelButton_18 = QtWidgets.QPushButton(Form)
self.labelButton_18.setGeometry(QtCore.QRect(top_x + int(0.54*Lb_width), top_y + 3*Lb_height + 3*row_shift, int(0.48*Lb_width), Lb_height))
self.labelButton_18.setObjectName("labelButton_18")
self.labelButton_18.setText(_translate("Form", "%s"%number_object[3]))
self.labelButton_18.setStyleSheet("background-color: %s;" % number_color[3] + " color: black")
self.labelButton_18.clicked.connect(partial(Form.switch_labels, 3))
# nose
self.labelButton_4 = QtWidgets.QPushButton(Form)
self.labelButton_4.setGeometry(QtCore.QRect(top_x, top_y + 4*Lb_height + 4*row_shift, int(0.48*Lb_width), Lb_height))
self.labelButton_4.setObjectName("labelButton_4")
self.labelButton_4.setText(_translate("Form", "%s"%number_object[7]))
self.labelButton_4.setStyleSheet("background-color: %s;" % number_color[7] + " color: black")
self.labelButton_4.clicked.connect(partial(Form.switch_labels, 7))
self.labelButton_5 = QtWidgets.QPushButton(Form)
self.labelButton_5.setGeometry(QtCore.QRect(top_x+ int(0.54*Lb_width), top_y + 4*Lb_height + 4*row_shift, int(0.48*Lb_width), Lb_height))
self.labelButton_5.setObjectName("labelButton_5")
self.labelButton_5.setText(_translate("Form", "%s"%number_object[6]))
self.labelButton_5.setStyleSheet("background-color: %s;" % number_color[6] + " color: black")
self.labelButton_5.clicked.connect(partial(Form.switch_labels, 6))
# mouse
self.labelButton_7 = QtWidgets.QPushButton(Form)
        self.labelButton_7.setGeometry(QtCore.QRect(top_x, int(top_y + 5.5*Lb_height + 5.5*row_shift), Lb_width, int(Lb_height*0.5)))
self.labelButton_7.setObjectName("labelButton_7")
self.labelButton_7.setText(_translate("Form", "%s"%number_object[9]))
self.labelButton_7.setStyleSheet("background-color: %s;" % number_color[9] + " color: black")
self.labelButton_7.clicked.connect(partial(Form.switch_labels, 9))
self.labelButton_6 = QtWidgets.QPushButton(Form)
self.labelButton_6.setGeometry(QtCore.QRect(top_x, int(top_y + 6.0*Lb_height + 6.0*row_shift), Lb_width, int(Lb_height*0.8)))
self.labelButton_6.setObjectName("labelButton_6")
self.labelButton_6.setText(_translate("Form", "%s"%number_object[8]))
self.labelButton_6.setStyleSheet("background-color: %s;" % number_color[8] + " color: black")
self.labelButton_6.clicked.connect(partial(Form.switch_labels, 8))
self.labelButton_8 = QtWidgets.QPushButton(Form)
self.labelButton_8.setGeometry(QtCore.QRect(top_x, int(top_y + 6.8*Lb_height + 6.5*row_shift), Lb_width, int(Lb_height*0.5)))
self.labelButton_8.setObjectName("labelButton_8")
self.labelButton_8.setText(_translate("Form", "%s"%number_object[10]))
self.labelButton_8.setStyleSheet("background-color: %s;" % number_color[10] + " color: black")
self.labelButton_8.clicked.connect(partial(Form.switch_labels, 10))
# ear
self.labelButton_9 = QtWidgets.QPushButton(Form)
self.labelButton_9.setGeometry(QtCore.QRect(top_x, top_y + 8*Lb_height + 8*row_shift, int(0.48*Lb_width), Lb_height))
self.labelButton_9.setObjectName("labelButton_9")
self.labelButton_9.setText(_translate("Form", "%s"%number_object[11]))
self.labelButton_9.setStyleSheet("background-color: %s;" % number_color[11] + " color: black")
self.labelButton_9.clicked.connect(partial(Form.switch_labels, 11))
self.labelButton_19 = QtWidgets.QPushButton(Form)
self.labelButton_19.setGeometry(QtCore.QRect(top_x+int(0.54*Lb_width), top_y + 8*Lb_height + 8*row_shift, int(0.48*Lb_width), Lb_height))
self.labelButton_19.setObjectName("labelButton_19")
self.labelButton_19.setText(_translate("Form", "%s"%number_object[12]))
self.labelButton_19.setStyleSheet("background-color: %s;" % number_color[12] + " color: black")
self.labelButton_19.clicked.connect(partial(Form.switch_labels, 12))
self.labelButton_10 = QtWidgets.QPushButton(Form)
self.labelButton_10.setGeometry(QtCore.QRect(top_x, top_y + 9*Lb_height + 9*row_shift, Lb_width, Lb_height))
self.labelButton_10.setObjectName("labelButton_10")
self.labelButton_10.setText(_translate("Form", "%s"%number_object[13]))
self.labelButton_10.setStyleSheet("background-color: %s;" % number_color[13] + " color: black")
self.labelButton_10.clicked.connect(partial(Form.switch_labels, 13))
########################################
row_shift, col_shift = 20, 8.1
self.labelButton_11 = QtWidgets.QPushButton(Form)
self.labelButton_11.setGeometry(QtCore.QRect(top_x, Lb_y - row_shift, Lb_width, Lb_height))
self.labelButton_11.setObjectName("labelButton_11")
self.labelButton_11.setText(_translate("Form", "%s"%number_object[14]))
self.labelButton_11.setStyleSheet("background-color: %s;" % number_color[14] + " color: black")
self.labelButton_11.clicked.connect(partial(Form.switch_labels, 14))
self.labelButton_12 = QtWidgets.QPushButton(Form)
self.labelButton_12.setGeometry(QtCore.QRect(Lb_x,Lb_y - row_shift , Lb_width, Lb_height))
self.labelButton_12.setObjectName("labelButton_12")
self.labelButton_12.setText(_translate("Form", "%s"%number_object[15]))
self.labelButton_12.setStyleSheet("background-color: %s;" % number_color[15] + " color: black")
self.labelButton_12.clicked.connect(partial(Form.switch_labels, 15))
self.labelButton_13 = QtWidgets.QPushButton(Form)
        self.labelButton_13.setGeometry(QtCore.QRect(int(Lb_x + 1*col_shift + 1*Lb_width),
                                        Lb_y - row_shift, Lb_width, Lb_height))
self.labelButton_13.setObjectName("labelButton_13")
self.labelButton_13.setText(_translate("Form", "%s"%number_object[16]))
self.labelButton_13.setStyleSheet("background-color: %s;" % number_color[16] + " color: black")
self.labelButton_13.clicked.connect(partial(Form.switch_labels, 16))
self.labelButton_14 = QtWidgets.QPushButton(Form)
        self.labelButton_14.setGeometry(QtCore.QRect(int(Lb_x + 2*col_shift + 2*Lb_width),
                                        Lb_y - row_shift, Lb_width, Lb_height))
self.labelButton_14.setObjectName("labelButton_14")
self.labelButton_14.setText(_translate("Form", "%s"%number_object[17]))
self.labelButton_14.setStyleSheet("background-color: %s;" % number_color[17] + " color: black")
self.labelButton_14.clicked.connect(partial(Form.switch_labels, 17))
self.labelButton_15 = QtWidgets.QPushButton(Form)
        self.labelButton_15.setGeometry(QtCore.QRect(int(Lb_x + 3*col_shift + 3*Lb_width),
                                        Lb_y - row_shift, Lb_width, Lb_height))
self.labelButton_15.setObjectName("labelButton_15")
self.labelButton_15.setText(_translate("Form", "%s"%number_object[18]))
self.labelButton_15.setStyleSheet("background-color: %s;" % number_color[18] + " color: black")
self.labelButton_15.clicked.connect(partial(Form.switch_labels, 18))
self.labelButton_16 = QtWidgets.QPushButton(Form)
        self.labelButton_16.setGeometry(QtCore.QRect(int(Lb_x + 4*col_shift + 4*Lb_width),
                                        Lb_y - row_shift, Lb_width, Lb_height))
self.labelButton_16.setObjectName("labelButton_16")
self.labelButton_16.setText(_translate("Form", "%s"%number_object[19]))
self.labelButton_16.setStyleSheet("background-color: %s;" % number_color[19] + " color: black")
self.labelButton_16.clicked.connect(partial(Form.switch_labels, 19))
def add_label_buttons_old(self, Form):
self.color_Button = QtWidgets.QPushButton(Form)
self.color_Button.setGeometry(QtCore.QRect(int(Lb_x - 1*Lb_row_shift - 60), Lb_y, 60, 60))
self.color_Button.setObjectName("labelButton_0")
self.color_Button.setStyleSheet("background-color: %s;" % number_color[1])
self.labelButton_0 = QtWidgets.QPushButton(Form)
self.labelButton_0.setGeometry(QtCore.QRect(Lb_x, Lb_y, Lb_width, Lb_height))
self.labelButton_0.setObjectName("labelButton_0")
self.labelButton_0.setText(_translate("Form", "background"))
self.labelButton_0.setStyleSheet("background-color: %s;" % number_color[0]+ " color: black")
self.labelButton_0.clicked.connect(partial(Form.switch_labels, 0))
self.labelButton_1 = QtWidgets.QPushButton(Form)
self.labelButton_1.setGeometry(QtCore.QRect(Lb_x + 1*Lb_row_shift + 1*Lb_width, Lb_y, Lb_width, Lb_height))
self.labelButton_1.setObjectName("labelButton_1")
self.labelButton_1.setText(_translate("Form", "skin"))
self.labelButton_1.setStyleSheet("background-color: %s;" % number_color[1] + " color: black")
self.labelButton_1.clicked.connect(partial(Form.switch_labels, 1))
self.labelButton_2 = QtWidgets.QPushButton(Form)
self.labelButton_2.setGeometry(QtCore.QRect(Lb_x + 2*Lb_row_shift + 2*Lb_width, Lb_y, Lb_width, Lb_height))
self.labelButton_2.setObjectName("labelButton_2")
self.labelButton_2.setText(_translate("Form", "nose"))
self.labelButton_2.setStyleSheet("background-color: %s;" % number_color[2] + " color: black")
self.labelButton_2.clicked.connect(partial(Form.switch_labels, 2))
self.labelButton_3 = QtWidgets.QPushButton(Form)
self.labelButton_3.setGeometry(QtCore.QRect(Lb_x + 3*Lb_row_shift + 3*Lb_width, Lb_y, Lb_width, Lb_height))
self.labelButton_3.setObjectName("labelButton_3")
self.labelButton_3.setText(_translate("Form", "eye_g"))
self.labelButton_3.setStyleSheet("background-color: %s;" % number_color[3] + " color: black")
self.labelButton_3.clicked.connect(partial(Form.switch_labels, 3))
self.labelButton_4 = QtWidgets.QPushButton(Form)
self.labelButton_4.setGeometry(QtCore.QRect(Lb_x + 4*Lb_row_shift + 4*Lb_width, Lb_y, Lb_width, Lb_height))
self.labelButton_4.setObjectName("labelButton_4")
self.labelButton_4.setText(_translate("Form", "l_eye"))
self.labelButton_4.setStyleSheet("background-color: %s;" % number_color[4] + " color: black")
self.labelButton_4.clicked.connect(partial(Form.switch_labels, 4))
self.labelButton_5 = QtWidgets.QPushButton(Form)
self.labelButton_5.setGeometry(QtCore.QRect(Lb_x + 5*Lb_row_shift + 5*Lb_width, Lb_y, Lb_width, Lb_height))
self.labelButton_5.setObjectName("labelButton_5")
self.labelButton_5.setText(_translate("Form", "r_eye"))
self.labelButton_5.setStyleSheet("background-color: %s;" % number_color[5] + " color: black")
self.labelButton_5.clicked.connect(partial(Form.switch_labels, 5))
self.labelButton_6 = QtWidgets.QPushButton(Form)
self.labelButton_6.setGeometry(QtCore.QRect(Lb_x + 6*Lb_row_shift + 6*Lb_width, Lb_y, Lb_width, Lb_height))
self.labelButton_6.setObjectName("labelButton_6")
self.labelButton_6.setText(_translate("Form", "l_brow"))
self.labelButton_6.setStyleSheet("background-color: %s;" % number_color[6] + " color: black")
self.labelButton_6.clicked.connect(partial(Form.switch_labels, 6))
self.labelButton_7 = QtWidgets.QPushButton(Form)
self.labelButton_7.setGeometry(QtCore.QRect(Lb_x + 7*Lb_row_shift + 7*Lb_width, Lb_y, Lb_width, Lb_height))
self.labelButton_7.setObjectName("labelButton_7")
self.labelButton_7.setText(_translate("Form", "r_brow"))
self.labelButton_7.setStyleSheet("background-color: %s;" % number_color[7] + " color: black")
self.labelButton_7.clicked.connect(partial(Form.switch_labels, 7))
self.labelButton_8 = QtWidgets.QPushButton(Form)
self.labelButton_8.setGeometry(QtCore.QRect(Lb_x + 8*Lb_row_shift + 8*Lb_width, Lb_y, Lb_width, Lb_height))
self.labelButton_8.setObjectName("labelButton_8")
self.labelButton_8.setText(_translate("Form", "l_ear"))
self.labelButton_8.setStyleSheet("background-color: %s;" % number_color[8] + " color: black")
self.labelButton_8.clicked.connect(partial(Form.switch_labels, 8))
self.labelButton_9 = QtWidgets.QPushButton(Form)
self.labelButton_9.setGeometry(QtCore.QRect(Lb_x + 9 * Lb_row_shift + 9 * Lb_width, Lb_y, Lb_width, Lb_height))
self.labelButton_9.setObjectName("labelButton_9")
self.labelButton_9.setText(_translate("Form", "r_ear"))
self.labelButton_9.setStyleSheet("background-color: %s;" % number_color[9] + " color: black")
self.labelButton_9.clicked.connect(partial(Form.switch_labels, 9))
# Second Row
self.labelButton_10 = QtWidgets.QPushButton(Form)
self.labelButton_10.setGeometry(QtCore.QRect(Lb_x,
Lb_y + Lb_height + Lb_col_shift, Lb_width, Lb_height))
self.labelButton_10.setObjectName("labelButton_10")
self.labelButton_10.setText(_translate("Form", "mouth"))
self.labelButton_10.setStyleSheet("background-color: %s;" % number_color[10] + " color: black")
self.labelButton_10.clicked.connect(partial(Form.switch_labels, 10))
self.labelButton_11 = QtWidgets.QPushButton(Form)
self.labelButton_11.setGeometry(QtCore.QRect(Lb_x + 1*Lb_row_shift + 1*Lb_width,
Lb_y + Lb_height + Lb_col_shift, Lb_width, Lb_height))
self.labelButton_11.setObjectName("labelButton_11")
self.labelButton_11.setText(_translate("Form", "u_lip"))
self.labelButton_11.setStyleSheet("background-color: %s;" % number_color[11] + " color: black")
self.labelButton_11.clicked.connect(partial(Form.switch_labels, 11))
self.labelButton_12 = QtWidgets.QPushButton(Form)
self.labelButton_12.setGeometry(QtCore.QRect(Lb_x + 2*Lb_row_shift + 2*Lb_width,
Lb_y + Lb_height + Lb_col_shift, Lb_width, Lb_height))
self.labelButton_12.setObjectName("labelButton_12")
self.labelButton_12.setText(_translate("Form", "l_lip"))
self.labelButton_12.setStyleSheet("background-color: %s;" % number_color[12] + " color: black")
self.labelButton_12.clicked.connect(partial(Form.switch_labels, 12))
self.labelButton_13 = QtWidgets.QPushButton(Form)
self.labelButton_13.setGeometry(QtCore.QRect(Lb_x + 3*Lb_row_shift + 3*Lb_width,
Lb_y + Lb_height + Lb_col_shift, Lb_width, Lb_height))
self.labelButton_13.setObjectName("labelButton_13")
self.labelButton_13.setText(_translate("Form", "hair"))
self.labelButton_13.setStyleSheet("background-color: %s;" % number_color[13] + " color: black")
self.labelButton_13.clicked.connect(partial(Form.switch_labels, 13))
self.labelButton_14 = QtWidgets.QPushButton(Form)
self.labelButton_14.setGeometry(QtCore.QRect(Lb_x + 4*Lb_row_shift + 4*Lb_width,
Lb_y + Lb_height + Lb_col_shift, Lb_width, Lb_height))
self.labelButton_14.setObjectName("labelButton_14")
self.labelButton_14.setText(_translate("Form", "hat"))
self.labelButton_14.setStyleSheet("background-color: %s;" % number_color[14] + " color: black")
self.labelButton_14.clicked.connect(partial(Form.switch_labels, 14))
self.labelButton_15 = QtWidgets.QPushButton(Form)
self.labelButton_15.setGeometry(QtCore.QRect(Lb_x + 5*Lb_row_shift + 5*Lb_width,
Lb_y + Lb_height + Lb_col_shift, Lb_width, Lb_height))
self.labelButton_15.setObjectName("labelButton_15")
self.labelButton_15.setText(_translate("Form", "ear_r"))
self.labelButton_15.setStyleSheet("background-color: %s;" % number_color[15] + " color: black")
self.labelButton_15.clicked.connect(partial(Form.switch_labels, 15))
self.labelButton_16 = QtWidgets.QPushButton(Form)
self.labelButton_16.setGeometry(QtCore.QRect(Lb_x + 6*Lb_row_shift + 6*Lb_width,
Lb_y + Lb_height + Lb_col_shift, Lb_width, Lb_height))
self.labelButton_16.setObjectName("labelButton_16")
self.labelButton_16.setText(_translate("Form", "neck_l"))
self.labelButton_16.setStyleSheet("background-color: %s;" % number_color[16] + " color: black")
self.labelButton_16.clicked.connect(partial(Form.switch_labels, 16))
self.labelButton_17 = QtWidgets.QPushButton(Form)
self.labelButton_17.setGeometry(QtCore.QRect(Lb_x + 7*Lb_row_shift + 7*Lb_width,
Lb_y + Lb_height + Lb_col_shift, Lb_width, Lb_height))
self.labelButton_17.setObjectName("labelButton_17")
self.labelButton_17.setText(_translate("Form", "neck"))
self.labelButton_17.setStyleSheet("background-color: %s;" % number_color[17] + " color: black")
self.labelButton_17.clicked.connect(partial(Form.switch_labels, 17))
self.labelButton_18 = QtWidgets.QPushButton(Form)
self.labelButton_18.setGeometry(QtCore.QRect(Lb_x + 8 * Lb_row_shift + 8 * Lb_width,
Lb_y + Lb_height + Lb_col_shift, Lb_width, Lb_height))
self.labelButton_18.setObjectName("labelButton_18")
self.labelButton_18.setText(_translate("Form", "cloth"))
self.labelButton_18.setStyleSheet("background-color: %s;" % number_color[18] + " color: black")
self.labelButton_18.clicked.connect(partial(Form.switch_labels, 18))
def add_label_buttons_eg3d(self, Form):
top_x, top_y = 642, 140
row_shift = 10
self.color_Button = QtWidgets.QPushButton(Form)
self.color_Button.setGeometry(QtCore.QRect(int(Lb_x - 1*Lb_row_shift - 60), Lb_y-50, 60, 60))
self.color_Button.setObjectName("labelButton_0")
self.color_Button.setText(_translate("Form", "%s" % number_object[1]))
self.color_Button.setStyleSheet("background-color: %s;" % number_color[1] + " color: black")
self.labelButton_0 = QtWidgets.QPushButton(Form)
self.labelButton_0.setGeometry(QtCore.QRect(top_x, top_y, Lb_width, Lb_height))
self.labelButton_0.setObjectName("labelButton_0")
self.labelButton_0.setText(_translate("Form", "background"))
self.labelButton_0.setStyleSheet("background-color: %s;" % number_color[0]+ " color: white")
self.labelButton_0.clicked.connect(partial(Form.switch_labels, 0))
self.labelButton_1 = QtWidgets.QPushButton(Form)
self.labelButton_1.setGeometry(QtCore.QRect(top_x, top_y + 1*Lb_height + 1*row_shift, Lb_width, Lb_height))
self.labelButton_1.setObjectName("labelButton_1")
self.labelButton_1.setText(_translate("Form", "skin"))
self.labelButton_1.setStyleSheet("background-color: %s;" % number_color[1] + " color: black")
self.labelButton_1.clicked.connect(partial(Form.switch_labels, 1))
self.labelButton_2 = QtWidgets.QPushButton(Form)
self.labelButton_2.setGeometry(QtCore.QRect(top_x, top_y + 2*Lb_height + 2*row_shift, Lb_width, Lb_height))
self.labelButton_2.setObjectName("labelButton_2")
self.labelButton_2.setText(_translate("Form", "nose"))
self.labelButton_2.setStyleSheet("background-color: %s;" % number_color[2] + " color: black")
self.labelButton_2.clicked.connect(partial(Form.switch_labels, 2))
self.labelButton_4 = QtWidgets.QPushButton(Form)
self.labelButton_4.setGeometry(QtCore.QRect(top_x, top_y + 3*Lb_height + 3*row_shift, int(0.48*Lb_width), Lb_height))
self.labelButton_4.setObjectName("labelButton_4")
self.labelButton_4.setText(_translate("Form", "l_eye"))
self.labelButton_4.setStyleSheet("background-color: %s;" % number_color[4] + " color: black")
self.labelButton_4.clicked.connect(partial(Form.switch_labels, 4))
self.labelButton_5 = QtWidgets.QPushButton(Form)
self.labelButton_5.setGeometry(QtCore.QRect(top_x + int(0.54*Lb_width), top_y + 3*Lb_height + 3*row_shift, int(0.48*Lb_width), Lb_height))
self.labelButton_5.setObjectName("labelButton_5")
self.labelButton_5.setText(_translate("Form", "r_eye"))
self.labelButton_5.setStyleSheet("background-color: %s;" % number_color[5] + " color: black")
self.labelButton_5.clicked.connect(partial(Form.switch_labels, 5))
self.labelButton_6 = QtWidgets.QPushButton(Form)
self.labelButton_6.setGeometry(QtCore.QRect(top_x, top_y + 4*Lb_height + 4*row_shift, int(0.48*Lb_width), Lb_height))
self.labelButton_6.setObjectName("labelButton_6")
self.labelButton_6.setText(_translate("Form", "l_brow"))
self.labelButton_6.setStyleSheet("background-color: %s;" % number_color[6] + " color: black")
self.labelButton_6.clicked.connect(partial(Form.switch_labels, 6))
self.labelButton_7 = QtWidgets.QPushButton(Form)
self.labelButton_7.setGeometry(QtCore.QRect(top_x + int(0.54*Lb_width), top_y + 4*Lb_height + 4*row_shift, int(0.48*Lb_width), Lb_height))
self.labelButton_7.setObjectName("labelButton_7")
self.labelButton_7.setText(_translate("Form", "r_brow"))
self.labelButton_7.setStyleSheet("background-color: %s;" % number_color[7] + " color: black")
self.labelButton_7.clicked.connect(partial(Form.switch_labels, 7))
self.labelButton_3 = QtWidgets.QPushButton(Form)
self.labelButton_3.setGeometry(QtCore.QRect(top_x, top_y + 5*Lb_height + 5*row_shift, Lb_width, Lb_height))
self.labelButton_3.setObjectName("labelButton_3")
self.labelButton_3.setText(_translate("Form", "eye_g"))
self.labelButton_3.setStyleSheet("background-color: %s;" % number_color[3] + " color: black")
self.labelButton_3.clicked.connect(partial(Form.switch_labels, 3))
self.labelButton_8 = QtWidgets.QPushButton(Form)
self.labelButton_8.setGeometry(QtCore.QRect(top_x, top_y + 6*Lb_height + 6*row_shift, int(0.48*Lb_width), Lb_height))
self.labelButton_8.setObjectName("labelButton_8")
self.labelButton_8.setText(_translate("Form", "l_ear"))
self.labelButton_8.setStyleSheet("background-color: %s;" % number_color[8] + " color: black")
self.labelButton_8.clicked.connect(partial(Form.switch_labels, 8))
self.labelButton_9 = QtWidgets.QPushButton(Form)
self.labelButton_9.setGeometry(QtCore.QRect(top_x + int(0.54*Lb_width), top_y + 6*Lb_height + 6*row_shift, int(0.48*Lb_width), Lb_height))
self.labelButton_9.setObjectName("labelButton_9")
self.labelButton_9.setText(_translate("Form", "r_ear"))
self.labelButton_9.setStyleSheet("background-color: %s;" % number_color[9] + " color: black")
self.labelButton_9.clicked.connect(partial(Form.switch_labels, 9))
self.labelButton_10 = QtWidgets.QPushButton(Form)
self.labelButton_10.setGeometry(QtCore.QRect(top_x, top_y + 7*Lb_height + 7*row_shift, Lb_width, Lb_height))
self.labelButton_10.setObjectName("labelButton_10")
self.labelButton_10.setText(_translate("Form", "mouth"))
self.labelButton_10.setStyleSheet("background-color: %s;" % number_color[10] + " color: black")
self.labelButton_10.clicked.connect(partial(Form.switch_labels, 10))
self.labelButton_11 = QtWidgets.QPushButton(Form)
self.labelButton_11.setGeometry(QtCore.QRect(top_x, top_y + 8*Lb_height + 8*row_shift, Lb_width, Lb_height))
self.labelButton_11.setObjectName("labelButton_11")
self.labelButton_11.setText(_translate("Form", "u_lip"))
self.labelButton_11.setStyleSheet("background-color: %s;" % number_color[11] + " color: black")
self.labelButton_11.clicked.connect(partial(Form.switch_labels, 11))
self.labelButton_12 = QtWidgets.QPushButton(Form)
self.labelButton_12.setGeometry(QtCore.QRect(top_x, top_y + 9*Lb_height + 9*row_shift, Lb_width, Lb_height))
self.labelButton_12.setObjectName("labelButton_12")
self.labelButton_12.setText(_translate("Form", "l_lip"))
self.labelButton_12.setStyleSheet("background-color: %s;" % number_color[12] + " color: black")
self.labelButton_12.clicked.connect(partial(Form.switch_labels, 12))
########################################
row_shift, col_shift = 20, 8.1
self.labelButton_13 = QtWidgets.QPushButton(Form)
self.labelButton_13.setGeometry(QtCore.QRect(top_x, Lb_y - row_shift, Lb_width, Lb_height))
self.labelButton_13.setObjectName("labelButton_13")
self.labelButton_13.setText(_translate("Form", "hair"))
self.labelButton_13.setStyleSheet("background-color: %s;" % number_color[13] + " color: black")
self.labelButton_13.clicked.connect(partial(Form.switch_labels, 13))
self.labelButton_14 = QtWidgets.QPushButton(Form)
self.labelButton_14.setGeometry(QtCore.QRect(Lb_x, Lb_y - row_shift , Lb_width, Lb_height))
self.labelButton_14.setObjectName("labelButton_14")
self.labelButton_14.setText(_translate("Form", "hat"))
self.labelButton_14.setStyleSheet("background-color: %s;" % number_color[14] + " color: black")
self.labelButton_14.clicked.connect(partial(Form.switch_labels, 14))
self.labelButton_15 = QtWidgets.QPushButton(Form)
        self.labelButton_15.setGeometry(QtCore.QRect(int(Lb_x + 1*col_shift + 1*Lb_width),
                                        Lb_y - row_shift, Lb_width, Lb_height))
self.labelButton_15.setObjectName("labelButton_15")
self.labelButton_15.setText(_translate("Form", "ear_r"))
self.labelButton_15.setStyleSheet("background-color: %s;" % number_color[15] + " color: black")
self.labelButton_15.clicked.connect(partial(Form.switch_labels, 15))
self.labelButton_16 = QtWidgets.QPushButton(Form)
        self.labelButton_16.setGeometry(QtCore.QRect(int(Lb_x + 2*col_shift + 2*Lb_width),
                                        Lb_y - row_shift, Lb_width, Lb_height))
self.labelButton_16.setObjectName("labelButton_16")
self.labelButton_16.setText(_translate("Form", "neck_l"))
self.labelButton_16.setStyleSheet("background-color: %s;" % number_color[16] + " color: black")
self.labelButton_16.clicked.connect(partial(Form.switch_labels, 16))
self.labelButton_17 = QtWidgets.QPushButton(Form)
        self.labelButton_17.setGeometry(QtCore.QRect(int(Lb_x + 3*col_shift + 3*Lb_width),
                                        Lb_y - row_shift, Lb_width, Lb_height))
self.labelButton_17.setObjectName("labelButton_17")
self.labelButton_17.setText(_translate("Form", "neck"))
self.labelButton_17.setStyleSheet("background-color: %s;" % number_color[17] + " color: black")
self.labelButton_17.clicked.connect(partial(Form.switch_labels, 17))
self.labelButton_18 = QtWidgets.QPushButton(Form)
        self.labelButton_18.setGeometry(QtCore.QRect(int(Lb_x + 4*col_shift + 4*Lb_width),
                                        Lb_y - row_shift, Lb_width, Lb_height))
self.labelButton_18.setObjectName("labelButton_18")
self.labelButton_18.setText(_translate("Form", "cloth"))
self.labelButton_18.setStyleSheet("background-color: %s;" % number_color[18] + " color: black")
self.labelButton_18.clicked.connect(partial(Form.switch_labels, 18))
    def cb_event(self, button, checked):
        # buttonToggled passes (button, checked); avoid shadowing the id() builtin
        if button.text() == 'ALL':
            for cb in self.checkBoxGroup.buttons():
                cb.setChecked(checked)
        self.change_cb_state()
def change_cb_state(self):
checkbox_status = [cb.isChecked() for cb in self.checkBoxGroup.buttons()]
checkbox_status = checkbox_status[:19]
#self.obj_dic_back = copy.deepcopy(self.obj_dic)
self.checkbox_status = checkbox_status
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Form = QtWidgets.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
|
1663628
|
from graphics import *
import random
class TspPainter:
def __init__(self):
self.win = GraphWin('TSP', 500, 500)
self.win.setCoords(0, 0, 80, 80)
self.win.width = 100
self.coord_mat = None
self.nodes = []
self.lockers = []
self.paths = []
    def reset(self):
        # graphics objects are removed from a GraphWin via undraw(), not a
        # window-level delete()
        for pt in self.nodes:
            pt.undraw()
        self.nodes = []
        for locker in self.lockers:
            locker.undraw()
        self.lockers = []
        for path in self.paths:
            path.undraw()
        self.paths = []
def drawMap(self):
self.reset()
coord_mat = self.coord_mat
for coord in coord_mat:
pt = Point(coord[0], coord[1])
cir = Circle(pt, 0.5)
cir.setFill("black")
cir.setOutline("black")
cir.draw(self.win)
self.nodes.append(cir)
def drawLockers(self, lockers):
for locker in lockers:
pt = Point(self.coord_mat[locker.pos][0], self.coord_mat[locker.pos][1])
cir = Circle(pt, 0.5)
cir.setFill("red")
cir.setOutline("red")
cir.draw(self.win)
self.lockers.append(cir)
def drawRoutes(self, routes):
for key in routes:
color = color_rgb(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
self.drawPath(routes[key], color)
def drawPath(self, path, color):
        for i in range(len(path)):
            # the modulo wraps the last segment back to the starting node
            pack1 = path[i]
            pack2 = path[(i + 1) % len(path)]
pt1 = Point(self.coord_mat[pack1.pos][0], self.coord_mat[pack1.pos][1])
pt2 = Point(self.coord_mat[pack2.pos][0], self.coord_mat[pack2.pos][1])
line = Line(pt1, pt2)
line.setFill(color)
line.setOutline(color)
line.draw(self.win)
self.paths.append(line)
tspPainter = TspPainter()
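# usage sketch (coordinates and route structure are illustrative):
#   tspPainter.coord_mat = [(10, 10), (40, 50), (70, 20)]
#   tspPainter.drawMap()
#   tspPainter.drawRoutes(routes)  # routes: dict mapping key -> list of stops with a .pos index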
|
1663723
|
import click
@click.group()
def haproxy(**kwargs):
"""
Manage haproxy loadbalancer operations
"""
|
1663730
|
from flask import Flask, render_template, request
from BOT import Bot
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def home():
req = request.args
if 'username' in req.keys() and 'password' in req.keys() and 'dm' in req.keys() and 'message' in req.keys():
username = req['username']
password = req['password']
bot = Bot()
bot.driver.maximize_window()
bot.login(username, password)
bot.multiple_dm_followers(req['message'])
bot.logout()
if 'username' in req.keys() and 'password' in req.keys() and 'retrieve' in req.keys():
username = req['username']
password = req['password']
bot = Bot()
bot.driver.maximize_window()
bot.login(username, password)
bot.retrieve_messages_from_inbox(tolerance=2)
bot.logout()
if 'username' in req.keys() and 'password' in req.keys() and 'share' in req.keys():
username = req['username']
password = req['password']
bot = Bot()
bot.driver.maximize_window()
bot.login(username, password)
bot.share_latest_post()
bot.logout()
# print(req)
return render_template("index.html")
|
1663758
|
import sys
sys.path.insert(0, "..")
import logging
from IPython import embed
from opcua import Client
if __name__ == "__main__":
logging.basicConfig(level=logging.WARN)
client = Client("opc.tcp://localhost:53530/OPCUA/SimulationServer/")
client.load_client_certificate("server_cert.pem")
client.load_private_key("mykey.pem")
try:
client.connect()
root = client.get_root_node()
objects = client.get_objects_node()
print("childs og objects are: ", objects.get_children())
embed()
finally:
client.disconnect()
|
1663760
|
import logging
import torch
import torch.utils.data
logger = logging.getLogger(__name__)
def _get_pytorch_version():
version = torch.__version__
major, minor = [int(x) for x in version.split(".")[:2]]
if major != 1:
raise RuntimeError(
"nonechucks only supports PyTorch major version 1 at the moment."
)
if minor > 2:
        logger.warning(
"nonechucks may not work properly with this version of PyTorch ({}). "
"It has only been tested on PyTorch versions 1.0, 1.1, and 1.2".format(
version
)
)
return major, minor
MAJOR, MINOR = _get_pytorch_version()
if MINOR > 1:
SingleProcessDataLoaderIter = (
torch.utils.data.dataloader._SingleProcessDataLoaderIter
)
MultiProcessingDataLoaderIter = (
torch.utils.data.dataloader._MultiProcessingDataLoaderIter
)
else:
SingleProcessDataLoaderIter = torch.utils.data.dataloader._DataLoaderIter
MultiProcessingDataLoaderIter = torch.utils.data.dataloader._DataLoaderIter
from nonechucks.dataset import SafeDataset
from nonechucks.sampler import SafeSampler
from nonechucks.dataloader import SafeDataLoader
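# usage sketch (SafeDataset/SafeDataLoader are the package's public API;
# the dataset name is illustrative):
#   safe_ds = SafeDataset(my_dataset)             # skips samples whose __getitem__ fails
#   loader = SafeDataLoader(safe_ds, batch_size=8)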
|
1663778
|
from bxutils.logging.log_level import LogLevel
from bxcommon import constants
from bxcommon.messages.bloxroute.abstract_bloxroute_message import AbstractBloxrouteMessage
from bxgateway.messages.gateway.gateway_message_type import GatewayMessageType
class BlockPropagationRequestMessage(AbstractBloxrouteMessage):
"""
Request for other gateways to encrypt and propagate block message.
"""
MESSAGE_TYPE = GatewayMessageType.BLOCK_PROPAGATION_REQUEST
def __init__(self, blob=None, buf=None):
if buf is None:
payload_len = len(blob) + constants.CONTROL_FLAGS_LEN
buf = bytearray(self.HEADER_LENGTH + payload_len)
off = self.HEADER_LENGTH
buf[off:off + len(blob)] = blob
else:
payload_len = len(buf) - constants.BX_HDR_COMMON_OFF
self.buf = buf
super(BlockPropagationRequestMessage, self).__init__(self.MESSAGE_TYPE, payload_len, self.buf)
self._blob = None
def log_level(self):
return LogLevel.DEBUG
def blob(self):
if self._blob is None:
off = self.HEADER_LENGTH
self._blob = self.buf[off: off + self.payload_len() - constants.CONTROL_FLAGS_LEN]
return self._blob
|
1663783
|
from torch.utils.data.sampler import Sampler
## one by one
class CustomBatchSampler_Multi(Sampler):
def __init__(self, sampler):
for samp in sampler:
if not isinstance(samp, Sampler):
raise ValueError("sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}"
.format(samp))
self.samplers = sampler
self.n_samples = [len(samp) for samp in self.samplers]
self.sample_cnt = [0 for samp in self.samplers]
self.iters = [iter(samp) for samp in self.samplers]
def __iter__(self):
# for each iteration step
for ii in range(len(self)):
# if index is the even number
if ii % 2 == 0:
sampler_id = 0
else:
sampler_id = 1
            self.sample_cnt[sampler_id] += 1  # the number of used samples; one sample per iteration.
if self.sample_cnt[sampler_id] > self.n_samples[sampler_id]: ## if exceeding the number of samples, reinitialize the sampler
self.iters[sampler_id] = iter(self.samplers[sampler_id])
self.sample_cnt[sampler_id] = 1
batch = []
## starting index of the iterator
            if sampler_id == 0:
prev_idx = 0
else:
prev_idx = self.n_samples[sampler_id-1]
## include a sample in the batch
batch.append(next(self.iters[sampler_id]) + prev_idx)
yield batch
def __len__(self):
return len(self.samplers[0]) * 2
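## usage sketch (hypothetical datasets; each yielded index is offset so it
## addresses a ConcatDataset of the two underlying datasets):
##   sampler = CustomBatchSampler_Multi([RandomSampler(ds_a), RandomSampler(ds_b)])
##   loader = DataLoader(ConcatDataset([ds_a, ds_b]), batch_sampler=sampler)
##   # even steps draw one sample from ds_a, odd steps one from ds_b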
|
1663804
|
import os
import sys
import utils
import random
import datetime
import argparse
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve, roc_auc_score, precision_recall_curve
class Evaluation(object):
def __init__(self, logger, dataset):
self.logger = logger
self.dataset = dataset
self.batch_size = dataset.batch_size
def EvaluateModel(self, model, epoch):
val_names = self.dataset.val_names
input_size = self.dataset.input_size
x_batch, y_batch, _ = self.dataset.get_imgs(val_names, input_size)
loss, acc = model.evaluate(x_batch, y_batch, batch_size=self.batch_size, verbose=0)
self.logger.write_tensorboard(['valid_acc', 'valid_loss'], [acc, loss], epoch)
return loss, acc
def PredictFiles(self, model, filenames, batch_size, epoch):
self.test_names = filenames
test_images, test_labels, showlabels = self.dataset.get_imgs(filenames,
self.dataset.input_size)
# predict
pre_result = model.predict(test_images, batch_size=self.batch_size)
# decode result
result = []
for i in range(test_images.shape[0]):
result.append(decode(pre_result[i, ...]))
self.test_result = result
self.test_labels = showlabels
# acc
self.Measure_Acc(epoch)
def Measure_Acc(self, epoch):
acc = 0
for i in range(len(self.test_result)):
result = self.test_result[i]
label = self.test_labels[i]
print('GT: {}\tPre: {}'.format(label, result))
if (result == label):
acc += 1
print('*' * 30)
print('Test Accuracy : {}\n'.format(acc / len(self.test_result)))
"""
evaluation utils
"""
def decode(result):
result = np.reshape(result, (10, 4), order='F')
index = np.argmax(result, axis=0)
string = ''.join(str(ch) for ch in index)
return string
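# e.g. a length-40 network output reshaped to (10, 4) in column-major order
# gives one 10-way score column per digit position; argmax over axis 0 then
# yields a 4-character string such as "0427".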
|
1663810
|
import asyncio
import logging
from os.path import abspath,realpath,expanduser,expandvars
from importlib.util import spec_from_file_location, module_from_spec
try:
import asyncpg
has_pgsql = True
except:
has_pgsql = False
clogger = logging.getLogger("dnstap_receiver.console")
from dnstap_receiver.outputs import transform
def checking_conf(cfg):
"""validate the config"""
clogger.debug("Output handler: pgsql")
valid_conf = True
if not has_pgsql:
valid_conf = False
clogger.error("Output handler: pgsql: asyncpg dependency is missing")
if cfg["dsn"] is None:
valid_conf = False
clogger.error("Output handler: no dsn provided")
return valid_conf
async def plaintext_pgclient(output_cfg, queue, start_shutdown):
dsn = output_cfg["dsn"]
clogger.debug("Output handler: connection to %s" % (dsn,))
passfile = output_cfg["passfile"]
min_size = output_cfg["min_size"]
max_size = output_cfg["max_size"]
busy_wait = float(output_cfg["busy_wait"])
userfuncfile = output_cfg["userfuncfile"]
# importing functions to handle PostgreSQL.
# pgsql_init shall be executed once just after connection pool
    # to PostgreSQL. Usually it should contain "CREATE TABLE IF NOT
# EXISTS..."
# pgsql_main shall be executed on receiving every DNS queries.
# Usually it should be "INSERT INTO..."
# dnstap_receiver has default functions to fall back to, or
# user can define his/her own function in the 'userfuncfile'.
# For example,
# $ cp output_pgsql_userfunc.py output_pgsql_myfunc.py
# $ vi output_pgsql_myfunc.py
# and make 'userfuncfile: /path/to/output_pgsql_myfunc.py' in dnstap.conf
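    # A minimal userfuncfile sketch (the shape below mirrors the contract
    # described above; table and column names are illustrative):
    #
    #   async def pgsql_init(conn):
    #       await conn.execute("CREATE TABLE IF NOT EXISTS dnstap (message text)")
    #
    #   async def pgsql_main(tapmsg, conn):
    #       await conn.execute("INSERT INTO dnstap (message) VALUES ($1)", str(tapmsg))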
if userfuncfile is None:
clogger.debug(f"Output handler: pgsql: loading default userfuncfile.")
from .output_pgsql_userfunc import pgsql_init, pgsql_main
else:
try:
userfuncfile = abspath(realpath(expandvars(expanduser(userfuncfile))))
# Should check process euid == file owner ?
spec = spec_from_file_location('userfunc', userfuncfile)
userfunc = module_from_spec(spec)
spec.loader.exec_module(userfunc)
pgsql_init = userfunc.pgsql_init
pgsql_main = userfunc.pgsql_main
clogger.debug(f"Output handler: pgsql: loaded userfunc in {userfuncfile}.")
except:
clogger.info("Output handler: pgsql faild to load userfunc. fallback to default.")
from .output_pgsql_userfunc import pgsql_init, pgsql_main
# create connection pool to PostgreSQL server.
    async with asyncpg.create_pool(dsn=dsn, passfile=passfile, min_size=min_size, max_size=max_size, timeout=15) as pool:
clogger.debug("Output handler: pgsql connected")
# acquire a connection and execute pgsql_init()
# such as "CREATE TABLE IF NOT EXISTS..."
async with pool.acquire() as conn:
async with conn.transaction():
await pgsql_init(conn)
# consume queue
while not start_shutdown.is_set():
#clogger.debug(f'Output handler: pgsql receiving tapmsg from queue.')
# 'tapmsg = await queue.get()' will block start_shutdown_task
# to gracefully shutdown dnstap_receiver itself.
# 'queue.get_nowait()' won't block but introduces
            # busy-wait loop instead. Which do you like?
try:
tapmsg = queue.get_nowait()
except asyncio.QueueEmpty as e:
if start_shutdown.is_set():
clogger.debug('Output handler: pgsql shutting down. ')
break
else:
await asyncio.sleep(busy_wait)
continue
else:
clogger.debug(f'Output handler: pgsql received tapmsg: {tapmsg}.')
# acquire a connection and send 'INSERT...' to PostgreSQL server.
async with pool.acquire() as conn:
async with conn.transaction():
await pgsql_main(tapmsg, conn)
                clogger.debug('Output handler: pgsql INSERT dispatched.')
# done continue to next item
queue.task_done()
clogger.debug(f'Output handler: pgsql closing pool.')
    # if we reach this point without a shutdown request, the connection was lost
if not start_shutdown.is_set():
clogger.error("Output handler: pgclient connection lost")
async def handle(output_cfg, queue, metrics, start_shutdown):
"""pgsql reconnect"""
clogger.debug("Output handler: PostgreSQL enabled")
while not start_shutdown.is_set():
try:
await plaintext_pgclient(output_cfg, queue, start_shutdown)
except ConnectionRefusedError:
clogger.error('Output handler: connection to pgsql server failed!')
except asyncio.TimeoutError:
clogger.error('Output handler: connection to pgsql server timed out!')
else:
clogger.error('Output handler: connection to pgsql is closed.')
if not start_shutdown.is_set():
clogger.debug("'Output handler: retry to connect every %ss" % output_cfg["retry"])
await asyncio.sleep(output_cfg["retry"])
|
1663831
|
import os, tempfile, re, json, six, sys
def format_dynamic_params(params):
behavior_params = {}
behavior_params["behavior"] = {str(params["first_year"]): {"_" + k:v for k, v in list(params.items()) if k.startswith("BE")}}
for key in ("growdiff_response", "consumption", "growdiff_baseline"):
behavior_params[key] = {}
return behavior_params
def get_version(url_obj, attr_name, current_version):
"""
    get formatted python version of library for display on web page
"""
# need to chop off the commit reference on older runs
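    # e.g. "0.21.0.abc123" (version plus commit ref) -> "0.21.0"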
vers_disp = (getattr(url_obj, attr_name)
if getattr(url_obj, attr_name) is not None
else current_version)
# only recently start storing webapp version. for older runs display
# the current version. an alternative is to display the first stable
# version if url.webapp_version is None
if len(vers_disp.split('.')) > 3:
vers_disp = '.'.join(vers_disp.split('.')[:-1])
return vers_disp
|
1663842
|
from unittest.mock import Mock, patch, call
import pytest
from faker import Faker
import spotipy as spt
from diversify.session import SpotifySession, _get_session, _fields
fake = Faker()
Faker.seed(0)
# I'm using this project as a way to learn how to test functions
# and isolate dependencies, so there might be a bunch of useless tests here
# and overly mocked tests that tests implementation
# ------ Fixtures -------
@pytest.fixture()
def spotify_session(mocker):
"""
Creates a spotify session object with mocked dependencies
"""
mocker.patch('diversify.session.spotipy.Spotify.current_user')
mocked_get_session = mocker.patch('diversify.session._get_session')
mock_token = Mock()
mocked_get_session.return_value = spt.Spotify(auth=mock_token)
return SpotifySession()
def audio_features(song_id=None):
if song_id is None:
song_id = fake.pyint()
features = {}
features['id'] = song_id
features['speechiness'] = fake.pyfloat(min_value=0.0, max_value=1.0)
features['valence'] = fake.pyfloat(min_value=0.0, max_value=1.0)
features['mode'] = int(fake.pybool())
features['liveness'] = fake.pyfloat(min_value=0.0, max_value=1.0)
    features['key'] = fake.pyint(min_value=0, max_value=1)  # pyint requires integer bounds
features['danceability'] = fake.pyfloat(min_value=0.0, max_value=1.0)
features['instrumentalness'] = fake.pyfloat(min_value=0.0, max_value=1.0)
features['energy'] = fake.pyfloat(min_value=0.0, max_value=1.0)
features['tempo'] = fake.pyfloat(min_value=50.0, max_value=150.0)
features['loudness'] = fake.pyfloat(min_value=-60.0, max_value=1.0)
features['acousticness'] = fake.pyfloat(min_value=0.0, max_value=1.0)
return features
def song_metadata():
song_meta = {}
song_meta['id'] = fake.pyint()
song_meta['name'] = " ".join(fake.words())
song_meta['popularity'] = fake.pyint(min_value=0, max_value=100)
song_meta['duration_ms'] = fake.pyint()
song_meta['album'] = " ".join(fake.words(nb=2))
song_meta['artist'] = fake.name()
song_meta['artist_id'] = fake.pyint()
return song_meta
def paginated_object(values):
for value in values[:-1]:
result = {
'value': value,
'next': 'stub'
}
yield result
# The last page has a null field
yield {'value': values[-1],
'next': None}
# ------ Tests -------
@patch('diversify.utils.cached_token')
def test_get_session_cached_token(mock_cached_token):
# WHEN: _get_session is called with authenticate=False
result = _get_session(authenticate=False)
# THEN: a Spotify object is returned
assert isinstance(result, spt.Spotify)
# with a token from the cache
assert mock_cached_token.called
@patch('diversify.utils.login_user')
def test_get_session_from_api(mock_login_user):
# WHEN: _get_session is called with authenticate=True
result = _get_session()
# THEN: a Spotify object is returned
assert isinstance(result, spt.Spotify)
# with a token from the Spotify API instead
assert mock_login_user.called
@patch('diversify.session.spotipy.Spotify.next')
def test_session_for_all(mocked_next, spotify_session):
# GIVEN: a paginated json response from the api
initial_values = list(range(10))
pages = paginated_object(initial_values)
first_page = next(pages)
# and a parser function that returns a list
double_it = (lambda json: [2 * json['value']])
# the next function gets the next page from API
mocked_next.side_effect = (lambda json_page: next(pages))
# When _for_all is called
result = spotify_session._for_all(first_page, double_it)
# THEN: the result should be all the pages' values gathered
# into a list
assert result == [2 * value for value in initial_values]
# and next is called for every page except the last
assert mocked_next.call_count == len(initial_values) - 1
# This test is kinda unnecessary, since it mostly uses
# the DictWriter from the standard library
def test_write_csv_file(tmpdir, spotify_session):
# GIVEN: A list of audiofeatures
some_features = [audio_features() for _ in range(10)]
# and a file path
csv_file = tmpdir.join('test_features.csv')
# WHEN: write csv is called
spotify_session._write_csv(some_features, str(csv_file))
contents = csv_file.readlines()
# THEN: the contents should be written to file
assert len(some_features) + 1 == len(contents)
# with a csv header of the feature fields
assert contents[0].rstrip('\n') == ",".join(_fields)
def test_filter_audio_features():
# GIVEN: a list of audio features
some_features = [audio_features() for _ in range(10)]
for audio_feat in some_features:
audio_feat['stub'] = 'testing'
audio_feat['some_other'] = 'feature'
# WHEN: filter audio features is called
gen = SpotifySession._filter_audio_features(some_features)
result = list(gen)
# THEN: the unwanted features should be removed
assert result[0].get('stub') is None
assert result[0].get('some_other') is None
@patch('diversify.session.spotipy.Spotify.audio_features')
def test_session_get_features(mocked_audio_features, spotify_session):
# GIVEN: a spotify session and a list of spotify
# tracks
songs = [song_metadata() for _ in range(20)]
mocked_audio_features.side_effect = (lambda songs: [audio_features(song_id) for song_id in songs])
# WHEN: get_features is called
features = spotify_session.get_features(songs)
# Then a list of audio features is returned for each song
assert all([song['id'] == audio_feat['id'] for song, audio_feat in zip(songs, features)])
# and the api is called twice, since the limit is 10
assert mocked_audio_features.call_count == 2
@pytest.mark.skip(reason="still dont know how to generate base64 id from faker")
def test_get_features_should_raise_if_limit_too_high(spotify_session):
# GIVEN: a spotify session and a list of spotify tracks
songs = [song_metadata() for _ in range(30)]
# WHEN: get_features is called with a high limit
features = spotify_session.get_features(songs, limit=101)
@patch('diversify.session.SpotifySession._for_all')
@patch('diversify.session.spotipy.Spotify.current_user_saved_tracks')
def test_get_favorite_songs(
mocked_saved_tracks,
mocked_for_all,
spotify_session):
songs = [song_metadata() for _ in range(20)]
songs_po = paginated_object(songs)
first_page = next(songs_po)
mocked_saved_tracks.side_effect = (lambda limit: first_page)
result = spotify_session.get_favorite_songs()
# THEN: All of the pages with saved user songs should be gathered
assert result == mocked_for_all.return_value
# for all should be called with get_song_info and the first page
assert mocked_for_all.call_args == call(first_page, SpotifySession._get_song_info)
@patch('diversify.session.SpotifySession._for_all')
@patch('diversify.session.SpotifySession.get_features')
@patch('diversify.session.spotipy.Spotify.current_user_saved_tracks')
def test_get_favorite_songs_features(
mocked_saved_tracks,
mocked_get_features,
mocked_for_all,
spotify_session):
# mocked_for_all.side_effect = (lambda page, func: 1)
mocked_for_all.return_value = 1
# WHEN: get_favorite_songs is called with features=True
spotify_session.get_favorite_songs(features=True)
# THEN: get_features should be called with the gathered songs
# from the api
assert mocked_get_features.call_args == call(mocked_for_all.return_value)
@patch('diversify.session.SpotifySession._for_all')
@patch('diversify.session.spotipy.Spotify.user_playlists')
def test_get_user_playlists(mocked_user_playlists, mocked_for_all, spotify_session):
    # WHEN: get_user_playlists is called
    result = spotify_session.get_user_playlists()
    # Then the result should be the pages gathered by _for_all
    # (tuples with name of the playlist and song_metadata)
    assert result == mocked_for_all.return_value
|
1663857
|
from django.conf.urls import url
from app.views.api.users import views
urlpatterns = [
url(r'^$', views.api_index, name='api_users_index'),
url(r'^(?P<id_number>[0-9]+)$', views.api, name='api_users_id'),
]
|
1663899
|
import pytest
from indy_common.authorize.auth_actions import ADD_PREFIX
from indy_common.authorize.auth_constraints import AuthConstraint
from indy_common.authorize import auth_map
from indy_common.constants import NYM, ROLE, ENDORSER
from indy_node.test.auth_rule.auth_framework.basic import roles_to_string, AuthTest
from indy_node.test.auth_rule.helper import create_verkey_did
from plenum.common.exceptions import RequestRejectedException
from indy_node.test.helper import build_auth_rule_request_json
class AddNewRoleTest(AuthTest):
def __init__(self, action_id: str, creator_wallet, env):
super().__init__(env, action_id)
self.role = self.action.new_value
self.role_string = roles_to_string[self.role]
self.creator_wallet = creator_wallet
self.checker_wallet = None
def prepare(self):
self.phase_req_1 = self.get_nym()
self.phase_req_2 = self.get_nym()
self.phase_req_3 = self.get_nym()
self.default_auth_rule = self.get_default_auth_rule()
self.changed_auth_rule = self.get_changed_auth_rule()
self.nym_for_new_rule = self._get_nym_for_new_rule()
def run(self):
# Step 1. Check default auth rule
self.send_and_check(self.phase_req_1, wallet=self.creator_wallet)
# Step 2. Change auth rule
self.send_and_check(self.changed_auth_rule, wallet=self.trustee_wallet)
# Step 3. Check, that we cannot add new nym with role by old way
with pytest.raises(RequestRejectedException):
self.send_and_check(self.phase_req_2, wallet=self.creator_wallet)
# Step 4. Check, that new rule is working
self.send_and_check(self.nym_for_new_rule, wallet=self.checker_wallet)
# Step 5. Return default auth rule
self.send_and_check(self.default_auth_rule, wallet=self.trustee_wallet)
# Step 6. Check, that default auth rule works
self.send_and_check(self.phase_req_3, wallet=self.creator_wallet)
def result(self):
pass
def get_nym(self):
wh, _ = self.creator_wallet
did, verkey = create_verkey_did(self.looper, wh)
return self._build_nym(self.creator_wallet,
self.role_string,
did,
verkey=verkey,
skipverkey=False)
def _get_nym_for_new_rule(self):
wh, _ = self.checker_wallet
did, verkey = create_verkey_did(self.looper, wh)
return self._build_nym(self.checker_wallet,
self.role_string,
did,
verkey=verkey,
skipverkey=False)
def get_changed_auth_rule(self):
self.checker_wallet = self.env.role_to_wallet[ENDORSER]
constraint = AuthConstraint(role=ENDORSER,
sig_count=1,
need_to_be_owner=False)
return build_auth_rule_request_json(
self.looper, self.creator_wallet[1],
auth_action=ADD_PREFIX,
auth_type=NYM,
field=ROLE,
new_value=self.role,
constraint=constraint.as_dict
)
class AddNewTrusteeTest(AddNewRoleTest):
def __init__(self, env, action_id=auth_map.add_new_trustee.get_action_id()):
super().__init__(action_id, env.sdk_wallet_trustee, env)
class AddNewStewardTest(AddNewRoleTest):
def __init__(self, env, action_id=auth_map.add_new_steward.get_action_id()):
super().__init__(action_id, env.sdk_wallet_trustee, env)
class AddNewEndorserTest(AddNewRoleTest):
def __init__(self, env, action_id=auth_map.add_new_endorser.get_action_id()):
super().__init__(action_id, env.sdk_wallet_trustee, env)
class AddNewNetworkMonitorTest(AddNewRoleTest):
def __init__(self, env, action_id=auth_map.add_new_network_monitor.get_action_id()):
super().__init__(action_id, env.sdk_wallet_trustee, env)
class AddNewIdentityOwnerTest(AddNewRoleTest):
def __init__(self, env, action_id=auth_map.add_new_identity_owner.get_action_id()):
super().__init__(action_id, env.sdk_wallet_trustee, env)
|
1663906
|
import qdarkstyle
import sys
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from Voicelab.VoicelabWizard.InputTab import InputTab
from Voicelab.VoicelabWizard.OutputTab import OutputTab
from Voicelab.VoicelabWizard.SettingsTab import SettingsTab
from Voicelab.VoicelabWizard.VoicelabController import VoicelabController
from Voicelab.default_settings import available_functions, default_functions
class VoicelabWizard(QMainWindow):
# triggers when the list of loaded files has changed with the active list of files
on_files_changed = pyqtSignal(list)
# triggers when a setting is changed with the list of active settings for the respective function
on_settings_changed = pyqtSignal(dict)
# triggers when the list of active functions is changed with the list of active functions
on_functions_changed = pyqtSignal(dict)
# triggers when the pipeline is done processing all of the files with the results
on_processing_completed = pyqtSignal(dict)
# triggers on each node finishing with the node name, the start, current, and end count of processed nodes
on_progress_updated = pyqtSignal(str, int, int, int)
def __init__(self):
super().__init__()
self.setWindowIcon(QIcon('favicon.ico'))
# signals are created once and passed into each tab
# TODO: this may be possible using a singleton class or some other OOP way
self.voicelab_signals = {
"on_files_changed": self.on_files_changed,
"on_settings_changed": self.on_settings_changed,
"on_functions_changed": self.on_functions_changed,
"on_processing_completed": self.on_processing_completed,
"on_progress_update": self.on_progress_updated,
}
# Specifies the default size of the window, this should be long enough to have all the settings without a slider
self.setMinimumSize(QSize(800, 680))
# Specifies the default title, simply change the string to change this
self.setWindowTitle("Voice Lab: Reproducible Automated Voice Analysis")
central_widget = QWidget(self)
self.setCentralWidget(central_widget)
central_layout = QGridLayout(self)
central_widget.setLayout(central_layout)
self.tabs = QTabWidget()
central_layout.addWidget(self.tabs)
self.data_controller = VoicelabController()
        # links the progress updating on the data controller side to a pyqt signal that can be listened to anywhere
self.data_controller.progress_callback = lambda node, start, current, end: self.voicelab_signals[
"on_progress_update"
].emit(
node.node_id, start, current, end
)
# load all of the functions specified in the default settings file
for fn in available_functions:
self.data_controller.load_function(
fn, available_functions[fn], default=fn in default_functions
)
self.tabs.addTab(
InputTab(
self.data_controller, self.voicelab_signals, self.tabs, parent=self
),
"Load Voices",
)
self.tabs.addTab(
SettingsTab(
self.data_controller, self.voicelab_signals, self.tabs, parent=self
),
"Settings",
)
self.tabs.addTab(
OutputTab(
self.data_controller, self.voicelab_signals, self.tabs, parent=self
),
"Results",
)
if __name__ == "__main__":
# boilerplate pyqt window creation
app = QApplication(sys.argv)
# setup stylesheet
app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
w = VoicelabWizard()
w.show()
sys.exit(app.exec_())
|
1663907
|
import PIL
import torch
from PIL import Image
import torch.nn as nn
from log_utils import get_logger
from feature_transforms import wct, wct_mask
from encoder_decoder_factory import Encoder, Decoder
import torchvision.transforms.functional as transforms
log = get_logger()
def stylize(level, content, style0, encoders, decoders, alpha, svd_device, cnn_device, interpolation_beta=None, style1=None, mask_mode=None, mask=None):
log.debug('Stylization up to ReLu' + str(level) + ' of content sized: ' + str(content.size()) + ' and style sized: ' + str(style0.size()))
with torch.no_grad():
if mask_mode:
cf = encoders[level](content).data.to(device=svd_device).squeeze(0)
s0f = encoders[level](style0).data.to(device=svd_device).squeeze(0)
s1f = encoders[level](style1).data.to(device=svd_device).squeeze(0)
log.debug('mask-mode: content features size: ' + str(cf.size()) + ', style 0 features size: ' + str(s0f.size()) + ', style 1 features size: ' + str(s1f.size()))
            cf_channels, cf_height, cf_width = cf.size(0), cf.size(1), cf.size(2)
            # the feature tensor is (C, H, W); resize the mask to the feature map's spatial size
            mask = transforms.to_tensor(transforms.resize(mask, (cf_height, cf_width), interpolation=PIL.Image.NEAREST))
mask_view = mask.view(-1)
mask_view = torch.gt(mask_view, 0.5)
foreground_mask_ix = (mask_view == 1).nonzero().type(torch.LongTensor)
background_mask_ix = (mask_view == 0).nonzero().type(torch.LongTensor)
log.debug('mask-mode: ' + str((foreground_mask_ix.nelement() / mask_view.nelement()) * 100) + '% of the mask is foreground')
cf_view = cf.view(cf_channels, -1)
cf_fground_masked = torch.index_select(cf_view, 1, foreground_mask_ix.view(-1)).view(cf_channels, foreground_mask_ix.nelement())
cf_bground_masked = torch.index_select(cf_view, 1, background_mask_ix.view(-1)).view(cf_channels, background_mask_ix.nelement())
csf_fground = wct_mask(cf_fground_masked, s0f)
csf_bground = wct_mask(cf_bground_masked, s1f)
csf = torch.zeros_like(cf_view)
csf.index_copy_(1, foreground_mask_ix.view(-1), csf_fground)
csf.index_copy_(1, background_mask_ix.view(-1), csf_bground)
csf = csf.view_as(cf)
csf = alpha * csf + (1.0 - alpha) * cf
csf = csf.unsqueeze(0).to(device=cnn_device)
elif interpolation_beta:
cf = encoders[level](content).data.to(device=svd_device).squeeze(0)
s0f = encoders[level](style0).data.to(device=svd_device).squeeze(0)
s1f = encoders[level](style1).data.to(device=svd_device).squeeze(0)
log.debug('interpolation-mode: content features size: ' + str(cf.size()) + ', style 0 features size: ' + str(s0f.size()) + ', style 1 features size: ' + str(s1f.size()))
csf = wct(alpha, cf, s0f, s1f, interpolation_beta).to(device=cnn_device)
else:
cf = encoders[level](content).data.to(device=svd_device).squeeze(0)
s0f = encoders[level](style0).data.to(device=svd_device).squeeze(0)
log.debug('transfer-mode: content features size: ' + str(cf.size()) + ', style features size: ' + str(s0f.size()))
csf = wct(alpha, cf, s0f).to(device=cnn_device)
return decoders[level](csf)
class SingleLevelWCT(nn.Module):
def __init__(self, args):
super(SingleLevelWCT, self).__init__()
self.svd_device = torch.device('cpu') # on average svd takes 4604ms on cpu vs gpu 5312ms on a 512x512 content/591x800 style (comprehensive of data transferring)
self.cnn_device = args.device
self.alpha = args.alpha
self.beta = args.beta
if args.mask:
self.mask_mode = True
self.mask = Image.open(args.mask).convert('1')
else:
self.mask_mode = False
self.mask = None
self.e5 = Encoder(5)
self.encoders = [self.e5]
self.d5 = Decoder(5)
self.decoders = [self.d5]
def forward(self, content_img, style_img, additional_style_flag=False, style_img1=None):
if additional_style_flag:
out = stylize(0, content_img, style_img, self.encoders, self.decoders, self.alpha, self.svd_device,
self.cnn_device, interpolation_beta=self.beta, style1=style_img1, mask_mode=self.mask_mode, mask=self.mask)
else:
out = stylize(0, content_img, style_img, self.encoders, self.decoders, self.alpha, self.svd_device,
self.cnn_device)
return out
class MultiLevelWCT(nn.Module):
def __init__(self, args):
super(MultiLevelWCT, self).__init__()
self.svd_device = torch.device('cpu')
self.cnn_device = args.device
self.alpha = args.alpha
self.beta = args.beta
if args.mask:
self.mask_mode = True
self.mask = Image.open(args.mask).convert('1')
else:
self.mask_mode = False
self.mask = None
self.e1 = Encoder(1)
self.e2 = Encoder(2)
self.e3 = Encoder(3)
self.e4 = Encoder(4)
self.e5 = Encoder(5)
self.encoders = [self.e5, self.e4, self.e3, self.e2, self.e1]
self.d1 = Decoder(1)
self.d2 = Decoder(2)
self.d3 = Decoder(3)
self.d4 = Decoder(4)
self.d5 = Decoder(5)
self.decoders = [self.d5, self.d4, self.d3, self.d2, self.d1]
def forward(self, content_img, style_img, additional_style_flag=False, style_img1=None):
for i in range(len(self.encoders)):
if additional_style_flag:
content_img = stylize(i, content_img, style_img, self.encoders, self.decoders, self.alpha, self.svd_device,
self.cnn_device, interpolation_beta=self.beta, style1=style_img1, mask_mode=self.mask_mode, mask=self.mask)
else:
content_img = stylize(i, content_img, style_img, self.encoders, self.decoders, self.alpha, self.svd_device,
self.cnn_device)
return content_img
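# usage sketch (an argparse-style `args` object is assumed, carrying the
# attributes read by the constructors: device, alpha, beta, mask):
#   model = MultiLevelWCT(args)
#   stylized = model(content_img, style_img)  # (1, C, H, W) tensors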
|
1663969
|
from .client import HTTPClient
from .httperror import HTTPError
__all__ = (
"HTTPClient",
"HTTPError",
)
|
1663980
|
import scipy.signal
import numpy as np
from .cltools import HAVE_PYOPENCL, OpenCL_Helper
if HAVE_PYOPENCL:
import pyopencl
mf = pyopencl.mem_flags
#~ from pyacq.dsp.overlapfiltfilt import SosFiltfilt_Scipy
from .tools import FifoBuffer, median_mad
def offline_signal_preprocessor(sigs, sample_rate, common_ref_removal=True,
highpass_freq=300., lowpass_freq=None, output_dtype='float32', normalize=True, **unused):
#cast
sigs = sigs.astype(output_dtype)
#filter
if highpass_freq is not None:
b, a = scipy.signal.iirfilter(5, highpass_freq/sample_rate*2, analog=False,
btype = 'highpass', ftype = 'butter', output = 'ba')
filtered_sigs = scipy.signal.filtfilt(b, a, sigs, axis=0)
else:
filtered_sigs = sigs.copy()
if lowpass_freq is not None:
b, a = scipy.signal.iirfilter(5, lowpass_freq/sample_rate*2, analog=False,
btype = 'lowpass', ftype = 'butter', output = 'ba')
filtered_sigs = scipy.signal.filtfilt(b, a, filtered_sigs, axis=0)
# common reference removal
if common_ref_removal:
filtered_sigs = filtered_sigs - np.median(filtered_sigs, axis=1)[:, None]
# normalize
if normalize:
#~ med = np.median(filtered_sigs, axis=0)
#~ mad = np.median(np.abs(filtered_sigs-med),axis=0)*1.4826
med, mad = median_mad(filtered_sigs, axis=0)
normed_sigs = (filtered_sigs - med)/mad
else:
normed_sigs = filtered_sigs
return normed_sigs.astype(output_dtype)
def estimate_medians_mads_after_preprocesing(sigs, sample_rate, **params):
params2 = dict(params)
params2['normalize'] = False
filtered_sigs = offline_signal_preprocessor(sigs, sample_rate, **params2)
med, mad = median_mad(filtered_sigs, axis=0)
return med, mad
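# usage sketch (synthetic signals; parameter values are illustrative):
#   sigs = np.random.randn(20000, 4).astype('float32')
#   med, mad = estimate_medians_mads_after_preprocesing(sigs, 20000., highpass_freq=300.)
#   normed = offline_signal_preprocessor(sigs, 20000., highpass_freq=300.)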
class SignalPreprocessor_base:
def __init__(self,sample_rate, nb_channel, chunksize, input_dtype):
self.sample_rate = sample_rate
self.nb_channel = nb_channel
self.chunksize = chunksize
self.input_dtype = input_dtype
def change_params(self, common_ref_removal=True,
highpass_freq=300.,
lowpass_freq=None,
smooth_size=0,
output_dtype='float32',
normalize=True,
pad_width = None,
signals_medians=None, signals_mads=None):
self.signals_medians = signals_medians
self.signals_mads = signals_mads
self.common_ref_removal = common_ref_removal
self.highpass_freq = highpass_freq
self.lowpass_freq = lowpass_freq
self.smooth_size = int(smooth_size)
self.output_dtype = np.dtype(output_dtype)
self.normalize = normalize
self.pad_width = pad_width
# set default pad_width if none is provided
if self.pad_width is None or self.pad_width<=0:
assert self.highpass_freq is not None, 'pad_width=None needs a highpass_freq'
self.pad_width = int(self.sample_rate/self.highpass_freq*3)
#~ print('self.pad_width', self.pad_width)
self.chunksize_1pad = self.chunksize + self.pad_width
self.chunksize_2pad = self.chunksize + 2 * self.pad_width
#~ print('self.pad_width', self.pad_width)
#~ print('self.chunksize_1pad', self.chunksize_1pad)
#~ assert self.chunksize_1pad>self.chunksize
self.coefficients = np.zeros((0, 6))
nyquist = self.sample_rate/2.
if self.highpass_freq is not None:
if self.highpass_freq>0 and self.highpass_freq<nyquist:
coeff_hp = scipy.signal.iirfilter(5, highpass_freq/self.sample_rate*2, analog=False,
btype = 'highpass', ftype = 'butter', output = 'sos')
self.coefficients = np.concatenate((self.coefficients, coeff_hp))
if self.lowpass_freq is not None:
if self.lowpass_freq>0 and self.lowpass_freq<nyquist:
#~ if self.lowpass_freq>(self.sample_rate/2.):
#~ self.lowpass_freq=(self.sample_rate/2.01)
coeff_lp = scipy.signal.iirfilter(5, lowpass_freq/self.sample_rate*2, analog=False,
btype = 'lowpass', ftype = 'butter', output = 'sos')
self.coefficients = np.concatenate((self.coefficients, coeff_lp))
if self.smooth_size>0:
b0 = (1./3)**.5
b1 = (1-b0)
b2 = 0.
coeff_smooth = np.array([[b0, b1, b2, 1,0,0]], dtype=self.output_dtype)
coeff_smooth = np.tile(coeff_smooth, (self.smooth_size, 1))
self.coefficients = np.concatenate((self.coefficients, coeff_smooth))
if self.coefficients.shape[0]==0:
#this is the null filter
self.coefficients = np.array([[1, 0, 0, 1,0,0]], dtype=self.output_dtype)
        self.nb_section = self.coefficients.shape[0]
self.forward_buffer = FifoBuffer((self.chunksize_1pad, self.nb_channel), self.output_dtype)
self.zi = np.zeros((self.nb_section, 2, self.nb_channel), dtype= self.output_dtype)
#~ print('self.normalize', self.normalize)
if self.normalize:
assert self.signals_medians is not None
assert self.signals_mads is not None
def process_buffer(self, data):
        # used for offline processing, where parallelization is possible
        raise NotImplementedError
    def initialize_stream(self):
        # must be called for each new segment, when the stream index restarts
        raise NotImplementedError
    def process_buffer_stream(self, pos, data):
        # used in real-time mode, when chunks are given one after another
        raise NotImplementedError
class SignalPreprocessor_Numpy(SignalPreprocessor_base):
"""
    This applies chunk by chunk to a multi-channel signal:
    * baseline removal
    * high-pass filtfilt
    * normalization (optional)
"""
def process_buffer(self, data):
data = data.astype(self.output_dtype)
processed_data = scipy.signal.sosfiltfilt(self.coefficients, data, axis=0)
        # TODO: find out why sosfiltfilt reverses strides!!!
processed_data = np.ascontiguousarray(processed_data, dtype=self.output_dtype)
# removal ref
if self.common_ref_removal:
processed_data -= np.median(processed_data, axis=1)[:, None]
#normalize
if self.normalize:
processed_data -= self.signals_medians
processed_data /= self.signals_mads
return processed_data
def process_buffer_stream(self, pos, data):
# TODO rewrite this with self.process_buffer()
#Online filtfilt
chunk = data.astype(self.output_dtype)
forward_chunk_filtered, self.zi = scipy.signal.sosfilt(self.coefficients, chunk, zi=self.zi, axis=0)
forward_chunk_filtered = forward_chunk_filtered.astype(self.output_dtype)
self.forward_buffer.new_chunk(forward_chunk_filtered, index=pos)
backward_chunk = self.forward_buffer.buffer
backward_filtered = scipy.signal.sosfilt(self.coefficients, backward_chunk[::-1, :], zi=None, axis=0)
backward_filtered = backward_filtered[::-1, :]
backward_filtered = backward_filtered.astype(self.output_dtype)
pos2 = pos-self.pad_width
if pos2<0:
return None, None
i1 = self.chunksize_1pad-self.pad_width-chunk.shape[0]
i2 = self.chunksize
assert i1<i2
data2 = backward_filtered[i1:i2]
if (pos2-data2.shape[0])<0:
data2 = data2[data2.shape[0]-pos2:]
# removal ref
if self.common_ref_removal:
data2 -= np.median(data2, axis=1)[:, None]
#normalize
if self.normalize:
data2 -= self.signals_medians
data2 /= self.signals_mads
return pos2, data2
def initialize_stream(self):
self.forward_buffer.reset()
self.zi[:] = 0
class SignalPreprocessor_OpenCL(SignalPreprocessor_base, OpenCL_Helper):
"""
    Implementation in OpenCL; depending on the hardware and nb_channel
    this can lead to a small speed improvement...
"""
def __init__(self,sample_rate, nb_channel, chunksize, input_dtype):
SignalPreprocessor_base.__init__(self,sample_rate, nb_channel, chunksize, input_dtype)
def _check_data(self, data):
if not data.flags['C_CONTIGUOUS'] or data.dtype!=self.output_dtype:
data = np.ascontiguousarray(data, dtype=self.output_dtype)
return data
def process_buffer(self, data):
data = self._check_data(data)
#~ print(data.shape, self.chunksize, self.chunksize_2pad, self.pad_width)
#~ assert data.shape[0] == self.chunksize_2pad
if data.shape[0] == self.chunksize_2pad:
# OK
unpad = 0
        elif data.shape[0] < self.chunksize_2pad:
            # zero-pad up to chunksize_2pad, keeping the actual data at the front
            unpad = self.chunksize_2pad - data.shape[0]
            data_pad = np.zeros((self.chunksize_2pad, data.shape[1]), dtype=data.dtype)
            data_pad[:data.shape[0], :] = data
            data = data_pad
        else:
            raise ValueError(f'data has wrong shape {data.shape[0]} (expected at most {self.chunksize_2pad})')
event = pyopencl.enqueue_copy(self.queue, self.input_2pad_cl, data)
event = self.kern_forward_backward_filter(self.queue, (self.nb_channel,), (self.nb_channel,),
self.input_2pad_cl, self.coefficients_cl, self.zi1_cl, self.zi2_cl,
self.signals_medians_cl, self.signals_mads_cl, self.output_2pad_cl)
#~ event.wait()
event = pyopencl.enqueue_copy(self.queue, self.output_2pad, self.output_2pad_cl)
event.wait()
data2 = self.output_2pad.copy()
        if self.common_ref_removal:
            # at the moment common_ref_removal is done on CPU,
            # so to avoid an extra transfer normalize is also done on CPU
            # TODO implement OpenCL for removal ref
            data2 -= np.median(data2, axis=1)[:, None]
            # normalize
            if self.normalize:
                # OpenCL handles this when there is no common_ref_removal
                data2 -= self.signals_medians
                data2 /= self.signals_mads
if unpad > 0:
data2 = data2[:-unpad, :]
return data2
def process_buffer_stream(self, pos, data):
assert data.shape[0]==self.chunksize
data = self._check_data(data)
#Online filtfilt
event = pyopencl.enqueue_copy(self.queue, self.input_cl, data)
event = self.kern_stream_forward_backward_filter(self.queue, (self.nb_channel,), (self.nb_channel,),
self.input_cl, self.coefficients_cl, self.zi1_cl, self.zi2_cl,
self.fifo_input_backward_cl, self.signals_medians_cl, self.signals_mads_cl, self.output_backward_cl)
event.wait()
#~ event.wait()
start = pos-self.chunksize_1pad
if start<-self.pad_width:
return None, None
pos2 = pos-self.pad_width
event = pyopencl.enqueue_copy(self.queue, self.output_backward, self.output_backward_cl)
if start>0:
data2 = self.output_backward[:self.chunksize, :]
else:
data2 = self.output_backward[self.pad_width:self.chunksize, :]
data2 = data2.copy()
        if self.common_ref_removal:
            # at the moment common_ref_removal is done on CPU,
            # so to avoid an extra transfer normalize is also done on CPU
            # TODO implement OpenCL for removal ref
            data2 -= np.median(data2, axis=1)[:, None]
            # normalize
            if self.normalize:
                # OpenCL handles this when there is no common_ref_removal
                data2 -= self.signals_medians
                data2 /= self.signals_mads
return pos2, data2
def change_params(self, **kargs):
cl_platform_index=kargs.pop('cl_platform_index', None)
cl_device_index=kargs.pop('cl_device_index', None)
ctx=kargs.pop('ctx', None)
queue=kargs.pop('queue', None)
OpenCL_Helper.initialize_opencl(self,cl_platform_index=cl_platform_index, cl_device_index=cl_device_index, ctx=ctx, queue=queue)
SignalPreprocessor_base.change_params(self, **kargs)
assert self.output_dtype=='float32', 'SignalPreprocessor_OpenCL support only float32 at the moment'
assert self.pad_width<self.chunksize, 'OpenCL fifo work only for self.pad_width<self.chunksize'
self.coefficients = np.ascontiguousarray(self.coefficients, dtype=self.output_dtype)
#~ print(self.coefficients.shape)
# this is for stream processing
self.zi1 = np.zeros((self.nb_channel, self.nb_section, 2), dtype= self.output_dtype)
self.zi2 = np.zeros((self.nb_channel, self.nb_section, 2), dtype= self.output_dtype)
self.output_forward = np.zeros((self.chunksize, self.nb_channel), dtype= self.output_dtype)
self.fifo_input_backward = np.zeros((self.chunksize_1pad, self.nb_channel), dtype= self.output_dtype)
self.output_backward = np.zeros((self.chunksize_1pad, self.nb_channel), dtype= self.output_dtype)
#GPU buffers
self.coefficients_cl = pyopencl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.coefficients)
self.zi1_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.zi1)
self.zi2_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.zi2)
self.input_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE, size=self.output_forward.nbytes)
self.output_forward_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE, size=self.output_forward.nbytes)
self.fifo_input_backward_cl = pyopencl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.fifo_input_backward)
self.output_backward_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE, size=self.output_backward.nbytes)
if self.signals_medians is not None:
self.signals_medians_cl = pyopencl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.signals_medians)
self.signals_mads_cl = pyopencl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.signals_mads)
else:
self.signals_medians_cl = pyopencl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=np.zeros(self.nb_channel, dtype= self.output_dtype))
self.signals_mads_cl = pyopencl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=np.zeros(self.nb_channel, dtype= self.output_dtype))
# this is for offline processing
self.input_2pad = np.zeros((self.chunksize_2pad, self.nb_channel), dtype= self.output_dtype)
self.output_2pad = np.zeros((self.chunksize_2pad, self.nb_channel), dtype= self.output_dtype)
self.input_2pad_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.input_2pad)
self.output_2pad_cl = pyopencl.Buffer(self.ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=self.output_2pad)
#CL prog
if not self.common_ref_removal and self.normalize:
extra_code_nomalize = _extra_code_nomalize
extra_code_nomalize2 = _extra_code_nomalize2
else:
extra_code_nomalize = ''
extra_code_nomalize2 = ''
kernel_formated = processor_kernel%dict(chunksize=self.chunksize, chunksize_1pad=self.chunksize_1pad,
chunksize_2pad=self.chunksize_2pad,
pad_width=self.pad_width, nb_section=self.nb_section, nb_channel=self.nb_channel,
extra_code_nomalize=extra_code_nomalize, extra_code_nomalize2=extra_code_nomalize2)
#~ print(kernel_formated)
prg = pyopencl.Program(self.ctx, kernel_formated)
self.opencl_prg = prg.build(options='-cl-mad-enable')
self.max_wg_size = self.ctx.devices[0].get_info(pyopencl.device_info.MAX_WORK_GROUP_SIZE)
self.kern_stream_forward_backward_filter = getattr(self.opencl_prg, 'stream_forward_backward_filter')
self.kern_forward_backward_filter = getattr(self.opencl_prg, 'forward_backward_filter')
def initialize_stream(self):
self.output_forward[:] = 0
event = pyopencl.enqueue_copy(self.queue, self.output_backward_cl, self.output_backward)
event.wait()
self.zi1[:] = 0
event = pyopencl.enqueue_copy(self.queue, self.zi1_cl, self.zi1)
event.wait()
self.zi2[:] = 0
event = pyopencl.enqueue_copy(self.queue, self.zi2_cl, self.zi2)
event.wait()
processor_kernel = """
#define chunksize %(chunksize)d
#define chunksize_1pad %(chunksize_1pad)d
#define chunksize_2pad %(chunksize_2pad)d
#define pad_width %(pad_width)d
#define nb_section %(nb_section)d
#define nb_channel %(nb_channel)d
__kernel void sos_filter(__global float *input, __global float *output, __constant float *coefficients,
__global float *zi, int local_chunksize, int direction, int out_offset_index) {
int chan = get_global_id(0); //channel indice
int offset_filt2; //offset channel within section
int offset_zi = chan*nb_section*2;
int idx;
float w0, w1,w2;
float res;
for (int section=0; section<nb_section; section++){
//offset_filt2 = chan*nb_section*6+section*6;
offset_filt2 = section*6;
w1 = zi[offset_zi+section*2+0];
w2 = zi[offset_zi+section*2+1];
for (int s=0; s<local_chunksize;s++){
if (direction==1) {idx = s*nb_channel+chan;}
else if (direction==-1) {idx = (local_chunksize-s-1)*nb_channel+chan;}
if (section==0) {w0 = input[idx];}
else {w0 = output[idx+out_offset_index];}
w0 -= coefficients[offset_filt2+4] * w1;
w0 -= coefficients[offset_filt2+5] * w2;
res = coefficients[offset_filt2+0] * w0 + coefficients[offset_filt2+1] * w1 + coefficients[offset_filt2+2] * w2;
w2 = w1; w1 =w0;
output[idx+out_offset_index] = res;
}
zi[offset_zi+section*2+0] = w1;
zi[offset_zi+section*2+1] = w2;
}
}
__kernel void stream_forward_backward_filter(__global float *input,
__constant float * coefficients,
__global float * zi1,
__global float * zi2,
__global float *fifo_input_backward,
__global float *signals_medians,
__global float *signals_mads,
__global float *output_backward){
int chan = get_global_id(0); //channel indice
//roll
for (int s=0; s<pad_width;s++){
fifo_input_backward[(s)*nb_channel+chan] = fifo_input_backward[(s+chunksize)*nb_channel+chan];
}
int out_offset_index = pad_width*nb_channel;
sos_filter(input, fifo_input_backward, coefficients, zi1, chunksize, 1, out_offset_index);
//set zi2 to zeros
for (int s=0; s<nb_section;s++){
        zi2[chan*nb_section*2+s*2] = 0;
        zi2[chan*nb_section*2+s*2+1] = 0;
}
//filter backward
sos_filter(fifo_input_backward, output_backward, coefficients, zi2, chunksize_1pad, -1, 0);
    // normalize optional
%(extra_code_nomalize)s
}
__kernel void forward_backward_filter(__global float *input,
__constant float * coefficients,
__global float * zi1,
__global float * zi2,
__global float *signals_medians,
__global float *signals_mads,
__global float *output){
int chan = get_global_id(0); //channel indice
sos_filter(input, input, coefficients, zi1, chunksize_2pad, 1, 0);
//filter backward
sos_filter(input, output, coefficients, zi2, chunksize_2pad, -1, 0);
    // normalize optional
%(extra_code_nomalize2)s
}
"""
_extra_code_nomalize = """
float v;
for (int s=0; s<chunksize;s++){
v = output_backward[(s)*nb_channel+chan];
output_backward[(s)*nb_channel+chan] = (v - signals_medians[chan]) / signals_mads[chan];
}
"""
_extra_code_nomalize2 = """
float v;
for (int s=0; s<chunksize_2pad;s++){
v = output[(s)*nb_channel+chan];
output[(s)*nb_channel+chan] = (v - signals_medians[chan]) / signals_mads[chan];
}
"""
signalpreprocessor_engines = { 'numpy' : SignalPreprocessor_Numpy,
'opencl' : SignalPreprocessor_OpenCL}
|
1664009
|
def armsFront():
i01.moveHead(90,90)
i01.moveArm("left",13,115,100,50)
i01.moveArm("right",13,115,100,50)
i01.moveHand("left",50,24,54,50,82,0)
i01.moveHand("right",50,24,54,50,82,180)
i01.moveTorso(90,90,90)
|
1664010
|
from pathlib import Path
from libdotfiles.packages import has_installed, try_install
from libdotfiles.util import (
HOME_DIR,
PKG_DIR,
create_symlink,
distro_name,
run,
)
FZF_DIR = HOME_DIR / ".fzf"
if distro_name() == "arch":
try_install("fzf") # super opener
try_install("ripgrep") # super grep
elif distro_name() == "linuxmint":
if not has_installed("ripgrep"):
run(
[
"curl",
"-LO",
"https://github.com/BurntSushi/ripgrep/releases/download/11.0.2/ripgrep_11.0.2_amd64.deb",
],
check=True,
)
run(["sudo", "dpkg", "-i", "ripgrep_11.0.2_amd64.deb"], check=True)
run(
[
"git",
"clone",
"--depth",
"1",
"https://github.com/junegunn/fzf.git",
FZF_DIR,
],
check=True,
)
run(
[
FZF_DIR / "install",
"--key-bindings",
"--completion",
"--no-update-rc",
],
check=True,
)
create_symlink(PKG_DIR / "agignore", HOME_DIR / ".agignore")
|
1664056
|
from abc import ABC, abstractmethod
class ConfigStore(ABC):
data = {}
@abstractmethod
def read(self):
pass
@abstractmethod
def write(self, data: dict):
pass
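# Minimal sketch of a concrete store (a hypothetical JSON-backed subclass):
#
#   import json
#
#   class JsonConfigStore(ConfigStore):
#       def __init__(self, path: str):
#           self.path = path
#       def read(self):
#           with open(self.path) as f:
#               self.data = json.load(f)
#           return self.data
#       def write(self, data: dict):
#           with open(self.path, "w") as f:
#               json.dump(data, f)
#           self.data = data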
|
1664117
|
from schematics.types import StringType
from schematics.exceptions import ValidationError
from openprocurement.tender.core.models import BaseDocument, Model, get_tender
class Document(BaseDocument):
documentOf = StringType(
required=True,
choices=["tender", "item"],
default="tender"
)
def validate_relatedItem(self, data, relatedItem):
if not relatedItem and data.get("documentOf") in ["item"]:
raise ValidationError("This field is required.")
parent = data["__parent__"]
if relatedItem and isinstance(parent, Model):
tender = get_tender(parent)
items = [i.id for i in tender.items if i]
if data.get("documentOf") == "item" and relatedItem not in items:
raise ValidationError("relatedItem should be one of items")
|
1664179
|
import io
from typing import Any, Callable, Dict, List, Optional, Set
import determined as det
import determined.keras
train_begin = "on_train_begin"
train_workload_begin = "on_train_workload_begin"
train_batch_begin = "on_train_batch_begin"
train_batch_end = "on_train_batch_end"
train_workload_end = "on_train_workload_end"
test_begin = "on_test_begin"
test_batch_begin = "on_test_batch_begin"
test_batch_end = "on_test_batch_end"
test_end = "on_test_end"
epoch_begin = "on_epoch_begin"
epoch_end = "on_epoch_end"
train_end = "on_train_end"
get_state = "get_state"
load_state = "load_state"
all_checks = []
def cb_check(fn: Callable) -> Callable:
all_checks.append(fn)
return fn
def do_check_with_table(lines: List[str], transitions: Dict[str, Set[Optional[str]]]) -> None:
state = None
i_prev = None
for i, line in enumerate(lines):
cb = line.split(":")[0]
if cb in transitions:
assert (
state in transitions[cb]
), f"illegal callback {cb} on line {i} after {state} on line {i_prev}"
state = cb
i_prev = i
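# Each transition table maps a callback name to the set of callbacks (or None,
# meaning "start of the log") after which it may legally occur; e.g.
# {train_batch_end: {train_batch_begin}} asserts a batch may only end
# immediately after it began.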
@cb_check
def check_train_begin_and_end(lines: List[str], **kwargs: Dict) -> None:
assert lines[0].startswith(train_begin), "first call was not on_train_begin"
assert lines[-1].startswith(train_end), "last call was not on_train_end"
@cb_check
def check_pause_continues(lines: List[str], **kwargs: Dict) -> None:
do_check_with_table(
lines,
{
train_begin: {None},
load_state: {train_begin, get_state},
get_state: {train_begin, load_state, get_state},
train_end: {train_begin, load_state, get_state},
},
)
@cb_check
def check_initial_calls(lines: List[str], **kwargs: Dict) -> None:
"""
    Always expect the following callbacks before the first training batch:
    - on_epoch_begin
    - on_train_workload_begin
    (except in the case of continued training, of course)
"""
expect = {epoch_begin, train_workload_begin}
remain = {epoch_begin, train_workload_begin}
for i, line in enumerate(lines):
cb = line.split(":")[0]
if cb in expect:
assert cb in remain, f"got two {cb} on line {i}"
remain.remove(cb)
elif line.startswith("on_train_batch_begin"):
assert len(remain) == 0, f"still expecting {remain} on line {i}"
break
@cb_check
def check_train_callbacks(lines: List[str], **kwargs: Dict) -> None:
"""
    Check the nesting of train callbacks: batches are wrapped by
    train_workload_{begin,end}, which in turn sit between train_{begin,end}.
"""
do_check_with_table(
lines,
{
train_begin: {None},
train_workload_begin: {train_begin, train_workload_end},
train_batch_begin: {train_workload_begin, train_batch_end},
train_batch_end: {train_batch_begin},
train_workload_end: {train_batch_end},
train_end: {train_workload_end},
},
)
@cb_check
def check_test_callbacks(lines: List[str], **kwargs: Dict) -> None:
"""
All test_{begin,end} calls should happen outside of train_workload_{begin,end} periods.
"""
do_check_with_table(
lines,
{
train_begin: {None},
train_workload_begin: {train_begin, train_workload_end, test_end},
train_workload_end: {train_workload_begin},
test_begin: {train_begin, train_workload_end},
test_batch_begin: {test_begin, test_batch_end},
test_batch_end: {test_batch_begin},
test_end: {test_batch_end},
train_end: {test_end, train_workload_end},
},
)
@cb_check
def check_sane_epochs(lines: List[str], **kwargs: Dict) -> None:
do_check_with_table(
lines,
{
train_begin: {None},
epoch_begin: {train_begin, epoch_end},
epoch_end: {epoch_begin},
},
)
@cb_check
def check_validation_and_epoch_counts(lines: List[str], **kwargs: Dict) -> None:
"""Ensure that validation_period and epoch indices increment by 1 each time."""
counts = {"on_epoch": 0, "on_validation_period": 0}
for i, line in enumerate(lines):
cb = line.split(":")[0]
for prefix in counts:
if cb.startswith(prefix):
cb_idx = int(line.split(":")[1])
if cb.endswith("begin"):
"got on_validation_period_begin for index 0 but expected 0 on line {i}"
assert (
cb_idx == counts[prefix]
), f"got {cb} for index {cb_idx} but expected {counts[prefix]} on line {i}"
if cb.endswith("end"):
assert (
cb_idx == counts[prefix]
), f"got {cb} for index {cb_idx} but expected {counts[prefix]} on line {i}"
counts[prefix] += 1
@cb_check
def check_epoch_ends(lines: List[str], **kwargs: Dict) -> None:
"""Ensure the correct number of epochs were called"""
count = kwargs.get("epochs")
if not isinstance(count, int):
return
seen = 0
for i, line in enumerate(lines):
if line.startswith(epoch_end):
seen += 1
assert seen <= count, f"saw {epoch_end} {seen} on line {i} but expected only {count}"
assert seen == count, f"expected {count} {epoch_end} calls but only saw {seen}"
@cb_check
def check_test_ends(lines: List[str], **kwargs: Dict) -> None:
"""Ensure the correct number of validation period ends were called"""
count = kwargs.get("validations")
if not isinstance(count, int):
return
seen = 0
for i, line in enumerate(lines):
if line.startswith(test_end):
seen += 1
assert seen <= count, f"saw {test_end} {seen} on line {i} but expected only {count}"
assert seen == count, f"expected {count} {test_end} calls but only saw {seen}"
class CBChecker(det.keras.callbacks.Callback):
def __init__(self, epochs: Optional[int] = None, validations: Optional[int] = None) -> None:
super().__init__()
self.log = io.StringIO()
self.epochs = epochs
self.validations = validations
def on_train_begin(self, logs: Optional[Dict]) -> None:
print(f"{train_begin}:{logs}", file=self.log)
def on_train_end(self, logs: Optional[Dict]) -> None:
print(f"{train_end}:{logs}", file=self.log)
lines = self.log.getvalue().splitlines()
try:
for check in all_checks:
check(lines, epochs=self.epochs, validations=self.validations)
except AssertionError:
for i, line in enumerate(lines):
print(f"{i}:\t{line}")
raise
def on_test_begin(self, logs: Optional[Dict]) -> None:
print(f"{test_begin}:{logs}", file=self.log)
def on_test_end(self, logs: Optional[Dict]) -> None:
print(f"{test_end}:{logs}", file=self.log)
def on_epoch_begin(self, epoch: int, logs: Optional[Dict]) -> None:
print(f"{epoch_begin}:{epoch}:{logs}", file=self.log)
def on_epoch_end(self, epoch: int, logs: Optional[Dict]) -> None:
print(f"{epoch_end}:{epoch}:{logs}", file=self.log)
def on_train_batch_begin(self, batch: int, logs: Optional[Dict]) -> None:
print(f"{train_batch_begin}:{batch}:{logs}", file=self.log)
def on_train_batch_end(self, batch: int, logs: Optional[Dict]) -> None:
print(f"{train_batch_end}:{batch}:{logs}", file=self.log)
def on_test_batch_begin(self, batch: int, logs: Optional[Dict]) -> None:
print(f"{test_batch_begin}:{batch}:{logs}", file=self.log)
def on_test_batch_end(self, batch: int, logs: Optional[Dict]) -> None:
print(f"{test_batch_end}:{batch}:{logs}", file=self.log)
def on_train_workload_begin(
self, batches_trained: int, batches_requested: Optional[int], logs: Dict
) -> None:
print(f"{train_workload_begin}:{batches_trained}:{batches_requested}:{logs}", file=self.log)
def on_train_workload_end(self, batches_trained: int, logs: Dict) -> None:
print(f"{train_workload_end}:{batches_trained}:{logs}", file=self.log)
def get_state(self) -> Any:
print(f"{get_state}:", file=self.log)
return self.log.getvalue()
def load_state(self, state: Any) -> None:
self.log = io.StringIO(state)
        # Seek to the end of the StringIO.
pos, whence = 0, 2
self.log.seek(pos, whence)
print(f"{load_state}:", file=self.log)
|
1664200
|
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
try:
    import cPickle as pickle  # Python 2
except ImportError:
    import pickle  # Python 3
import argparse
import logging
from time import time
import numpy as np
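# streamer lazily yields one stripped line (one document) per iteration, so
# the vectorizer can consume corpora larger than memory.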
class streamer(object):
def __init__(self, file_name):
self.file_name=file_name
def __iter__(self):
for s in open(self.file_name):
yield s.strip()
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
# categories = None
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Computes Cross-Entropy (TFIDF) weights of a raw text dataset and stores the model.')
parser.add_argument("--dataset", help="The path to the raw text dataset file",
required=True)
parser.add_argument("--cout", help="The path to the cross-entropy output model file",
default="output_tfidf.pk")
parser.add_argument("--minc", help="The minimum word frequency considered to compute CE weight.",
default=2, type=int)
parser.add_argument("--tf", help="TF normalization: none, binary, sublinear (default=none).", default="none")
parser.add_argument("--stop", help="Toggles stop words stripping.", action="store_true")
parser.add_argument("--lsa", help="Toggles LSA computation.", default=0, type=int)
parser.add_argument("--news", help="Toggles making analysis of predefined dataset.", action="store_true")
args = parser.parse_args()
t0 = time()
if not args.news:
corpus=streamer(args.dataset)
        vectorizer = TfidfVectorizer(min_df=args.minc,
                                     encoding="latin-1",
                                     decode_error="replace",
                                     lowercase=False,
                                     binary=args.tf.startswith("bin"),
                                     sublinear_tf=args.tf.startswith("subl"),
                                     stop_words="english" if args.stop else None)
        # lsa < 0: keep the fitted vectorizer model itself; otherwise keep the weight matrix
        X = vectorizer.fit(corpus) if args.lsa < 0 else vectorizer.fit_transform(corpus)
else:
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("%d documents" % len(dataset.data))
        # fixed vocabulary cap and IDF weighting for the newsgroups demo
        vectorizer = TfidfVectorizer(max_df=0.5, max_features=10000,
                                     min_df=2, stop_words='english',
                                     use_idf=True)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
    if args.lsa <= 0:
        with open(args.cout, 'wb') as fout:
            pickle.dump(X, fout)
        print("TF-IDF weights saved...")
        exit()
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import Normalizer
from sklearn.pipeline import make_pipeline
svd = TruncatedSVD(args.lsa)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print ("Saving vectors to: %s" % args.cout)
np.savetxt(args.cout,X)
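# Example invocations (hypothetical file names; the script name is assumed):
#   python tfidf_weights.py --dataset corpus.txt --cout weights.pk --tf sublinear --stop
#   python tfidf_weights.py --dataset corpus.txt --lsa 100 --cout vectors.txt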
|
1664218
|
from tempfile import NamedTemporaryFile
from typing import IO, Optional
from fastapi.middleware.cors import CORSMiddleware
from bson import ObjectId
from fastapi import FastAPI, UploadFile, File, HTTPException, Header, Depends, BackgroundTasks
from pybadges import badge
from pydantic import BaseModel, Field, validator
from keras.models import load_model
from Utils.ImageTools import ImageToArrayPreprocessor
from PrePorcessor.Preprocessor import SimplePreprocessor
from dataset.SimpleDatasetLoader import SimpleDatasetLoader
import cv2
from pymongo import MongoClient
import os
from uvicorn import run
from starlette import status
import shutil
from datetime import datetime
import keras
from fastapi.requests import Request
from fastapi.responses import FileResponse
from fastapi.responses import Response, StreamingResponse
print(keras.__version__)
app = FastAPI()
from fastapi.templating import Jinja2Templates
ClassLabels = ["covid", "normal", "vira neumonia"]
uploads_collection = MongoClient(host=os.environ["DATABASE_URL"]).get_database("COVID19").get_collection("uploads")
origins = [
"*"
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
class UploadCollectionDataModel(BaseModel):
file_name: str
system_predict: str
    user_recommend: Optional[str] = None
    create_at: datetime = Field(default_factory=datetime.now)  # evaluated per document, not at import
is_correct: bool = False
@validator("system_predict")
def system_predict_validator(cls, v):
if v not in ClassLabels:
raise HTTPException(detail="invalid label", status_code=400)
return v
class UpdateUserRecommend(BaseModel):
file_id: str
user_recommend: str
@validator("user_recommend")
def system_predict_validator(cls, v):
if v not in ClassLabels:
raise HTTPException(detail=f"valid label is {ClassLabels} ", status_code=400)
return v
@validator("file_id")
def validate_file_id(cls, v):
if not ObjectId.is_valid(v):
raise HTTPException(detail="invalid file id", status_code=400)
return v
class DataBase:
@staticmethod
def add_new_uploaded_file(data: UploadCollectionDataModel):
item = uploads_collection.insert_one(data.dict())
return str(item.inserted_id)
@staticmethod
    def add_user_predict_to_file(data: UpdateUserRecommend):
        exist = uploads_collection.find_one({"_id": ObjectId(data.file_id)})
        same = exist['system_predict'] == data.user_recommend
        uploads_collection.update_one({"_id": ObjectId(data.file_id)}, {
            "$set": {
                "user_recommend": data.user_recommend,
                # field name matches UploadCollectionDataModel.is_correct so
                # the stats aggregation can see user corrections
                "is_correct": same
            }
        })
class LabelImage:
Model_Path = './SavedModel/model_keras_215.hdf5'
UploadFolder = './Files/'
@staticmethod
def path_creator(image_name: str) -> str:
return f"{LabelImage.UploadFolder}{image_name}"
@staticmethod
async def label_the_image(image_path: str, file_name: str, user_recommendation: str):
# print(image_name)
# image_path = LabelImage.path_creator(image_name)
image = cv2.imread(image_path)
size = 50
sp = SimplePreprocessor(size, size)
iap = ImageToArrayPreprocessor()
sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
(data, labels) = sdl.single_load(image_path)
data = data.astype("float") / 255.0
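        # load_model() re-reads the network from disk on every request;
        # nothing is cached between calls, so each prediction pays the full
        # model-loading cost.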
model = load_model(LabelImage.Model_Path)
predict = model.predict(data, batch_size=size).argmax(axis=1)[0]
cv2.putText(image, "Label: {}".format(ClassLabels[predict]),
(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
cv2.imwrite(Tools.get_predicted_file_name(file_name), image)
        is_correct: bool = user_recommendation == ClassLabels[predict]
file_id = DataBase.add_new_uploaded_file(
UploadCollectionDataModel(file_name=file_name, system_predict=ClassLabels[predict],
user_recommend=user_recommendation, is_correct=is_correct))
return ClassLabels[predict], file_id
class Tools:
@staticmethod
def get_secure_file_name(file_name: str):
return f"{datetime.now().timestamp()}.{file_name.split('.')[-1]}"
@staticmethod
def get_predicted_file_name(file_name: str):
return f"./Files/predicted_{file_name}"
@staticmethod
def pagination(page=1):
PAGE_SIZE = 15
x = page - 1
skip = PAGE_SIZE * x
return skip, PAGE_SIZE
@staticmethod
def mongo_id_fix(data: dict):
data["_id"] = str(data["_id"])
return data
async def valid_content_length(content_length: int = Header(..., lt=80_000)):
return content_length
@app.post("/upload/x-ray")
async def create_upload_file(
file: UploadFile = File(...),
        user_recommendation: Optional[str] = None
):
real_file_size = 0
temp: IO = NamedTemporaryFile(delete=False)
secure_file_name = Tools.get_secure_file_name(file.filename)
    for chunk in file.file:
        real_file_size += len(chunk)
        if real_file_size > 5_000_000:
            raise HTTPException(
                status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE,
                detail="too large, file should be at most 5 MB",
            )
        temp.write(chunk)
    temp.close()
    image_path = f"./Files/{secure_file_name}"
    shutil.move(temp.name, image_path)
result, file_id = await LabelImage.label_the_image(image_path, secure_file_name, user_recommendation)
return {"predict": result, "file_id": file_id}
templates = Jinja2Templates(directory="templates")
@app.get("/")
async def index(request: Request):
return templates.TemplateResponse("index.html", {"request": request})
@app.get("/{name}")
async def read_item(request: Request, name: str = None):
if name is not None:
        return FileResponse(path=f'./templates/{name}')
return templates.TemplateResponse("index.html", {"request": request})
@app.get("/get_last_uploads_result")
def get_last_result():
    temp = uploads_collection.aggregate([
        {
            "$group": {
                # _id=None groups the whole collection into a single bucket
                "_id": None,
                "total_correct_predict": {
                    "$sum": {"$cond": ["$is_correct", 1, 0]}
                },
                "total_wrong_predict": {
                    "$sum": {"$cond": ["$is_correct", 0, 1]}
                },
                "total_uploaded": {"$sum": 1}
            }
        }
    ])
    resp_2 = list(temp)[0]
resp_2.pop("_id")
resp_2["train_accuracy"] = "99%"
resp_2["test_accuracy"] = '91%'
resp_2["model_in_use"] = 'model_keras_215.hdf5'
return {
"stats": resp_2
}
@app.get("/github/stats/{name}.svg")
def get_stats_icon(name: str):
valid = ["total_correct_predict",
'total_wrong_predict',
"total_uploaded",
"test_accuracy",
"train_accuracy",
"model_in_use"]
if name not in valid:
raise HTTPException(detail="not found", status_code=404)
result = get_last_result()['stats'][name]
s = badge(left_text=name.replace('_', ' '), right_text=str(result),
right_color='green' if name != 'total_wrong_predict' else 'red')
return Response(content=s, media_type='image/svg+xml', status_code=200)
if __name__ == '__main__':
run(app)
|