max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
tests/test_check_types.py | oliel/python-ovirt-engine-sdk4 | 3 | 17200 | <gh_stars>1-10
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ovirtsdk4.services as services
import ovirtsdk4.types as types
import unittest
from nose.tools import (
assert_in,
assert_raises,
)
from .server import TestServer
class CheckTypesTest(unittest.TestCase):
    """Checks that SDK service calls validate parameter types and raise
    TypeError with an informative message naming the offending parameter.

    Migrated from the unmaintained ``nose`` helpers to the equivalent
    stdlib ``unittest`` assertions (the class already extends TestCase),
    so the tests run on modern Python without nose installed.
    """

    def test_service_type_error(self):
        """
        Test that calling a method with multiple wrong parameter types
        generates an informative exception.
        """
        vm_service = services.VmService(None, None)
        with self.assertRaises(TypeError) as context:
            vm_service.start(
                use_cloud_init='true',  # wrong: should be a bool
                vm=types.Disk(),        # wrong: should be a types.Vm
            )
        message = str(context.exception)
        # Both offending parameters must be reported in the same message.
        self.assertIn(
            "The 'use_cloud_init' parameter should be of type 'bool', "
            "but it is of type 'str'",
            message
        )
        self.assertIn(
            "The 'vm' parameter should be of type 'Vm', but it is of "
            "type 'Disk'",
            message
        )

    def test_locator_type_error(self):
        """
        Test that calling a service locator with a wrong parameter type
        generates an informative exception.
        """
        vms_service = services.VmsService(None, None)
        with self.assertRaises(TypeError) as context:
            vms_service.vm_service(types.Vm())
        message = str(context.exception)
        self.assertIn(
            "The 'id' parameter should be of type 'str', but it is of "
            "type 'Vm'.",
            message
        )
| 2.484375 | 2 |
nsi/shell.py | NextStepInnovation/nsi-tools | 0 | 17201 | <filename>nsi/shell.py<gh_stars>0
import os
import io
import sys
import subprocess
import shlex
import logging
from threading import Timer
from typing import Callable, Any, List
from pathlib import Path # noqa: for doctest
import tempfile # noqa: for doctest
from .toolz import (
merge, map, pipe, curry, do, cprint
)
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
def start_timeout(command: List[str], process: subprocess.Popen,
                  timeout: int):
    """Arm a watchdog that kills *process* after *timeout* seconds.

    Returns the started :class:`threading.Timer` so the caller can
    cancel it once the process finishes on its own.
    """
    # https://www.blog.pythonlibrary.org/2016/05/17/python-101-how-to-timeout-a-subprocess/
    def _expire():
        log.warning(f'Process ({command[0]}) timeout expired.')
        return process.kill()

    watchdog = Timer(timeout, _expire)
    watchdog.start()
    return watchdog
@curry
def shell_iter(command, *, echo: bool = True,
               echo_func: Callable[[Any], None] = cprint(file=sys.stderr,
                                                         end=''),
               timeout: int = None, **popen_kw):
    '''Execute a shell command, yield lines of output as they come
    possibly echoing command output to a given echo_func, and finally
    yields the status code of the process.

    This will run the shell command, yielding each line of output as
    it runs. When the process terminates, it will then yield the
    remainder of output, then finally the integer status code. It can
    also be terminated early via a timeout parameter. By default, the
    command will also echo to stderr.

    Args:
        command (str): Shell command to execute. Tilde (~) and shell
            variable completion provided
        echo (bool): Should the output be echoed to echo_func in
            addition to yielding lines of output?
        echo_func (Callable[[Any], None]): Function to use when echoing
            output. **Be warned**, this function is called __for each
            character__ of output. By default, this is `cprint(end='')`
            (i.e. print with end='')
        timeout (int): If set, the process will be killed after this
            many seconds (kill -9).

    Returns: generator of the form

        *output_lines, status_code = shell_iter(...)

    where output_lines is a sequence of strings of output and
    status_code is an integer status code

    Examples:

    >>> with tempfile.TemporaryDirectory() as tempdir:
    ...     root = Path(tempdir)
    ...     _ = Path(root, 'a.txt').write_text('')
    ...     _ = Path(root, 'b.txt').write_text('')
    ...     # FYI, this echos to stderr, which doctests won't capture
    ...     *lines, status = shell_iter(f'ls {root}')
    >>> lines
    ['a.txt', 'b.txt']
    >>> status
    0
    >>> with tempfile.TemporaryDirectory() as tempdir:
    ...     root = Path(tempdir)
    ...     _ = Path(root, 'c.txt').write_text('')
    ...     _ = Path(root, 'd.txt').write_text('')
    ...     *lines, _ = shell_iter(f'ls {root}', echo=False)
    >>> lines
    ['c.txt', 'd.txt']
    >>> *lines, status = shell_iter(
    ...     f'sleep 5', echo=False, timeout=0.01
    ... )
    >>> lines
    []
    >>> status
    -9
    '''
    popen_kw = merge({
        'stdout': subprocess.PIPE,
        'stderr': subprocess.STDOUT,
    }, popen_kw)
    # Expand ~ and environment variables in every token of the command.
    command_split = pipe(
        shlex.split(command),
        map(os.path.expanduser),
        map(os.path.expandvars),
        tuple,
    )
    process = subprocess.Popen(command_split, **popen_kw)
    timer = None
    if timeout:
        timer = start_timeout(command_split, process, timeout)

    line = ''
    # Stream one character at a time while the process is alive so
    # callers (and echo_func) see output with minimal latency.
    while process.poll() is None:
        char = process.stdout.read(1).decode('utf-8', errors='ignore')
        if not char:
            continue
        if echo:
            echo_func(char)
        if char == '\n':
            yield line
            line = ''
        else:
            line += char
    if timer:
        timer.cancel()
    # Drain whatever remained buffered in the pipe after exit.
    for char in process.stdout.read().decode('utf-8', errors='ignore'):
        if echo:
            echo_func(char)
        if char == '\n':
            yield line
            line = ''
        else:
            line += char
    if line:
        # Bug fix: this branch used to call echo_func(char) again,
        # re-echoing the last character of an unterminated final line.
        # Every character has already been echoed above, so just yield.
        yield line
    yield process.poll()
@curry
def shell(command, **kw):
    '''Run a shell command to completion and return (status, output).

    Thin wrapper around shell_iter that collects every yielded line and
    joins them with newlines.

    Args:
        command (str): Shell command to execute. Tilde (~) and shell
            variable completion provided
        echo (bool): Should the output be echoed to echo_func in
            addition to being collected?
        echo_func (Callable[[Any], None]): Function to use when echoing
            output. **Be warned**, this function is called __for each
            character__ of output. By default, this is `cprint(end='')`
            (i.e. print with end='')
        timeout (int): If set, the process will be killed after this
            many seconds (kill -9).

    Examples:

    >>> with tempfile.TemporaryDirectory() as tempdir:
    ...     root = Path(tempdir)
    ...     _ = Path(root, 'a.txt').write_text('')
    ...     _ = Path(root, 'b.txt').write_text('')
    ...     # FYI, this echos to stderr, which doctests won't capture
    ...     status, output = shell(f'ls {root}')
    >>> output == "a.txt\\nb.txt"
    True
    >>> status
    0
    >>> with tempfile.TemporaryDirectory() as tempdir:
    ...     root = Path(tempdir)
    ...     _ = Path(root, 'c.txt').write_text('')
    ...     _ = Path(root, 'd.txt').write_text('')
    ...     _, output = shell(f'ls {root}', echo=False)
    >>> output == 'c.txt\\nd.txt'
    True
    >>> status, output = shell(
    ...     f'sleep 5', echo=False, timeout=0.01
    ... )
    >>> output == ""
    True
    >>> status
    -9
    '''
    results = list(shell_iter(command, **kw))
    status, lines = results[-1], results[:-1]
    return status, '\n'.join(lines)
@curry
def getoutput(command, **kw):
    """Run *command* via shell() and return only its output string,
    discarding the exit status.  Keyword arguments are forwarded."""
    return shell(command, **kw)[1]
@curry
def shell_pipe(command, stdin, *, timeout: int = None, **popen_kw):
    '''Execute a shell command with stdin content and return command
    output as a string.

    Args:
        command (str): Shell command to execute. Tilde (~) and shell
            variable completion provided
        stdin (str): String content to provide to process stdin
        timeout (int): If set, the process will be killed after this
            many seconds (kill -9).

    Examples:

    >>> with tempfile.TemporaryDirectory() as tempdir:
    ...     root = Path(tempdir)
    ...     _ = Path(root, 'a.txt').write_text('')
    ...     _ = Path(root, 'b.txt').write_text('')
    ...     _ = Path(root, 'ab.txt').write_text('')
    ...     output = pipe(
    ...         getoutput(f'ls {root}'),
    ...         shell_pipe('grep a')
    ...     )
    >>> sorted(output.strip().split()) == ["a.txt", "ab.txt"]
    True
    '''
    popen_kw = merge({
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE,
        'stdin': subprocess.PIPE,
    }, popen_kw)
    # Expand ~ and environment variables in every token of the command.
    command_split = pipe(
        shlex.split(command),
        map(os.path.expanduser),
        map(os.path.expandvars),
        tuple,
    )
    process = subprocess.Popen(command_split, **popen_kw)
    timer = None
    if timeout:
        # Bug fix: start_timeout() was previously called with no
        # arguments, raising TypeError whenever a timeout was requested.
        timer = start_timeout(command_split, process, timeout)
    stdout, stderr = process.communicate(
        stdin.encode('utf-8', errors='ignore')
    )
    if timer:
        timer.cancel()
    return stdout.decode('utf-8', errors='ignore')
| 2.484375 | 2 |
ICLR_2022/Cubic_10D/PIVEN/DataGen.py | streeve/PI3NN | 11 | 17202 | """
Data creation:
Load the data, normalize it, and split into train and test.
"""
'''
Added the capability of loading pre-separated UCI train/test data
function LoadData_Splitted_UCI
'''
import numpy as np
import os
import pandas as pd
import tensorflow as tf
# Root directory holding the UCI dataset .txt files read by create_data.
DATA_PATH = "../UCI_Datasets"
class DataGenerator:
    """Builds train/test splits for the experiments.

    Supports a synthetic 10-D cubic benchmark, the standard UCI
    regression datasets (loaded from DATA_PATH), and pre-split UCI
    train/test CSV files.  The normalization constants of the target
    column are kept on the instance (scale_c / shift_c) so metrics can
    be un-normalized later.
    """

    def __init__(self, dataset_name):
        # Short dataset name used by create_data (unused by the
        # synthetic cubic generator).
        self.dataset_name = dataset_name
        # used for metrics calculation
        self.scale_c = None # std
        self.shift_c = None # mean

    def create_cubic_10D_data(self):
        """Generate the synthetic y = sum(x_i^3)/10 + noise problem.

        Training inputs are drawn from N(0, 1); test inputs from
        N(2, 1), i.e. deliberately out-of-distribution.  Returns
        (X_train, y_train, X_val, y_val), all standardized with the
        training-set mean/std.
        """
        Npar = 10      # input dimensionality
        Ntrain = 5000  # number of training samples
        Nout = 1       # NOTE(review): unused below — presumably output dim
        Ntest = 1000   # number of test samples
        # x_train = tf.random.uniform(shape=(Ntrain, Npar))*4.0-2.0
        x_train = tf.random.normal(shape=(Ntrain, Npar))
        y_train = x_train ** 3
        # Target: mean cubic response plus unit Gaussian noise.
        y_train = tf.reduce_sum(y_train, axis=1, keepdims=True)/10.0 + 1.0*tf.random.normal([x_train.shape[0], 1])
        # x_test = tf.random.uniform(shape=(Ntest, Npar))
        # x_test[:,1] = x_test[:,1] + 4.0
        # x_test = np.random.uniform(size=(Ntest,Npar))
        # x_test[:,1] = x_test[:,1] + 4.0
        x_test = np.random.normal(size=(Ntest,Npar)) + 2.0
        x_test = tf.convert_to_tensor(x_test, dtype=tf.float32)
        # NOTE(review): scale_c here is the std of the test *inputs*,
        # whereas create_data uses the std of the targets — confirm this
        # is intentional.  shift_c is also left unset by this method.
        scale_c = np.std(x_test.eval(session=tf.compat.v1.Session()))
        y_test = x_test ** 3
        y_test = tf.reduce_sum(y_test, axis=1, keepdims=True)/10.0 + 1.0*tf.random.normal([x_test.shape[0], 1])
        ### to Numpy array in TF1 compat environment using TF2
        x_train = x_train.eval(session=tf.compat.v1.Session())
        y_train = y_train.eval(session=tf.compat.v1.Session())
        x_test = x_test.eval(session=tf.compat.v1.Session())
        y_test = y_test.eval(session=tf.compat.v1.Session())
        ### normalization
        # Standardize inputs and targets with training-set statistics.
        x_mean = np.mean(x_train, axis=0)
        x_std = np.std(x_train,axis=0)
        xtrain_normal = (x_train - x_mean)/x_std
        y_mean = np.mean(y_train,axis=0)
        y_std = np.std(y_train,axis=0)
        ytrain_normal = (y_train - y_mean)/y_std
        # Test data is normalized with the *training* mean/std.
        xvalid_normal = (x_test - x_mean) / x_std
        yvalid_normal = (y_test - y_mean) / y_std
        X_train = xtrain_normal
        y_train = ytrain_normal
        X_val = xvalid_normal
        y_val = yvalid_normal
        self.scale_c = scale_c
        return X_train, y_train, X_val, y_val

    def create_data(self, seed_in=5, train_prop=0.9):
        """
        Load a UCI dataset, normalize every column, and split it into
        train/test sets.  Returns (X_train, y_train, X_val, y_val).

        @param seed_in: seed for numpy random seed
        @param train_prop: train proportion
        """
        np.random.seed(seed_in)
        # load UCI data
        dataset = self.dataset_name
        dataset_path = f"{DATA_PATH}/{dataset}.txt"
        if dataset == 'YearPredictionMSD':
            data = np.loadtxt(dataset_path, delimiter=',')
        elif dataset == 'naval':
            data = np.loadtxt(dataset_path)
            data = data[:, :-1] # have 2 y as GT, ignore last
        else:
            data = np.loadtxt(dataset_path)
        # save normalization constants (used for calculating results)
        if dataset == 'YearPredictionMSD':
            scale_c = np.std(data[:, 0]) # in YearPredictionMSD, label's index = 0
            shift_c = np.mean(data[:, 0])
        else:
            scale_c = np.std(data[:, -1])
            shift_c = np.mean(data[:, -1])
        # normalize data (every column, including the target)
        for i in range(data.shape[1]):
            sdev_norm = np.std(data[:, i])
            sdev_norm = 0.001 if sdev_norm == 0 else sdev_norm # avoid zero variance features
            data[:, i] = (data[:, i] - np.mean(data[:, i])) / sdev_norm
        # split train test
        if dataset == 'YearPredictionMSD':
            # train: first 463,715 examples
            # test: last 51,630 examples
            train = data[:463715, :]
            test = data[-51630:, :]
        else:
            # split into train/test in random
            perm = np.random.permutation(data.shape[0])
            train_size = int(round(train_prop * data.shape[0]))
            train = data[perm[:train_size], :]
            test = data[perm[train_size:], :]
        # split to target and data
        if dataset == 'YearPredictionMSD':
            y_train = train[:, 0].reshape(-1, 1)
            X_train = train[:, 1:]
            y_val = test[:, 0].reshape(-1, 1)
            X_val = test[:, 1:]
        else:
            y_train = train[:, -1].reshape(-1, 1)
            X_train = train[:, :-1]
            y_val = test[:, -1].reshape(-1, 1)
            X_val = test[:, :-1]
        self.scale_c = scale_c
        self.shift_c = shift_c
        return X_train, y_train, X_val, y_val

    def LoadData_Splitted_UCI(self, loadCSVName, original_data_path, splitted_data_path, split_seed, **kwargs):
        """Load pre-split UCI train/test CSVs, normalizing both with the
        column statistics of the *original* (full) dataset.

        @param loadCSVName: short dataset name (e.g. 'boston', 'wine')
        @param original_data_path: directory holding the original UCI files
        @param splitted_data_path: directory holding xyTrain_/xyTest_ CSVs
        @param split_seed: seed suffix embedded in the pre-split file names
        """
        ## (1) Load the original data for the normalization purpose
        # current_dir = os.path.dirname(__file__)
        # uci_dir = os.path.join(current_dir, 'UCI_datasets')
        uci_dir = original_data_path
        if loadCSVName == 'boston':
            data = np.loadtxt(os.path.join(uci_dir, 'boston-housing/boston_housing.txt'))
        if loadCSVName == 'concrete':
            data_df = pd.read_excel(os.path.join(uci_dir, 'concrete/Concrete_Data.xls'))
            data = data_df.values
        if loadCSVName == 'energy':
            data_df = pd.read_excel(os.path.join(uci_dir, 'energy-efficiency/ENB2012_data.xlsx'), engine='openpyxl')
            data_df = data_df.dropna(how='all', axis='columns')
            data_df = data_df.dropna(how='all', axis='rows')
            data = data_df.values
        if loadCSVName == 'kin8nm':
            data_df = pd.read_csv(os.path.join(uci_dir, 'kin8nm/dataset_2175_kin8nm.csv'), sep=',')
            data = data_df.values
        if loadCSVName == 'naval':
            data = np.loadtxt(os.path.join(uci_dir, 'naval/data.txt'))
        if loadCSVName == 'power':
            data_df = pd.read_excel(os.path.join(uci_dir, 'power-plant/Folds5x2_pp.xlsx'), engine='openpyxl')
            data = data_df.values
        if loadCSVName == 'protein':
            data_df = pd.read_csv(os.path.join(uci_dir, 'protein/CASP.csv'), sep=',')
            # print(data_df)
            '''Move the Y data (originally located at the first column) to last column in order to keep consistency
            with the normalization process'''
            col_names = data_df.columns.tolist()
            col_names.append(col_names[0])
            del col_names[col_names.index(col_names[0])]
            # print(col_names)
            data_df = data_df[col_names]
            # print(data_df)
            data = data_df.values
        if loadCSVName == 'wine':
            data_df = pd.read_csv(os.path.join(uci_dir, 'wine-quality/winequality-red.csv'), sep=';')
            data = data_df.values
        if loadCSVName == 'yacht':
            data = np.loadtxt(os.path.join(uci_dir, 'yacht/yacht_hydrodynamics.data'))
        if loadCSVName == 'MSD':
            with open(os.path.join(uci_dir, 'song/YearPredictionMSD.npy'), 'rb') as f:
                data = np.load(f)
        ## (2) Load the pre-splitted train/test data
        ##
        xyTrain_load = np.loadtxt(splitted_data_path+'xyTrain_'+loadCSVName+'_seed_'+str(split_seed)+'.csv', delimiter=',')
        xyTest_load = np.loadtxt(splitted_data_path+'xyTest_'+loadCSVName+'_seed_'+str(split_seed)+'.csv', delimiter=',')
        xyTrain_load = xyTrain_load.astype(np.float32)
        # xyValid_load = xyValid_load.astype(np.float32)
        xyTest_load = xyTest_load.astype(np.float32)
        # original normalization functions
        # work out normalisation constants (need when unnormalising later)
        scale_c = np.std(data[:, -1])
        shift_c = np.mean(data[:, -1])
        # normalise data
        num_cols = xyTrain_load.shape[1]
        print('num cols: {}'.format(num_cols))
        for i in range(0, num_cols):
            # get the sdev_norm from original data
            sdev_norm = np.std(data[:, i])
            sdev_norm = 0.001 if sdev_norm == 0 else sdev_norm
            # apply on the pre-splitted data
            xyTrain_load[:, i] = (xyTrain_load[:, i] - np.mean(data[:, i]) )/sdev_norm
            xyTest_load[:, i] = (xyTest_load[:, i] - np.mean(data[:, i]) )/sdev_norm
            # xyValid_load[:, i] = (xyValid_load[:, i] - np.mean(data[:, i]) )/sdev_norm
        if loadCSVName == 'energy' or loadCSVName == 'naval':
            xTrain = xyTrain_load[:, :-2] ## all columns except last two columns as inputs
            yTrain = xyTrain_load[:, -1] ## last column as output
            xTest = xyTest_load[:, :-2]
            yTest = xyTest_load[:, -1]
        else:
            xTrain = xyTrain_load[:, :-1]
            yTrain = xyTrain_load[:, -1]
            xTest = xyTest_load[:, :-1]
            yTest = xyTest_load[:, -1]
        self.scale_c = scale_c
        self.shift_c = shift_c
        return xTrain, yTrain, xTest, yTest
| 2.921875 | 3 |
after/config.py | mauvilsa/2021-config | 5 | 17203 | <reponame>mauvilsa/2021-config
from dataclasses import dataclass
@dataclass
class Paths:
    """Filesystem locations used by the experiment."""
    log: str   # directory/path for log output
    data: str  # directory/path holding the dataset files
@dataclass
class Files:
    """Names of the train/test data and label files."""
    train_data: str    # training-data file name
    train_labels: str  # training-labels file name
    test_data: str     # test-data file name
    test_labels: str   # test-labels file name
@dataclass
class Params:
    """Training hyper-parameters."""
    epoch_count: int  # number of training epochs
    lr: float         # learning rate
    batch_size: int   # mini-batch size
@dataclass
class MNISTConfig:
    """Top-level configuration schema aggregating Paths, Files and Params."""
    paths: Paths
    files: Files
    params: Params
| 1.789063 | 2 |
lab7/lab7.py | cudaczek/nlp-labs-2020 | 0 | 17204 | <reponame>cudaczek/nlp-labs-2020<filename>lab7/lab7.py
import pprint
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import manifold
from gensim.models import KeyedVectors
# Download polish word embeddings for word2vec github/Google drive:
# https://github.com/sdadas/polish-nlp-resources
# NOTE: both models are loaded eagerly at import time; each load requires
# the corresponding .bin file to be present on disk and can take a while.
# with 100 dimensionality
word2vec_100 = KeyedVectors.load("word2vec/word2vec_100_3_polish.bin")
# with 300 dimensionality
word2vec_300 = KeyedVectors.load("word2vec_300_3_polish/word2vec_300_3_polish.bin")
# Using the downloaded models find the most similar words for the following expressions...
# And display 5 most similar words according to each model:
# kpk
# szkoda
# wypadek
# kolizja
# nieszczęście
# rozwód
# Single words whose nearest neighbours are queried below.
words = ['kpk', 'szkoda', 'wypadek', 'kolizja', 'nieszczęście', 'rozwód']
def get_most_similar_words(expression):
    """Print the 5 nearest neighbours of *expression* in both models."""
    print(f"--------- Most similar words for {expression} ---------")
    for label, model in (("word2vec_100", word2vec_100),
                         ("word2vec_300", word2vec_300)):
        print(f"{label}:")
        neighbours = model.most_similar(positive=[expression])
        pprint.pprint(neighbours[:5])
    print()
# Query both models for each of the six words above.
for word in words:
    get_most_similar_words(word)
# --------- Most similar words for kpk ---------
# word2vec_100:
# [('kilopond', 0.6665806770324707),
# ('kpzs', 0.6363496780395508),
# ('kpu', 0.6300562024116516),
# ('sownarkomu', 0.6254925727844238),
# ('wcik', 0.6224358677864075)]
# word2vec_300:
# [('ksh', 0.5774794220924377),
# ('cywilnego', 0.5498510599136353),
# ('postępowania', 0.5285828113555908),
# ('kilopond', 0.5151568055152893),
# ('kkkw', 0.48344212770462036)]
#
# --------- Most similar words for szkoda ---------
# word2vec_100:
# [('krzywda', 0.6817898750305176),
# ('pożytek', 0.6121943593025208),
# ('strata', 0.5968126654624939),
# ('ryzyko', 0.5745570659637451),
# ('uszczerbek', 0.5639551877975464)]
# word2vec_300:
# [('uszczerbek', 0.6027276515960693),
# ('krzywda', 0.5920778512954712),
# ('strata', 0.550269365310669),
# ('despekt', 0.5382484197616577),
# ('pożytek', 0.531347393989563)]
#
# --------- Most similar words for wypadek ---------
# word2vec_100:
# [('przypadek', 0.7544811964035034),
# ('okoliczności', 0.7268072366714478),
# ('padku', 0.6788284182548523),
# ('incydent', 0.6418948173522949),
# ('zdarzenie', 0.6114422082901001)]
# word2vec_300:
# [('przypadek', 0.7066895961761475),
# ('okoliczności', 0.6121077537536621),
# ('padku', 0.6056742072105408),
# ('padki', 0.5596078634262085),
# ('incydent', 0.5496981143951416)]
#
# --------- Most similar words for kolizja ---------
# word2vec_100:
# [('zderzenie', 0.8431548476219177),
# ('awaria', 0.7090569734573364),
# ('kraksa', 0.6777161359786987),
# ('turbulencja', 0.6613468527793884),
# ('poślizg', 0.6391660571098328)]
# word2vec_300:
# [('zderzenie', 0.7603178024291992),
# ('awaria', 0.611009955406189),
# ('kraksa', 0.5939033031463623),
# ('turbulencja', 0.5664489269256592),
# ('poślizg', 0.5569967031478882)]
#
# --------- Most similar words for nieszczęście ---------
# word2vec_100:
# [('niebezpieczeństwo', 0.7519958019256592),
# ('cierpienia', 0.7408335208892822),
# ('strapienie', 0.7345459461212158),
# ('cierpienie', 0.7262567281723022),
# ('utrapienie', 0.7251379489898682)]
# word2vec_300:
# [('utrapienie', 0.6610732674598694),
# ('cierpienia', 0.6526124477386475),
# ('niedola', 0.6478177309036255),
# ('strapienie', 0.6300181150436401),
# ('cierpienie', 0.6248573064804077)]
#
# --------- Most similar words for rozwód ---------
# word2vec_100:
# [('małżeństwo', 0.7646843194961548),
# ('separacja', 0.7547168135643005),
# ('adopcja', 0.7333694696426392),
# ('ślub', 0.7324203848838806),
# ('unieważnienie', 0.7096400856971741)]
# word2vec_300:
# [('separacja', 0.7053208351135254),
# ('małżeństwo', 0.6689504384994507),
# ('ślub', 0.6553219556808472),
# ('rozwodowy', 0.614338219165802),
# ('unieważnienie', 0.6127183437347412)]
# Find the most similar words for the following expressions (average the representations for each word):
# sąd najwyższy
# trybunał konstytucyjny
# szkoda majątkowy
# kodeks cywilny
# sąd rejonowy
# Display 7 most similar words according to each model.
# Two-word expressions whose averaged vectors are queried below.
expressions = ['sąd najwyższy', 'trybunał konstytucyjny', 'szkoda majątkowy', 'kodeks cywilny', 'sąd rejonowy']
def get_most_similiar_words_for_expression_avg(expressions):
    """For each two-word expression, print the 7 words closest to the
    mean of the two word vectors, for both embedding models.

    (The "similiar" typo in the name is kept for backward compatibility.)
    """
    for expr in expressions:
        print(f"--------- Most similar words for {expr} ---------")
        word_1, word_2 = expr.split()
        # Performance fix: average the two vectors in one vectorized call
        # instead of the previous per-element Python loop over zip().
        print("word2vec_100:")
        mean_vec = np.mean([word2vec_100[word_1], word2vec_100[word_2]], axis=0)
        pprint.pprint(word2vec_100.similar_by_vector(mean_vec)[:7])
        print("word2vec_300:")
        mean_vec = np.mean([word2vec_300[word_1], word2vec_300[word_2]], axis=0)
        pprint.pprint(word2vec_300.similar_by_vector(mean_vec)[:7])
        print()
# Run the averaged-vector queries for all five expressions.
get_most_similiar_words_for_expression_avg(expressions)
# --------- Most similar words for sąd najwyższy ---------
# word2vec_100:
# [('sąd', 0.8644266128540039),
# ('trybunał', 0.7672435641288757),
# ('najwyższy', 0.7527138590812683),
# ('trybunat', 0.6843459010124207),
# ('sędzia', 0.6718415021896362),
# ('areopag', 0.6571060419082642),
# ('sprawiedliwość', 0.6562486886978149)]
# word2vec_300:
# [('sąd', 0.8261206150054932),
# ('trybunał', 0.711520791053772),
# ('najwyższy', 0.7068409323692322),
# ('sędzia', 0.6023203730583191),
# ('sądowy', 0.5670486688613892),
# ('trybunat', 0.5525928735733032),
# ('sprawiedliwość', 0.5319530367851257)]
#
# --------- Most similar words for trybunał konstytucyjny ---------
# word2vec_100:
# [('trybunał', 0.9073251485824585),
# ('konstytucyjny', 0.7998723387718201),
# ('sąd', 0.7972990274429321),
# ('bunał', 0.7729247808456421),
# ('senat', 0.7585273385047913),
# ('bunału', 0.7441976070404053),
# ('trybunat', 0.7347140908241272)]
# word2vec_300:
# [('trybunał', 0.8845913410186768),
# ('konstytucyjny', 0.7739969491958618),
# ('sąd', 0.7300779819488525),
# ('trybunat', 0.6758428812026978),
# ('senat', 0.6632090210914612),
# ('parlament', 0.6614581346511841),
# ('bunału', 0.6404117941856384)]
#
# --------- Most similar words for szkoda majątkowy ---------
# word2vec_100:
# [('szkoda', 0.8172438144683838),
# ('majątkowy', 0.7424530386924744),
# ('krzywda', 0.6498408317565918),
# ('świadczenie', 0.6419471502304077),
# ('odszkodowanie', 0.6392182111740112),
# ('dochód', 0.637932538986206),
# ('wydatek', 0.6325603127479553)]
# word2vec_300:
# [('szkoda', 0.7971925735473633),
# ('majątkowy', 0.7278684973716736),
# ('uszczerbek', 0.5841633081436157),
# ('korzyść', 0.5474051237106323),
# ('krzywda', 0.5431190729141235),
# ('majątek', 0.525060772895813),
# ('strata', 0.5228629112243652)]
#
# --------- Most similar words for kodeks cywilny ---------
# word2vec_100:
# [('kodeks', 0.8756389617919922),
# ('cywilny', 0.8532464504241943),
# ('pasztunwali', 0.6438998579978943),
# ('deksu', 0.6374959945678711),
# ('teodozjańskim', 0.6283917427062988),
# ('pozakodeksowy', 0.6153194904327393),
# ('sądowo', 0.6136723160743713)]
# word2vec_300:
# [('kodeks', 0.8212110996246338),
# ('cywilny', 0.7886406779289246),
# ('amiatyński', 0.5660314559936523),
# ('cywilnego', 0.5531740188598633),
# ('deksu', 0.5472918748855591),
# ('isps', 0.5369160175323486),
# ('jōei', 0.5361183881759644)]
#
# --------- Most similar words for sąd rejonowy ---------
# word2vec_100:
# [('sąd', 0.8773891925811768),
# ('prokuratura', 0.8396657705307007),
# ('rejonowy', 0.7694871425628662),
# ('trybunał', 0.755321204662323),
# ('sądowy', 0.7153753042221069),
# ('magistrat', 0.7151126861572266),
# ('prokurator', 0.7081375122070312)]
# word2vec_300:
# [('sąd', 0.8507211208343506),
# ('rejonowy', 0.7344856262207031),
# ('prokuratura', 0.711697518825531),
# ('trybunał', 0.6748420596122742),
# ('sądowy', 0.6426382064819336),
# ('okręgowy', 0.6349465847015381),
# ('apelacyjny', 0.599929690361023)]
# Find the result of the following equations (5 top results, both models):
# sąd + konstytucja - kpk
# pasażer + kobieta - mężczyzna
# pilot + kobieta - mężczyzna
# lekarz + kobieta - mężczyzna
# nauczycielka + mężczyzna - kobieta
# przedszkolanka + mężczyzna - 'kobieta
# samochód + rzeka - droga
# (positive, negative) word lists for the vector-arithmetic analogy
# queries below, e.g. sąd + konstytucja - kpk.
equations = [(['sąd', 'konstytucja'], ['kpk']),
             (['pasażer', 'kobieta'], ['mężczyzna']),
             (['pilot', 'kobieta'], ['mężczyzna']),
             (['lekarz', 'kobieta'], ['mężczyzna']),
             (['nauczycielka', 'mężczyzna'], ['kobieta']),
             (['przedszkolanka', 'mężczyzna'], ['kobieta']),
             (['samochód', 'rzeka'], ['droga'])]
def get_result_of_equation(positive, negative):
    """Print the top-5 analogy results (sum of *positive* minus sum of
    *negative* vectors) for both embedding models."""
    print(f"--------- Result for + {positive} and - {negative} ---------")
    for label, model in (("word2vec_100", word2vec_100),
                         ("word2vec_300", word2vec_300)):
        print(f"{label}:")
        hits = model.most_similar(positive=positive, negative=negative)
        pprint.pprint(hits[:5])
    print()
# Evaluate every analogy equation defined above.
for equa in equations:
    get_result_of_equation(equa[0], equa[1])
# --------- Result for + ['sąd', 'konstytucja'] and - ['kpk'] ---------
# word2vec_100:
# [('trybunał', 0.6436409950256348),
# ('ustawa', 0.6028786897659302),
# ('elekcja', 0.5823959112167358),
# ('deklaracja', 0.5771891474723816),
# ('dekret', 0.5759621262550354)]
# word2vec_300:
# [('trybunał', 0.5860734581947327),
# ('senat', 0.5112544298171997),
# ('ustawa', 0.5023636817932129),
# ('dekret', 0.48704710602760315),
# ('władza', 0.4868926703929901)]
#
# --------- Result for + ['pasażer', 'kobieta'] and - ['mężczyzna'] ---------
# word2vec_100:
# [('pasażerka', 0.7234811186790466),
# ('stewardessa', 0.6305270195007324),
# ('stewardesa', 0.6282645463943481),
# ('taksówka', 0.619726300239563),
# ('podróżny', 0.614517092704773)]
# word2vec_300:
# [('pasażerka', 0.6741673946380615),
# ('stewardesa', 0.5810248255729675),
# ('stewardessa', 0.5653151273727417),
# ('podróżny', 0.5060371160507202),
# ('pasażerski', 0.4896503686904907)]
#
# --------- Result for + ['pilot', 'kobieta'] and - ['mężczyzna'] ---------
# word2vec_100:
# [('nawigator', 0.6925703287124634),
# ('oblatywacz', 0.6686224937438965),
# ('lotnik', 0.6569937467575073),
# ('pilotka', 0.6518791913986206),
# ('awionetka', 0.6428645849227905)]
# word2vec_300:
# [('pilotka', 0.6108255386352539),
# ('lotnik', 0.6020804047584534),
# ('stewardesa', 0.5943204760551453),
# ('nawigator', 0.5849766731262207),
# ('oblatywacz', 0.5674178600311279)]
#
# --------- Result for + ['lekarz', 'kobieta'] and - ['mężczyzna'] ---------
# word2vec_100:
# [('lekarka', 0.7690489292144775),
# ('ginekolog', 0.7575511336326599),
# ('pediatra', 0.7478542923927307),
# ('psychiatra', 0.732271671295166),
# ('położna', 0.7268943786621094)]
# word2vec_300:
# [('lekarka', 0.7388788461685181),
# ('pielęgniarka', 0.6719920635223389),
# ('ginekolog', 0.658279299736023),
# ('psychiatra', 0.6389409303665161),
# ('chirurg', 0.6305986642837524)]
#
# --------- Result for + ['nauczycielka', 'mężczyzna'] and - ['kobieta'] ---------
# word2vec_100:
# [('uczennica', 0.7441667318344116),
# ('studentka', 0.7274973392486572),
# ('nauczyciel', 0.7176114916801453),
# ('wychowawczyni', 0.7153530120849609),
# ('koleżanka', 0.678418755531311)]
# word2vec_300:
# [('nauczyciel', 0.6561620235443115),
# ('wychowawczyni', 0.6211140155792236),
# ('uczennica', 0.6142012476921082),
# ('koleżanka', 0.5501158237457275),
# ('przedszkolanka', 0.5497692823410034)]
#
# --------- Result for + ['przedszkolanka', 'mężczyzna'] and - ['kobieta'] ---------
# word2vec_100:
# [('stażysta', 0.6987776756286621),
# ('wychowawczyni', 0.6618361473083496),
# ('kreślarka', 0.6590923070907593),
# ('pielęgniarz', 0.6492814421653748),
# ('siedmiolatek', 0.6483469009399414)]
# word2vec_300:
# [('stażysta', 0.5117638111114502),
# ('pierwszoklasista', 0.49398648738861084),
# ('wychowawczyni', 0.49037522077560425),
# ('praktykant', 0.48884207010269165),
# ('pielęgniarz', 0.4795262813568115)]
#
# --------- Result for + ['samochód', 'rzeka'] and - ['droga'] ---------
# word2vec_100:
# [('jeep', 0.6142987608909607),
# ('buick', 0.5962571501731873),
# ('dżip', 0.5938510894775391),
# ('ponton', 0.580719530582428),
# ('landrower', 0.5799552202224731)]
# word2vec_300:
# [('dżip', 0.5567235946655273),
# ('jeep', 0.5533617734909058),
# ('auto', 0.5478508472442627),
# ('ciężarówka', 0.5461742281913757),
# ('wóz', 0.5204571485519409)]
# Using the t-SNE algorithm compute the projection of the random 1000 words with the following words highlighted (both models):
# szkoda
# strata
# uszczerbek
# krzywda
# niesprawiedliwość
# nieszczęście
# kobieta
# mężczyzna
# pasażer
# pasażerka
# student
# studentka
# lekarz
# lekarka
# The 14 words highlighted in the t-SNE projections below.
words = np.array(['szkoda', 'strata', 'uszczerbek', 'krzywda', 'niesprawiedliwość', 'nieszczęście', 'kobieta',
                  'mężczyzna', 'pasażer', 'pasażerka', 'student', 'studentka', 'lekarz', 'lekarka'])
def scatter_points(hue, point_labels, principal_components):
    """Scatter-plot 2-D points coloured by *hue*, annotating each point
    with its label from *point_labels*."""
    coords = np.transpose(principal_components)
    xs, ys = coords[0], coords[1]
    plt.scatter(xs, ys, c=hue, s=100, marker='o', alpha=0.2)
    for idx, label in enumerate(point_labels):
        plt.annotate(label, (xs[idx], ys[idx]), ha="center", size=8)
def plot_with_tsne(wv, words, perplexity=30, learning_rate=100.0, iterations=1000, filename='slowa300'):
    """Project *words* plus 1000 random vocabulary words into 2-D with
    t-SNE, highlight the given words, and save the figure to
    <filename>.png before showing it."""
    sampled = np.random.choice(list(wv.wv.vocab), 1000)
    all_words = np.concatenate((words, sampled))
    vectors = [wv[w] for w in all_words]
    projector = manifold.TSNE(2, perplexity=perplexity, learning_rate=learning_rate, n_iter=iterations)
    embedded = projector.fit_transform(vectors)
    # The first 14 entries are the highlighted words (hue 0); the 1000
    # random samples get hue 1.
    hue = [0] * 14 + [1] * 1000
    plt.figure(figsize=(30, 30))
    scatter_points(hue, all_words, embedded)
    plt.savefig(filename + '.png')
    plt.show()
    plt.clf()
# Produce the projection for both embedding dimensionalities.
# Bug fix: both calls previously relied on the default
# filename='slowa300', so the second figure overwrote the first.
wv = word2vec_300
plot_with_tsne(wv, words, filename='slowa300')
wv = word2vec_100
plot_with_tsne(wv, words, filename='slowa100')
| 3.09375 | 3 |
Nimbus-Controller/sqs-fastreader.py | paulfdoyle/NIMBUS | 0 | 17205 | # This script adds a new message to a specific SQS queue
#
# Author - <NAME> 2013
#
#
#from __future__ import print_function
import sys
import Queue
import boto.sqs
import argparse
import socket
import datetime
import sys
import time
from boto.sqs.attributes import Attributes
# Command-line interface: SQS queue to drain and experiment name used to
# prefix the per-thread output CSV files.
parser = argparse.ArgumentParser()
parser.add_argument('queuearg',help='name of the sqs queue to use',metavar="myQueueName")
parser.add_argument('experiment',help='name of the experiment queue to use')
args = parser.parse_args()
from boto.sqs.message import Message
import threading
# NOTE(review): the credentials below are '<KEY>' placeholders; real AWS
# keys must never be hard-coded — load them from the environment or the
# boto config instead.
conn = boto.sqs.connect_to_region("us-east-1", aws_access_key_id='<KEY>', aws_secret_access_key='<KEY>')
sqs_queue = conn.get_queue(args.queuearg)
class Sender(threading.Thread):
    """Worker thread that drains messages from the shared SQS queue into
    a per-thread CSV file named <experiment><worker-id>-<queue>.csv,
    deleting each message after it is written."""

    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        global sqs_queue, queue
        # Each worker pops a unique id from the shared work queue to
        # build its own output file name.
        name = args.experiment + str(queue.get()) + "-" + args.queuearg + ".csv"
        f = open(name, 'w')
        try:
            while True:
                # Fetch one message along with its SentTimestamp attribute.
                msgs = sqs_queue.get_messages(num_messages=1,
                                              attributes='SentTimestamp')
                if not msgs:
                    # Bug fix: a bare "except:" previously swallowed *all*
                    # errors (including network failures) to detect this
                    # empty-fetch case; now it is tested explicitly and
                    # real errors propagate.
                    if sqs_queue.count() < 1:
                        f.write(args.queuearg + " is empty\n")
                        return
                    continue
                m = msgs[0]
                f.write(str(m.attributes) + "," + str(m.get_body()) + "\n")
                sqs_queue.delete_message(m)
        finally:
            # Bug fix: the file handle was never closed before.
            f.close()
# Shared work queue: each of the 40 workers pops one id (0-39) that
# names its output CSV file.  (Python 2: Queue / xrange.)
queue = Queue.Queue(0)
threads = []
for n in xrange(40):
    queue.put(n)
    t = Sender()
    t.start()
    threads.append(t)
# Wait for every worker to finish draining the SQS queue.
for t in threads:
    t.join()
| 2.421875 | 2 |
dero/ml/results/reformat.py | whoopnip/dero | 0 | 17206 | from typing import Optional
import pandas as pd
from dero.ml.typing import ModelDict, AllModelResultsDict, DfDict
def model_dict_to_df(model_results: ModelDict, model_name: Optional[str] = None) -> pd.DataFrame:
df = pd.DataFrame(model_results).T
df.drop('score', inplace=True)
df['score'] = model_results['score']
if model_name is not None:
df['model'] = model_name
first_cols = ['model', 'score']
else:
first_cols = ['score']
other_cols = [col for col in df.columns if col not in first_cols]
return df[first_cols + other_cols]
def all_model_results_dict_to_df(results: AllModelResultsDict) -> pd.DataFrame:
df = pd.DataFrame()
for model_type, instance_list in results.items():
for instance in instance_list:
model_df = model_dict_to_df(instance, model_name=model_type)
df = df.append(model_df)
first_cols = ['model', 'score']
other_cols = [col for col in df.columns if col not in first_cols]
return df[first_cols + other_cols].sort_values('score', ascending=False)
def all_model_results_dict_to_model_df_dict(results: AllModelResultsDict) -> DfDict:
    """Build one score-sorted DataFrame per model type.

    Returns a dict mapping each model type to a DataFrame of its instances,
    sorted by 'score' descending.
    """
    out_dict = {}
    for model_type, instance_list in results.items():
        # fix: DataFrame.append was deprecated and removed in pandas 2.x;
        # build the per-instance frames and concatenate them in one call.
        instance_dfs = [
            model_dict_to_df(instance, model_name=model_type)
            for instance in instance_list
        ]
        model_df = pd.concat(instance_dfs) if instance_dfs else pd.DataFrame()
        out_dict[model_type] = model_df.sort_values('score', ascending=False)
    return out_dict
| 2.546875 | 3 |
engine/view.py | amirgeva/py2d | 0 | 17207 | import pygame
from engine.utils import Rect
from engine.app import get_screen_size
# EXPORT
class View(object):
    """A movable viewport rectangle over the game world.

    Defaults to a rectangle covering the full screen resolution when no
    rectangle is supplied.
    """
    def __init__(self, rect=None):
        if rect:
            self.rect = rect
        else:
            # No rect given: span the whole screen.
            res = get_screen_size()
            self.rect = Rect(0,0,res[0],res[1])
    def offset(self, d):
        """Shift the view by the (dx, dy) pair ``d``."""
        self.rect.move(d[0], d[1])
    def get_position(self):
        # Top-left corner of the view (Rect.tl).
        return self.rect.tl
    def set_position(self, pos):
        """Move the view so its top-left corner is at ``pos`` (keeps size)."""
        # NOTE(review): __init__ calls Rect(0, 0, width, height) while this
        # passes what looks like (left, top, right, bottom). Confirm Rect's
        # constructor convention — one of the two call sites may be wrong.
        self.rect = Rect(pos.x, pos.y, pos.x+self.rect.width(), pos.y+self.rect.height())
    def relative_position(self, pos):
        """Translate a world position into view-local coordinates."""
        return pos - self.rect.tl
    def get_rect(self):
        # Return a copy so callers cannot mutate the view's rectangle.
        return Rect(self.rect)
| 2.9375 | 3 |
oldplugins/coin.py | sonicrules1234/sonicbot | 1 | 17208 | <reponame>sonicrules1234/sonicbot
import shelve, random
arguments = ["self", "info", "args", "world"]
minlevel = 2
helpstring = "coin <bet>"
def main(connection, info, args, world) :
    """Bet on a coin flip: pays out or deducts the wagered amount.

    The flip is drawn from the per-user ``coinchance`` list stored in the
    shelve database, so odds can be biased per player. Requires the player
    to already have a money account. ``world`` is unused but part of the
    plugin calling convention.
    """
    money = shelve.open("money-%s.db" % (connection.networkname), writeback=True)
    try:
        if money.has_key(info["sender"]) :
            # May raise ValueError on a non-numeric bet; propagates, as before.
            bet = int(args[1])
            if bet <= money[info["sender"]]["money"] and bet >= 1 :
                answer = random.choice(money[info["sender"]]["coinchance"])
                if answer :
                    money[info["sender"]]["money"] += bet
                    money.sync()
                    connection.msg(info["channel"], _("Congrats %(sender)s! You just won %(num)s dollars!") % dict(sender=info["sender"], num=args[1]))
                else :
                    money[info["sender"]]["money"] -= bet
                    money.sync()
                    connection.msg(info["channel"], _("Sorry %(sender)s! You just lost %(num)s dollars!") % dict(sender=info["sender"], num=args[1]))
                # Track the player's all-time high balance.
                if money[info["sender"]]["money"] > money[info["sender"]]["maxmoney"] :
                    money[info["sender"]]["maxmoney"] = money[info["sender"]]["money"]
                    money.sync()
            else : connection.msg(info["channel"], _("%(sender)s: You don't have enough money to do that!") % dict(sender=info["sender"]))
        else : connection.msg(info["channel"], _("%(sender)s: You have not set up a money account. If you aren't already, please register with me. Then, say moneyreset. After that you should be able to use this command.") % dict(sender=info["sender"]))
    finally:
        # fix: the shelve handle was never closed, leaking the underlying db
        # file descriptor on every invocation of the command.
        money.close()
| 3.109375 | 3 |
src/rbvfit/vfit_mcmc.py | manoranjan-s/rbvfit | 0 | 17209 | from __future__ import print_function
import emcee
from multiprocessing import Pool
import numpy as np
import corner
import matplotlib.pyplot as plt
import sys
import scipy.optimize as op
from rbvfit.rb_vfit import rb_veldiff as rb_veldiff
from rbvfit import rb_setline as rb
import pdb
def plot_model(wave_obs,fnorm,enorm,fit,model,outfile= False,xlim=[-600.,600.],verbose=False):
    """Plot the best-fit Voigt-profile model over the normalized spectrum.

    Each fitted transition is shown in velocity space with the data, the
    error array, 100 posterior draws, the best-fit model, its individual
    components, and per-clump logN/b annotations with their 10th/90th
    percentile uncertainties.

    Parameters
    ----------
    wave_obs, fnorm, enorm : observed wavelength grid, normalized flux, error
    fit   : vfit instance with best_theta/low_theta/high_theta/samples set
            (i.e. after plot_corner has run)
    model : model object exposing nclump, ntransition, zabs, model_fit, etc.
    outfile : False to show interactively, otherwise a filename to save to
    xlim  : velocity plot range in km/s
    verbose : if True, display percentile summaries (IPython only)
    """
    #This model only works if there are no nuissance paramteres
    theta_prime=fit.best_theta
    value1=fit.low_theta
    value2=fit.high_theta
    n_clump=model.nclump
    # fix: np.int was removed in NumPy 1.24; floor division is equivalent here.
    n_clump_total = len(theta_prime) // 3
    ntransition=model.ntransition
    zabs=model.zabs
    samples=fit.samples
    model_mcmc=fit.model
    wave_list=np.zeros( len(model.lambda_rest_original),)
    # Use the input lambda rest list to plot correctly
    for i in range(0,len(wave_list)):
        s=rb.rb_setline(model.lambda_rest_original[i],'closest')
        wave_list[i]=s['wave']
    wave_rest=wave_obs/(1+zabs[0])
    # theta is laid out as three equal blocks: [logN..., b..., v...].
    best_N = theta_prime[0:n_clump_total]
    best_b = theta_prime[n_clump_total:2 * n_clump_total]
    best_v = theta_prime[2 * n_clump_total:3 * n_clump_total]
    low_N = value1[0:n_clump_total]
    low_b = value1[n_clump_total:2 * n_clump_total]
    low_v = value1[2 * n_clump_total:3 * n_clump_total]
    high_N = value2[0:n_clump_total]
    high_b = value2[n_clump_total:2 * n_clump_total]
    high_v = value2[2 * n_clump_total:3 * n_clump_total]
    #Now extracting individual fitted components
    best_fit, f1 = model.model_fit(theta_prime, wave_obs)
    fig, axs = plt.subplots(ntransition, sharex=True, sharey=False,figsize=(12,18 ),gridspec_kw={'hspace': 0})
    BIGGER_SIZE = 18
    plt.rc('font', size=BIGGER_SIZE)          # controls default text sizes
    plt.rc('axes', titlesize=BIGGER_SIZE)     # fontsize of the axes title
    plt.rc('axes', labelsize=BIGGER_SIZE)     # fontsize of the x and y labels
    plt.rc('xtick', labelsize=BIGGER_SIZE)    # fontsize of the tick labels
    plt.rc('ytick', labelsize=BIGGER_SIZE)    # fontsize of the tick labels
    plt.rc('legend', fontsize=BIGGER_SIZE)    # legend fontsize
    plt.rc('figure', titlesize=BIGGER_SIZE)   # fontsize of the figure title
    # Random posterior draws used to visualize the model uncertainty.
    index = np.random.randint(0, high=len(samples), size=100)
    if ntransition == 1:
        # With a single transition, plt.subplots returns a bare Axes object.
        #When there are no nuissance parameter
        #Now loop through each transition and plot them in velocity space
        vel=rb_veldiff(wave_list[0],wave_rest)
        axs.step(vel, fnorm, 'k-', linewidth=1.)
        axs.step(vel, enorm, color='r', linewidth=1.)
        # Plotting a random sample of outputs extracted from posterior dis
        for ind in range(len(index)):
            axs.plot(vel, model_mcmc(samples[index[ind], :], wave_obs), color="k", alpha=0.1)
        axs.set_ylim([0, 1.6])
        axs.set_xlim(xlim)
        axs.plot(vel, best_fit, color='b', linewidth=3)
        axs.plot([0., 0.], [-0.2, 2.5], 'k:', lw=0.5)
        # plot individual components
        for dex in range(0,np.shape(f1)[1]):
            axs.plot(vel, f1[:, dex], 'g:', linewidth=3)
        for iclump in range(0,n_clump):
            axs.plot([best_v[iclump],best_v[iclump]],[1.05,1.15],'k--',lw=4)
            # fix: np.str was removed in NumPy 1.24; plain %-formatting already
            # yields a str, so the wrapper is dropped.
            text1 = r'$logN \;= ' + ('%.2f' % best_N[iclump]) + '^{ + ' + ('%.2f' % (best_N[iclump]-low_N[iclump])) + '}' + '_{ -' + ('%.2f' % (high_N[iclump]-best_N[iclump])) + '}$'
            axs.text(best_v[iclump],1.2,text1,
                     fontsize=14,rotation=90, rotation_mode='anchor')
            text2 = r'$b =' + ('%.0f' % best_b[iclump]) + '^{ + ' + ('%.0f' % (best_b[iclump]-low_b[iclump])) + '}' + '_{ -' + ('%.0f' % (high_b[iclump]-best_b[iclump])) + '}$'
            axs.text(best_v[iclump]+30,1.2, text2,fontsize=14,rotation=90, rotation_mode='anchor')
    else:
        #Now loop through each transition and plot them in velocity space
        for i in range(0,ntransition):
            print(wave_list[i])
            vel=rb_veldiff(wave_list[i],wave_rest)
            axs[i].step(vel, fnorm, 'k-', linewidth=1.)
            axs[i].step(vel, enorm, color='r', linewidth=1.)
            #pdb.set_trace()
            # Plotting a random sample of outputs extracted from posterior distribution
            for ind in range(len(index)):
                axs[i].plot(vel, model_mcmc(samples[index[ind], :], wave_obs), color="k", alpha=0.1)
            axs[i].set_ylim([0, 1.6])
            axs[i].set_xlim(xlim)
            axs[i].plot(vel, best_fit, color='b', linewidth=3)
            axs[i].plot([0., 0.], [-0.2, 2.5], 'k:', lw=0.5)
            # plot individual components
            for dex in range(0,np.shape(f1)[1]):
                axs[i].plot(vel, f1[:, dex], 'g:', linewidth=3)
            for iclump in range(0,n_clump):
                axs[i].plot([best_v[iclump],best_v[iclump]],[1.05,1.15],'k--',lw=4)
                # Annotate only the first panel to avoid repeated labels.
                if i ==0:
                    # fix: np.str removed in NumPy 1.24 (see single-panel branch).
                    text1 = r'$logN \;= ' + ('%.2f' % best_N[iclump]) + '^{ + ' + ('%.2f' % (best_N[iclump]-low_N[iclump])) + '}' + '_{ -' + ('%.2f' % (high_N[iclump]-best_N[iclump])) + '}$'
                    axs[i].text(best_v[iclump],1.2,text1,
                                fontsize=14,rotation=90, rotation_mode='anchor')
                    text2 = r'$b =' + ('%.0f' % best_b[iclump]) + '^{ + ' + ('%.0f' % (best_b[iclump]-low_b[iclump])) + '}' + '_{ -' + ('%.0f' % (high_b[iclump]-best_b[iclump])) + '}$'
                    axs[i].text(best_v[iclump]+30,1.2, text2,
                                fontsize=14,rotation=90, rotation_mode='anchor')
    if verbose==True:
        from IPython.display import display, Math
        samples = fit.sampler.get_chain(discard=100, thin=15, flat=True)
        nfit = int(fit.ndim / 3)
        N_tile = np.tile("logN", nfit)
        b_tile = np.tile("b", nfit)
        v_tile = np.tile("v", nfit)
        tmp = np.append(N_tile, b_tile)
        text_label = np.append(tmp, v_tile)
        for i in range(len(text_label)):
            mcmc = np.percentile(samples[:, i], [16, 50, 84])
            q = np.diff(mcmc)
            # fix: raw string avoids the invalid '\m' escape warning.
            txt = r"\mathrm{{{3}}} = {0:.2f}_{{-{1:.2f}}}^{{{2:.2f}}}"
            txt = txt.format(mcmc[1], q[0], q[1], text_label[i])
            display(Math(txt))
    if outfile==False:
        plt.show()
    else:
        outfile_fig =outfile
        fig.savefig(outfile_fig, bbox_inches='tight')
######## Computing Likelihoods######
def lnprior(theta, lb, ub):
    """Flat (uniform) log-prior.

    Returns 0.0 when every component of ``theta`` lies inside the closed box
    [lb, ub] (element-wise), and -inf otherwise.
    """
    # fix: the original had an unreachable `break` after the `return`.
    for index in range(0, len(lb)):
        if (lb[index] > theta[index]) or (ub[index] < theta[index]):
            return -np.inf
    return 0.0
def lnlike(theta, model, x, y, yerr):
    """Gaussian log-likelihood of the data given the model prediction."""
    prediction = model(theta, x)
    inv_sigma2 = 1.0 / (yerr ** 2)
    residual = y - prediction
    return -0.5 * (np.sum(residual ** 2 * inv_sigma2 - np.log(inv_sigma2)))
def lnprob(theta, lb, ub, model, x, y, yerr):
    """Log-posterior: flat prior inside [lb, ub] plus Gaussian log-likelihood."""
    prior = lnprior(theta, lb, ub)
    if not np.isfinite(prior):
        # Outside the prior box: short-circuit without evaluating the model.
        return -np.inf
    return prior + lnlike(theta, model, x, y, yerr)
def optimize_guess(model, theta, lb, ub, x, y, yerr):
    """Refine ``theta`` by minimizing the negative log-posterior."""
    def negative_lnprob(*args):
        return -lnprob(*args)
    result = op.minimize(negative_lnprob, [theta], args=(lb, ub, model, x, y, yerr))
    return result["x"]
def set_bounds(nguess,bguess,vguess):
    """Build box bounds around initial (logN, b, v) guesses.

    logN: +/- 2 dex; b: +/- 40 km/s, floored at 2; v: +/- 50 km/s.
    Returns ``(bounds, lb, ub)`` where ``bounds == [lb, ub]`` and lb/ub are
    the concatenated [N..., b..., v...] lower/upper limits.
    """
    count = len(nguess)
    n_arr = np.asarray(nguess[:count], dtype=float)
    b_arr = np.asarray(bguess[:count], dtype=float)
    v_arr = np.asarray(vguess[:count], dtype=float)
    Nlow = n_arr - 2.0
    # Doppler widths below 2 km/s are unphysical for this fitter.
    blow = np.maximum(b_arr - 40.0, 2.0)
    vlow = v_arr - 50.0
    NHI = n_arr + 2.0
    # NOTE: upper b caps at 150 when it would exceed 200 (as in the original).
    bHI = np.where(b_arr + 40.0 > 200.0, 150.0, b_arr + 40.0)
    vHI = v_arr + 50.0
    lb = np.concatenate((Nlow, blow, vlow))
    ub = np.concatenate((NHI, bHI, vHI))
    bounds = [lb, ub]
    return bounds, lb, ub
class vfit(object):
    """MCMC driver for Voigt-profile absorption-line fitting.

    Wraps emcee's EnsembleSampler around a user-supplied callable
    ``model(theta, wave)`` with flat priors between ``lb`` and ``ub``.
    The parameter vector is laid out as three equal thirds:
    [logN..., b..., v...].
    """
    def __init__(self, model, theta, lb, ub, wave_obs, fnorm, enorm, no_of_Chain=50, no_of_steps=1000,
                 perturbation=1e-6):
        # Main class that performs all the fitting
        self.wave_obs = wave_obs        # observed wavelength grid
        self.fnorm = fnorm              # normalized flux
        self.enorm = enorm              # normalized flux error
        self.model = model              # callable: model(theta, wave) -> flux
        self.lb = lb                    # lower prior bounds
        self.ub = ub                    # upper prior bounds
        self.theta = theta              # initial parameter guess
        self.no_of_Chain = no_of_Chain  # number of emcee walkers
        self.no_of_steps = no_of_steps  # steps per walker
        self.perturbation = perturbation  # radius of the walker ball around the guess
    def runmcmc(self, optimize=True,verbose=False):
        """Run the emcee ensemble sampler.

        optimize : if True, refine the starting guess with scipy.optimize
            before seeding the walkers.
        verbose  : if True, display 16/50/84th-percentile parameter
            summaries (IPython display only).
        Results are stored on self.sampler / self.ndim / self.nwalkers.
        """
        model = self.model
        theta = self.theta
        lb = self.lb
        ub = self.ub
        wave_obs = self.wave_obs
        fnorm = self.fnorm
        enorm = self.enorm
        no_of_Chain = self.no_of_Chain
        no_of_steps = self.no_of_steps
        perturbation = self.perturbation
        if optimize == True:
            print('Optimizing Guess ***********')
            # Now make a better guess
            popt = optimize_guess(model, theta, lb, ub, wave_obs, fnorm, enorm)
            print('Done ***********')
        else:
            print('Skipping Optimizing Guess ***********')
            print('Using input guess for mcmc ***********')
            popt = theta
        print('Preparing emcee ***********')
        ###### Define a lot of walkers
        length_of_lb = len(lb)
        ndim, nwalkers = length_of_lb, no_of_Chain
        # Start each walker in a tiny Gaussian ball around the (optimized) guess.
        guesses = [popt + perturbation * np.random.randn(ndim) for i in range(nwalkers)]
        print("Starting emcee ***********")
        burntime = np.round(no_of_steps * .2)  # NOTE(review): computed but unused
        with Pool() as pool:
            # Multiprocessing pool parallelizes the per-walker likelihood calls.
            sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, pool=pool, args=(lb, ub, model, wave_obs, fnorm, enorm))
            pos, prob, state = sampler.run_mcmc(guesses, no_of_steps,progress=True)
        #sampler.reset()
        print("Done!")
        #print("Now starting the Final Calculations:")
        print("*****************")
        #width = 30
        # Now Running mcmc
        #for i, result in enumerate(sampler.sample(pos, iterations=no_of_steps)):
        #    n = int((width + 1) * float(i) / no_of_steps)
        #sys.stdout.write("\r[{0}{1}]".format('#' * n, ' ' * (width - n)))
        #sys.stdout.write("\n")
        if verbose==True:
            from IPython.display import display, Math
            # Discard burn-in and thin before summarizing the posterior.
            samples = sampler.get_chain(discard=100, thin=15, flat=True)
            nfit = int(ndim / 3)
            N_tile = np.tile("logN", nfit)
            b_tile = np.tile("b", nfit)
            v_tile = np.tile("v", nfit)
            tmp = np.append(N_tile, b_tile)
            text_label = np.append(tmp, v_tile)
            for i in range(len(text_label)):
                mcmc = np.percentile(samples[:, i], [16, 50, 84])
                q = np.diff(mcmc)
                txt = "\mathrm{{{3}}} = {0:.2f}_{{-{1:.2f}}}^{{{2:.2f}}}"
                txt = txt.format(mcmc[1], q[0], q[1], text_label[i])
                display(Math(txt))
        self.sampler = sampler
        self.ndim = ndim
        self.nwalkers = nwalkers
    def plot_corner(self,outfile=False):
        """Corner plot of the posterior.

        Also stores the median parameter vector on ``self.best_theta``, the
        10th/90th percentile vectors on ``self.low_theta``/``self.high_theta``
        and the flattened chain on ``self.samples`` (used by plot_model).
        """
        ndim=self.ndim
        #samples = self.sampler.chain[:, 100:, :].reshape((-1, ndim)) # sampler.flatchain
        samples = self.sampler.get_chain(discard=100, thin=15, flat=True)
        st = np.percentile(samples, 50, axis=0) # =np.median(samples,axis=0)#np.median(sampler.flatchain, axis=0)
        # df = pd.DataFrame(samples)
        # temp=df.mode()
        # st=temp.values[0]
        nfit = int(ndim / 3)
        N_tile = np.tile("logN", nfit)
        b_tile = np.tile("b", nfit)
        v_tile = np.tile("v", nfit)
        tmp = np.append(N_tile, b_tile)
        text_label = np.append(tmp, v_tile)
        figure = corner.corner(samples, labels=text_label, truths=st)
        theta_prime = st
        value1 = np.percentile(samples, 10, axis=0)
        # 10th/90th percentile envelope of the posterior:
        value2 = np.percentile(samples, 90, axis=0)
        # Extract the axes
        axes = np.array(figure.axes).reshape((ndim, ndim))
        # Loop over the diagonal
        for i in range(ndim):
            ax = axes[i, i]
            ax.axvline(value1[i], color="aqua")
            ax.axvline(value2[i], color="aqua")
        # Loop over the histograms
        for yi in range(ndim):
            for xi in range(yi):
                ax = axes[yi, xi]
                ax.axvline(value1[xi], color="aqua")
                ax.axvline(value2[xi], color="aqua")
                # ax.axhline(value1[yi], color="g")
                # ax.axhline(value2[yi], color="r")
                # ax.plot(value1[xi], value1[yi], "sg")
                # ax.plot(value2[xi], value2[yi], "sr")
        self.best_theta=theta_prime
        self.low_theta=value1
        self.high_theta=value2
        self.samples=samples
        if outfile==False:
            plt.show()
        else:
            outfile_fig =outfile
            figure.savefig(outfile_fig, bbox_inches='tight')
| 2.328125 | 2 |
yolox/data/dataloading.py | XHYsdjkdsjsk2021/Yolox_xhy | 0 | 17210 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import torch
from torch.utils.data.dataloader import DataLoader as torchDataLoader
from torch.utils.data.dataloader import default_collate
import os
import random
from .samplers import YoloBatchSampler
def get_yolox_datadir():
    """
    get dataset dir of YOLOX. If environment variable named `YOLOX_DATADIR` is set,
    this function will return value of the environment variable. Otherwise, use data
    """
    configured = os.getenv("YOLOX_DATADIR", None)
    if configured is not None:
        return configured
    # Fall back to the 'datasets' directory next to the installed yolox package.
    import yolox
    yolox_path = os.path.dirname(os.path.dirname(yolox.__file__))
    return os.path.join(yolox_path, "datasets")
class DataLoader(torchDataLoader):
    """
    Lightnet dataloader that enables on the fly resizing of the images.
    See :class:`torch.utils.data.DataLoader` for more information on the arguments.
    Check more on the following website:
    https://gitlab.com/EAVISE/lightnet/-/blob/master/lightnet/data/_dataloading.py
    Note:
        This dataloader only works with :class:`lightnet.data.Dataset` based datasets.
    Example:
        >>> class CustomSet(ln.data.Dataset):
        ...     def __len__(self):
        ...         return 4
        ...     @ln.data.Dataset.resize_getitem
        ...     def __getitem__(self, index):
        ...         # Should return (image, anno) but here we return (input_dim,)
        ...         return (self.input_dim,)
        >>> dl = ln.data.DataLoader(
        ...     CustomSet((200,200)),
        ...     batch_size = 2,
        ...     collate_fn = ln.data.list_collate   # We want the data to be grouped as a list
        ... )
        >>> dl.dataset.input_dim    # Default input_dim
        (200, 200)
        >>> for d in dl:
        ...     d
        [[(200, 200), (200, 200)]]
        [[(200, 200), (200, 200)]]
        >>> dl.change_input_dim(320, random_range=None)
        (320, 320)
        >>> for d in dl:
        ...     d
        [[(320, 320), (320, 320)]]
        [[(320, 320), (320, 320)]]
        >>> dl.change_input_dim((480, 320), random_range=None)
        (480, 320)
        >>> for d in dl:
        ...     d
        [[(480, 320), (480, 320)]]
        [[(480, 320), (480, 320)]]
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__initialized = False
        # Recover shuffle/sampler/batch_sampler from either positional args or
        # kwargs, mirroring torch.utils.data.DataLoader's signature.
        shuffle = False
        # fix: 'sampler' could previously be read while unbound (NameError) when
        # it was supplied neither positionally nor as a keyword argument.
        sampler = None
        batch_sampler = None
        if len(args) > 5:
            shuffle = args[2]
            sampler = args[3]
            batch_sampler = args[4]
        elif len(args) > 4:
            shuffle = args[2]
            sampler = args[3]
            if "batch_sampler" in kwargs:
                batch_sampler = kwargs["batch_sampler"]
        elif len(args) > 3:
            shuffle = args[2]
            if "sampler" in kwargs:
                sampler = kwargs["sampler"]
            if "batch_sampler" in kwargs:
                batch_sampler = kwargs["batch_sampler"]
        else:
            if "shuffle" in kwargs:
                shuffle = kwargs["shuffle"]
            if "sampler" in kwargs:
                sampler = kwargs["sampler"]
            if "batch_sampler" in kwargs:
                batch_sampler = kwargs["batch_sampler"]

        # Use custom BatchSampler
        if batch_sampler is None:
            if sampler is None:
                if shuffle:
                    sampler = torch.utils.data.sampler.RandomSampler(self.dataset)
                    # sampler = torch.utils.data.DistributedSampler(self.dataset)
                else:
                    sampler = torch.utils.data.sampler.SequentialSampler(self.dataset)
            batch_sampler = YoloBatchSampler(
                sampler,
                self.batch_size,
                self.drop_last,
                input_dimension=self.dataset.input_dim,
            )
            # batch_sampler = IterationBasedBatchSampler(batch_sampler, num_iterations =

        self.batch_sampler = batch_sampler

        self.__initialized = True

    def close_mosaic(self):
        """Disable mosaic augmentation on the underlying YoloBatchSampler."""
        self.batch_sampler.mosaic = False

    def change_input_dim(self, multiple=32, random_range=(10, 19)):
        """This function will compute a new size and update it on the next mini_batch.
        Args:
            multiple (int or tuple, optional): values to multiply the randomly generated range by.
                Default **32**
            random_range (tuple, optional): This (min, max) tuple sets the range
                for the randomisation; Default **(10, 19)**
        Return:
            tuple: width, height tuple with new dimension
        Note:
            The new size is generated as follows: |br|
            First we compute a random integer inside ``[random_range]``.
            We then multiply that number with the ``multiple`` argument,
            which gives our final new input size. |br|
            If ``multiple`` is an integer we generate a square size. If you give a tuple
            of **(width, height)**, the size is computed
            as :math:`rng * multiple[0], rng * multiple[1]`.
        Note:
            You can set the ``random_range`` argument to **None** to set
            an exact size of multiply. |br|
            See the example above for how this works.
        """
        if random_range is None:
            size = 1
        else:
            size = random.randint(*random_range)

        if isinstance(multiple, int):
            size = (size * multiple, size * multiple)
        else:
            size = (size * multiple[0], size * multiple[1])

        self.batch_sampler.new_input_dim = size

        return size
def list_collate(batch):
    """
    Function that collates lists or tuples together into one list (of lists/tuples).
    Use this as the collate function in a Dataloader, if you want to have a list of
    items as an output, as opposed to tensors (eg. Brambox.boxes).
    """
    columns = list(zip(*batch))
    collated = []
    for column in columns:
        if isinstance(column[0], (list, tuple)):
            # Keep annotation-style fields as plain Python lists.
            collated.append(list(column))
        else:
            # Everything else goes through the default tensor collation.
            collated.append(default_collate(column))
    return collated
| 2.78125 | 3 |
env/lib/python3.9/site-packages/ansible/modules/cloud/amazon/_ec2_vpc_vpn_facts.py | unbounce/aws-name-asg-instances | 17 | 17211 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: ec2_vpc_vpn_info
version_added: 1.0.0
short_description: Gather information about VPN Connections in AWS.
description:
- Gather information about VPN Connections in AWS.
- This module was called C(ec2_vpc_vpn_facts) before Ansible 2.9. The usage did not change.
requirements: [ boto3 ]
author: <NAME> (@Madhura-CSI)
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnConnections.html) for possible filters.
required: false
type: dict
vpn_connection_ids:
description:
- Get details of a specific VPN connections using vpn connection ID/IDs. This value should be provided as a list.
required: false
type: list
elements: str
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
'''
EXAMPLES = r'''
# # Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Gather information about all vpn connections
community.aws.ec2_vpc_vpn_info:
- name: Gather information about a filtered list of vpn connections, based on tags
community.aws.ec2_vpc_vpn_info:
filters:
"tag:Name": test-connection
register: vpn_conn_info
- name: Gather information about vpn connections by specifying connection IDs.
community.aws.ec2_vpc_vpn_info:
filters:
vpn-gateway-id: vgw-cbe66beb
register: vpn_conn_info
'''
RETURN = r'''
vpn_connections:
description: List of one or more VPN Connections.
returned: always
type: complex
contains:
category:
description: The category of the VPN connection.
returned: always
type: str
sample: VPN
customer_gatway_configuration:
description: The configuration information for the VPN connection's customer gateway (in the native XML format).
returned: always
type: str
customer_gateway_id:
description: The ID of the customer gateway at your end of the VPN connection.
returned: always
type: str
sample: cgw-17a53c37
options:
description: The VPN connection options.
returned: always
type: dict
sample: {
"static_routes_only": false
}
routes:
description: List of static routes associated with the VPN connection.
returned: always
type: complex
contains:
destination_cidr_block:
description: The CIDR block associated with the local subnet of the customer data center.
returned: always
type: str
sample: 10.0.0.0/16
state:
description: The current state of the static route.
returned: always
type: str
sample: available
state:
description: The current state of the VPN connection.
returned: always
type: str
sample: available
tags:
description: Any tags assigned to the VPN connection.
returned: always
type: dict
sample: {
"Name": "test-conn"
}
type:
description: The type of VPN connection.
returned: always
type: str
sample: ipsec.1
vgw_telemetry:
description: Information about the VPN tunnel.
returned: always
type: complex
contains:
accepted_route_count:
description: The number of accepted routes.
returned: always
type: int
sample: 0
last_status_change:
description: The date and time of the last change in status.
returned: always
type: str
sample: "2018-02-09T14:35:27+00:00"
outside_ip_address:
description: The Internet-routable IP address of the virtual private gateway's outside interface.
returned: always
type: str
sample: 13.127.79.191
status:
description: The status of the VPN tunnel.
returned: always
type: str
sample: DOWN
status_message:
description: If an error occurs, a description of the error.
returned: always
type: str
sample: IPSEC IS DOWN
certificate_arn:
description: The Amazon Resource Name of the virtual private gateway tunnel endpoint certificate.
returned: when a private certificate is used for authentication
type: str
sample: "arn:aws:acm:us-east-1:123456789101:certificate/c544d8ce-20b8-4fff-98b0-example"
vpn_connection_id:
description: The ID of the VPN connection.
returned: always
type: str
sample: vpn-f700d5c0
vpn_gateway_id:
description: The ID of the virtual private gateway at the AWS side of the VPN connection.
returned: always
type: str
sample: vgw-cbe56bfb
'''
import json
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # caught by AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
boto3_tag_list_to_ansible_dict,
camel_dict_to_snake_dict,
)
def date_handler(obj):
    """json.dumps fallback: render datetime-like objects via isoformat,
    pass everything else through unchanged."""
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    return obj
def list_vpn_connections(connection, module):
    """Describe matching VPN connections and exit the module with the result.

    Calls EC2 ``describe_vpn_connections`` with the user-supplied filters or
    connection ids, snake-cases the result keys, flattens boto3 tag lists to
    plain dicts, and finishes via ``module.exit_json`` (does not return
    normally on success).
    """
    params = dict()
    params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
    params['VpnConnectionIds'] = module.params.get('vpn_connection_ids')
    try:
        # Round-trip through json with date_handler to stringify datetimes.
        result = json.loads(json.dumps(connection.describe_vpn_connections(**params), default=date_handler))
    except ValueError as e:
        module.fail_json_aws(e, msg="Cannot validate JSON data")
    except (ClientError, BotoCoreError) as e:
        module.fail_json_aws(e, msg="Could not describe customer gateways")
    snaked_vpn_connections = [camel_dict_to_snake_dict(vpn_connection) for vpn_connection in result['VpnConnections']]
    if snaked_vpn_connections:
        for vpn_connection in snaked_vpn_connections:
            # Convert [{'Key': ..., 'Value': ...}] tag lists into a flat dict.
            vpn_connection['tags'] = boto3_tag_list_to_ansible_dict(vpn_connection.get('tags', []))
    module.exit_json(changed=False, vpn_connections=snaked_vpn_connections)
def main():
    """Ansible module entry point.

    ``vpn_connection_ids`` and ``filters`` are mutually exclusive; check mode
    is supported because the module only reads state.
    """
    argument_spec = dict(
        vpn_connection_ids=dict(default=[], type='list', elements='str'),
        filters=dict(default={}, type='dict')
    )
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              mutually_exclusive=[['vpn_connection_ids', 'filters']],
                              supports_check_mode=True)
    # Warn when invoked under the legacy '_facts' alias of this module.
    if module._module._name == 'ec2_vpc_vpn_facts':
        module._module.deprecate("The 'ec2_vpc_vpn_facts' module has been renamed to 'ec2_vpc_vpn_info'", date='2021-12-01', collection_name='community.aws')
    connection = module.client('ec2')
    list_vpn_connections(connection, module)
if __name__ == '__main__':
main()
| 1.742188 | 2 |
cogs/roleselector.py | YouGotSchott/tcs-discord-bot | 1 | 17212 | import discord
from discord.ext import commands
from pathlib import Path
from config import bot
from collections import OrderedDict
import json
class RoleSelector(commands.Cog):
    """Reaction-role cog: maintains a single embed in the #roles channel
    whose reactions toggle optional guild roles on and off."""
    def __init__(self, bot):
        self.bot = bot
        # JSON store persisting the role-selector message id across restarts.
        self.messages_path = str(Path('cogs/data/messages.json'))
    async def opener(self):
        """Load the persisted message-id store from disk."""
        with open(self.messages_path, 'r') as f:
            return json.load(f)
    async def closer(self, messages):
        """Write the message-id store back to disk."""
        with open(self.messages_path, 'w') as f:
            json.dump(messages, f)
    @commands.Cog.listener()
    async def on_ready(self):
        """Create or refresh the role-selector embed and seed its reactions."""
        # NOTE(review): assumes the bot lives in exactly one guild
        # (guilds[0]) and that a channel named 'roles' exists — confirm.
        emojis = self.emoji_selector(self.bot.guilds[0].id)
        channel = discord.utils.get(self.bot.get_all_channels(), name='roles')
        text = await self.embeder(self.data(emojis))
        messages = await self.opener()
        try:
            # Reuse the previously posted message if it still exists.
            self.msg = await channel.fetch_message(messages['role_message']['id'])
            await self.msg.edit(embed=text)
        except:
            # NOTE(review): bare except — also swallows permission/HTTP errors,
            # not just "message not found".
            print("Role Message hasn't been added yet")
            self.msg = await channel.send(embed=text)
            messages['role_message'] = {}
            messages['role_message']['id'] = self.msg.id
            await self.closer(messages)
        for emoji in emojis.values():
            await self.msg.add_reaction(emoji=emoji)
    @commands.Cog.listener(name='on_raw_reaction_add')
    async def role_reaction_add(self, payload):
        """Toggle the role mapped to the emoji the user reacted with."""
        try:
            # self.msg may not exist yet if a reaction arrives before on_ready.
            if payload.message_id != self.msg.id:
                return
        except AttributeError:
            return
        guild = self.bot.get_guild(payload.guild_id)
        user = guild.get_member(payload.user_id)
        if user.id == self.bot.user.id:
            return
        emojis = self.emoji_selector(guild.id)
        clean_emoji = str(payload.emoji).strip('<:>')
        for k, v in emojis.items():
            # NOTE(review): substring match ('v in clean_emoji') could misfire
            # if one emoji string is contained in another — verify.
            if v in clean_emoji:
                role = discord.utils.get(user.guild.roles, name=k)
                if 'mission-maker' in k:
                    # mission-maker requires prior Saturday-op attendance.
                    results = await self.saturday_check()
                    if user.id not in results:
                        await self.msg.remove_reaction(v, user)
                        return
                if 'auditor' in k:
                    # auditor requires holding the mission-maker role first.
                    role_mm = discord.utils.get(user.guild.roles, name='mission-maker')
                    if role_mm not in user.roles:
                        await self.msg.remove_reaction(v, user)
                        return
                if role in user.roles:
                    await user.remove_roles(role)
                else:
                    await user.add_roles(role)
                await self.msg.remove_reaction(v, user)
    async def saturday_check(self):
        """Return the list of user ids recorded in the attendance table."""
        results = await self.bot.conn.fetch("""
            SELECT user_id FROM attendance""")
        id_list = [x["user_id"] for x in results]
        return id_list
    async def embeder(self, msg_embed):
        """Build the role-selector embed from self.msg_embed/field_dict/footer."""
        # NOTE(review): the msg_embed parameter is ignored — self.data() returns
        # None but populates self.msg_embed/self.field_dict/self.footer, which
        # is what this method actually reads. Confirm before relying on it.
        em = discord.Embed(
            title=self.msg_embed['title'], description=self.msg_embed['description'], color=0x008080)
        em.set_thumbnail(url=self.msg_embed['thumbnail'])
        for value in self.field_dict.values():
            em.add_field(name=value['name'], value=value['value'], inline=False)
        em.set_footer(text=self.footer['footer'])
        return em
    def emoji_selector(self, guild):
        """Return the role-name -> emoji mapping for the given guild id.

        The main guild (169696752461414401) uses its own custom emoji set;
        every other guild gets the fallback set.
        """
        if 169696752461414401 == guild:
            emojis = OrderedDict([
                ('mission-maker', 'feelscornman:485958281458876416'),
                ('auditor', '\U0001F913'),
                ('heretic', '\U0001f300'),
                ('liberation', 'finger_gun:300089586460131328'),
                ('r6siege', '\U0001f308'),
                ('ricefields', 'rice_fields:483791993370181632'),
                ('minecraft', '\U000026cf'),
                ('flight-sims', '\U0001f525'),
                ('vr', 'iron_uncle:548645154454765568'),
                ('zeus-op', '\U000026a1'),
                ('4x', '\U0001f3ed'),
                ('rts', 'smoothbrain:592115163390410783'),
                ('destiny-2', '\U0001f47e'),
                ('squad', 'CplChad:409868955239579649'),
                ('zomboid', 'the_devil:663562931681624081')
            ])
        else:
            emojis = OrderedDict([
                ('mission-maker', 'uncle:567728566540697635'),
                ('auditor', '\U0001F913'),
                ('heretic', '\U0001f300'),
                ('liberation', 'snek_uncle:567728565781528576'),
                ('r6siege', '\U0001f3c3'),
                ('ricefields', 'shadow_uncle:567728565248851989'),
                ('minecraft', '\U000026cf'),
                ('flight-sims', '\U0001f525'),
                ('vr', 'jensen_uncle:567728565391589399'),
                ('zeus-op', '\U000026a1'),
                ('4x', '\U0001f3ed'),
                ('rts', 'fast_uncle:567728565525807104'),
                ('destiny-2', '\U0001f47e'),
                ('squad', 'uncle_uncle:567728565785985025'),
                ('zomboid', 'uncle_hacker:567728565798567940')
            ])
        return emojis
    def data(self, emojis):
        """Populate self.msg_embed, self.field_dict and self.footer with the
        embed content for the given emoji mapping. Returns None."""
        self.msg_embed = OrderedDict([
            ('title', '**TCS Role Selector**'),
            ('description', '''Use this tool to select optional Discord roles.\n\n'''
                            '''**DO NOT ABUSE THE BOT!**\n'''
                            '''\u200B'''),
            ('thumbnail', 'https://s3.amazonaws.com/files.enjin.com/1015535/site_logo/2020_logo.png')
        ])
        self.field_dict = OrderedDict([
            ('mission_maker', OrderedDict([
                ('name', '<:{}> @mission-maker'.format(emojis['mission-maker'])),
                ('value', '''Provides access to our mission making channels, which *MAY HAVE SPOILERS*.\n\n'''
                          '''__**REQUIREMENTS**__\n'''
                          '''**__1.)__** You **MUST** attend a Saturday Op before taking this role.\n'''
                          '''**__2.)__** **ONLY** select this role if you plan on making missions for TCS.\n'''
                          '''**__3.)__** **DO NOT** use this role to provide feedback or suggestions in the mission making channel, use **#debriefing**.\n'''
                          '''**__4.)__** Understand that we make missions differently than other units.\n'''
                          '''**__5.)__** Understand that this is not an easy job and you might not get it right the first time.\n'''
                          '''\u200B''')])
             ),
            ('auditor', OrderedDict([
                ('name', '{} @auditor'.format(emojis['auditor'])),
                ('value', '''Allows other mission makers to ping you to check their missions for errors. *(requires @mission-maker tag)*\n''')])
             ),
            ('heretic', OrderedDict([
                ('name', '{} @heretic'.format(emojis['heretic'])),
                ('value', '''Provides access to the **#heresy** channel.\n'''
                          '''*A place for Warhammer 40K discussion and shitposting.*''')])
             ),
            ('liberation', OrderedDict([
                ('name', '<:{}> @liberation'.format(emojis['liberation'])),
                ('value', '''Allows other members to ping you to play *Arma 3 Liberation* on our server.''')])
             ),
            ('r6siege', OrderedDict([
                ('name', '{} @r6siege'.format(emojis['r6siege'])),
                ('value', '''Allows other members to ping you to play *Rainbow Six Siege*.''')])
             ),
            ('ricefields', OrderedDict([
                ('name', '<:{}> @ricefields'.format(emojis['ricefields'])),
                ('value', '''Allows other members to ping you to play *Rising Storm 2: Vietnam*.''')])
             ),
            ('minecraft', OrderedDict([
                ('name', '{} @minecraft'.format(emojis['minecraft'])),
                ('value', '''Allows other members to ping you to play *Minecraft* on our server.''')])
             ),
            ('flight_sims', OrderedDict([
                ('name', '{} @flight-sims'.format(emojis['flight-sims'])),
                ('value', '''Allows other members to ping you to play *DCS* or *IL2*.''')])
             ),
            ('vr', OrderedDict([
                ('name', '<:{}> @vr'.format(emojis['vr'])),
                ('value', '''Allows other members to ping you to play any *Virtual Reality Games*.''')])
             ),
            ('zeus-op', OrderedDict([
                ('name', '{} @zeus-op'.format(emojis['zeus-op'])),
                ('value', '''Allows other members to ping you to play *Impromptu Zeus Missions*.\n\n'''
                          '''__**RULES**__\n'''
                          '''**__1.)__** Don't expect someone to step-up as Zeus.\n'''
                          '''**__2.)__** Zeus has final say on what's allowed in their mission.\n'''
                          '''\u200B''')])
             ),
            ('4x', OrderedDict([
                ('name', '{} @4x'.format(emojis['4x'])),
                ('value', '''Allows other members to ping you to play *4X Games*.\n\n'''
                          '''__**Active Games**__\n'''
                          '''> *Hearts of Iron 4*\n'''
                          '''> *Stellaris*\n'''
                          '''\u200B''')])
             ),
            ('rts', OrderedDict([
                ('name', '<:{}> @rts'.format(emojis['rts'])),
                ('value', '''Allows other members to ping you to play *RTS Games*.\n\n'''
                          '''__**Active Games**__\n'''
                          '''> *Wargame: Red Dragon*\n'''
                          '''> *Wargame: War in the East*\n'''
                          '''> *Men of War: Assault Squad 2*\n'''
                          '''> *StarCraft 2*\n'''
                          '''\u200B''')])
             ),
            ('destiny-2', OrderedDict([
                ('name', '{} @destiny-2'.format(emojis['destiny-2'])),
                ('value', '''Allows other members to ping you to play *Destiny 2*.\n\n'''
                 )])
             ),
            ('squad', OrderedDict([
                ('name', '<:{}> @squad'.format(emojis['squad'])),
                ('value', '''Allows other members to ping you to play *Squad*.\n\n'''
                 )])
             ),
            ('zomboid', OrderedDict([
                ('name', '<:{}> @zomboid'.format(emojis['zomboid'])),
                ('value', '''Allows other members to ping you to play organized *Project Zomboid*.\n\n'''
                 )])
             )
        ])
        self.footer = OrderedDict([
            ('footer', '''React to toggle role on/off''')
        ])
def setup(bot):
    """Extension entry point: register the RoleSelector cog on the bot."""
    cog = RoleSelector(bot)
    bot.add_cog(cog)
| 2.46875 | 2 |
Bunnies.py | fatih-iver/Intro-to-Computer-Science-with-Python | 0 | 17213 | <filename>Bunnies.py
# Define a procedure, fibonacci, that takes a natural number as its input, and
# returns the value of that fibonacci number.
# Two Base Cases:
# fibonacci(0) => 0
# fibonacci(1) => 1
# Recursive Case:
# n > 1 : fibonacci(n) => fibonacci(n-1) + fibonacci(n-2)
def fibonacci(n):
    """Return the n-th Fibonacci number (fibonacci(0) == 0, fibonacci(1) == 1).

    Iterative implementation: O(n) time and O(1) space, unlike the naive
    double recursion, which is exponential in n and blows the recursion
    limit for moderately large inputs.
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
# Smoke test: expected output is shown in the '#>>>' comments below.
print (fibonacci(0))
#>>> 0
print (fibonacci(1))
#>>> 1
print (fibonacci(15))
#>>> 610 | 4.28125 | 4 |
symphony/cli/graphql_compiler/tests/test_utils_codegen.py | remo5000/magma | 1 | 17214 | #!/usr/bin/env python3
from .base_test import BaseTest
from fbc.symphony.cli.graphql_compiler.gql.utils_codegen import CodeChunk
class TestRendererDataclasses(BaseTest):
    """Exercise CodeChunk code generation by compiling and running its output
    via ``self.load_module``."""

    def test_codegen_write_simple_strings(self):
        """write() plus indent() emit a runnable function definition."""
        gen = CodeChunk()
        gen.write('def sum(a, b):')
        gen.indent()
        gen.write('return a + b')
        code = str(gen)
        m = self.load_module(code)
        assert m.sum(2, 3) == 5

    def test_codegen_write_template_strings_args(self):
        """write() formats positional template arguments."""
        gen = CodeChunk()
        gen.write('def {0}(a, b):', 'sum')
        gen.indent()
        gen.write('return a + b')
        code = str(gen)
        m = self.load_module(code)
        assert m.sum(2, 3) == 5

    def test_codegen_write_template_strings_kwargs(self):
        """write() formats keyword template arguments."""
        gen = CodeChunk()
        gen.write('def {method}(a, b):', method='sum')
        gen.indent()
        gen.write('return a + b')
        code = str(gen)
        m = self.load_module(code)
        assert m.sum(2, 3) == 5

    def test_codegen_block(self):
        """block() indents everything written inside the context manager."""
        gen = CodeChunk()
        gen.write('def sum(a, b):')
        with gen.block():
            gen.write('return a + b')
        code = str(gen)
        m = self.load_module(code)
        assert m.sum(2, 3) == 5

    def test_codegen_write_block(self):
        """write_block() writes a header line and indents its body."""
        gen = CodeChunk()
        with gen.write_block('def {name}(a, b):', name='sum'):
            gen.write('return a + b')
        code = str(gen)
        m = self.load_module(code)
        assert m.sum(2, 3) == 5

    def test_codegen_write_lines(self):
        """write_lines() emits each list element on its own line.

        Bug fix: the original list was missing a comma after
        ``'def sum(a, b):'``, so Python implicitly concatenated it with the
        following literal into a single (accidentally still valid) one-line
        def instead of exercising multi-line output.
        """
        lines = [
            '@staticmethod',
            'def sum(a, b):',
            '    return a + b',
        ]
        gen = CodeChunk()
        gen.write('class Math:')
        gen.indent()
        gen.write_lines(lines)
        code = str(gen)
        m = self.load_module(code)
        assert m.Math.sum(2, 3) == 5
| 2.421875 | 2 |
api/__init__.py | zhangyouliang/TencentComicBook | 0 | 17215 | from flask import Flask
def create_app():
    """Application factory: build and configure the Flask app instance."""
    application = Flask(__name__)
    # Allow non-ASCII characters to pass through JSON responses unescaped.
    application.config['JSON_AS_ASCII'] = False
    from .views import app as main_app
    application.register_blueprint(main_app)
    return application
| 1.625 | 2 |
misc/pytorch_toolkit/chest_xray_screening/chest_xray_screening/utils/get_config.py | a-a-egorovich/training_extensions | 0 | 17216 | <gh_stars>0
import os
import json
def get_config(action, optimised=False):
    """Load one section of the model configuration from the ``configs`` dir.

    Args:
        action: Section to load: 'train', 'test', 'export' or 'download'
            (the download config is a whole file of its own; the original
            docstring said 'gdrive', but the code branches on 'download').
        optimised: False --> DenseNet121 config file,
                   True  --> DenseNet121Eff config file.
            Ignored when ``action == 'download'``.

    Returns:
        The parsed configuration dict for the requested action.
    """
    root_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
    config_path = os.path.join(root_path, 'configs')
    if action == 'download':
        with open(os.path.join(config_path, 'download_configs.json')) as f1:
            config = json.load(f1)
    else:
        # Both model variants share the same file layout; only the name differs,
        # so one branch replaces the original duplicated blocks.
        name = 'densenet121eff_config.json' if optimised else 'densenet121_config.json'
        with open(os.path.join(config_path, name)) as f1:
            config = json.load(f1)[action]
    return config
| 2.53125 | 3 |
src/udpa/annotations/versioning_pb2.py | pomerium/enterprise-client-python | 1 | 17217 | <filename>src/udpa/annotations/versioning_pb2.py
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: udpa/annotations/versioning.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='udpa/annotations/versioning.proto',
package='udpa.annotations',
syntax='proto3',
serialized_options=b'Z\"github.com/cncf/xds/go/annotations',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n!udpa/annotations/versioning.proto\x12\x10udpa.annotations\x1a google/protobuf/descriptor.proto\"5\n\x14VersioningAnnotation\x12\x1d\n\x15previous_message_type\x18\x01 \x01(\t:^\n\nversioning\x12\x1f.google.protobuf.MessageOptions\x18\xd3\x88\xe1\x03 \x01(\x0b\x32&.udpa.annotations.VersioningAnnotationB$Z\"github.com/cncf/xds/go/annotationsb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
VERSIONING_FIELD_NUMBER = 7881811
versioning = _descriptor.FieldDescriptor(
name='versioning', full_name='udpa.annotations.versioning', index=0,
number=7881811, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
_VERSIONINGANNOTATION = _descriptor.Descriptor(
name='VersioningAnnotation',
full_name='udpa.annotations.VersioningAnnotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='previous_message_type', full_name='udpa.annotations.VersioningAnnotation.previous_message_type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=89,
serialized_end=142,
)
DESCRIPTOR.message_types_by_name['VersioningAnnotation'] = _VERSIONINGANNOTATION
DESCRIPTOR.extensions_by_name['versioning'] = versioning
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
VersioningAnnotation = _reflection.GeneratedProtocolMessageType('VersioningAnnotation', (_message.Message,), {
'DESCRIPTOR' : _VERSIONINGANNOTATION,
'__module__' : 'udpa.annotations.versioning_pb2'
# @@protoc_insertion_point(class_scope:udpa.annotations.VersioningAnnotation)
})
_sym_db.RegisterMessage(VersioningAnnotation)
versioning.message_type = _VERSIONINGANNOTATION
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(versioning)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 1.335938 | 1 |
modules/action/scan_smbclient_nullsession.py | mrpnkt/apt2 | 37 | 17218 | <reponame>mrpnkt/apt2
import re
from core.actionModule import actionModule
from core.keystore import KeyStore as kb
from core.utils import Utils
class scan_smbclient_nullsession(actionModule):
    """APT2 action module: test each SMB target (tcp/139, tcp/445) for
    anonymous (NULL session) access via ``smbclient -N -L <IP>``."""

    def __init__(self, config, display, lock):
        """Register module metadata, requirements and trigger events."""
        super(scan_smbclient_nullsession, self).__init__(config, display, lock)
        self.title = "Test for NULL Session"
        self.shortName = "NULLSessionSmbClient"
        self.description = "execute [smbclient -N -L <IP>] on each target"
        self.requirements = ["smbclient"]
        self.triggers = ["newPort_tcp_445", "newPort_tcp_139"]
        self.safeLevel = 5

    def getTargets(self):
        """Pull every host with an open SMB/NetBIOS port from the keystore."""
        # we are interested in all hosts
        self.targets = kb.get('port/tcp/139', 'port/tcp/445')

    def process(self):
        """For each unseen target, resolve its workgroup with nmblookup and
        attempt an anonymous smbclient share listing; on success fire the
        ``nullSession`` trigger and record the vulnerability."""
        # load any targets we are interested in
        self.getTargets()
        # loop over each target
        for t in self.targets:
            # verify we have not tested this host before
            if not self.seentarget(t):
                # add the new IP to the already seen list
                self.addseentarget(t)
                self.display.verbose(self.shortName + " - Connecting to " + t)
                # get windows domain/workgroup
                temp_file2 = self.config["proofsDir"] + "nmblookup_" + t + "_" + Utils.getRandStr(10)
                command2 = self.config["nmblookup"] + " -A " + t
                result2 = Utils.execWait(command2, temp_file2)
                # fall back to the default workgroup if nmblookup finds none
                workgroup = "WORKGROUP"
                for line in result2.split('\n'):
                    # a "<00> - <GROUP>" NetBIOS record names the workgroup/domain
                    m = re.match(r'\s+(.*)\s+<00> - <GROUP>.*', line)
                    if (m):
                        workgroup = m.group(1).strip()
                self.display.debug("found ip [%s] is on the workgroup/domain [%s]" % (t, workgroup))
                # make outfile
                outfile = self.config["proofsDir"] + self.shortName + "_" + t + "_" + Utils.getRandStr(10)
                # run smbclient anonymously (original comment said rpcclient)
                command = self.config["smbclient"] + " -N -W " + workgroup + " -L " + t
                result = Utils.execWait(command, outfile)
                # check to see if it worked
                if "Anonymous login successful" in result:
                    # fire a new trigger
                    self.fire("nullSession")
                    # '/' is escaped so the proof path survives URL-style storage
                    self.addVuln(t, "nullSession", {"type": "smb", "output": outfile.replace("/", "%2F")})
                    self.display.error("VULN [NULLSession] Found on [%s]" % t)
                    # TODO - process smbclient results
                    # parse out put and store any new info and fire any additional triggers
                else:
                    # do nothing
                    self.display.verbose("Could not get NULL Session on %s" % t)
        return
| 2.171875 | 2 |
UserSpace/Python/Cosmo.py | dkaramit/MiMeS | 2 | 17219 | from numpy import logspace
from sys import path as sysPath
sysPath.append('../../src')
#load the module
from interfacePy import Cosmo
# Build the cosmology helper from the tabulated equation of state.
# NOTE(review): the numeric arguments (0, 1e5) look like a temperature range
# in GeV -- confirm against the Cosmo constructor's signature.
cosmo=Cosmo('../../src/data/eos2020.dat',0,1e5)
# Print H, h_eff, g_eff and the entropy density over T = 1e-5..1e5 GeV.
for T in logspace(-5,5,50):
    print(
        'T=',T,'GeV\t',
        'H=',cosmo.Hubble(T),'GeV\t',
        'h_eff=',cosmo.heff(T),'\t',
        'g_eff=',cosmo.geff(T),'\t',
        's=',cosmo.s(T),'GeV^3\t',
    )
# Flip this to True to generate the example plots (requires matplotlib).
if False:
    import matplotlib.pyplot as plt
    #########-----g_eff and h_eff-----#########
    fig=plt.figure(figsize=(9,4))
    fig.subplots_adjust(bottom=0.15, left=0.15, top = 0.95, right=0.9,wspace=0.0,hspace=0.0)
    fig.suptitle('')
    sub = fig.add_subplot(1,1,1)
    T=logspace(-5,5,500)
    gt=[cosmo.geff(i) for i in T]
    ht=[cosmo.heff(i) for i in T]
    sub.plot(T,gt,linestyle='--',c='xkcd:red',label=r"$g_{\rm eff} (T)$")
    sub.plot(T,ht,linestyle=':',c='xkcd:black',label=r"$h_{\rm eff} (T)$")
    sub.set_xlabel(r'$T ~ [{\rm GeV}]$')
    sub.set_ylabel(r'rel. dof')
    sub.legend(bbox_to_anchor=(1, 0.0),borderaxespad=0.,
    borderpad=0.05,ncol=1,loc='lower right',fontsize=14,framealpha=0)
    sub.set_yscale('log')
    sub.set_xscale('log')
    fig.savefig('rdofs-T_examplePlot.pdf',bbox_inches='tight')
    #########-----dg_effdT and dh_effdT-----#########
    fig=plt.figure(figsize=(9,4))
    fig.subplots_adjust(bottom=0.15, left=0.15, top = 0.95, right=0.9,wspace=0.0,hspace=0.0)
    fig.suptitle('')
    sub = fig.add_subplot(1,1,1)
    T=logspace(-5,5,500)
    dg=[cosmo.dgeffdT (i) for i in T]
    dh=[cosmo.dheffdT(i) for i in T]
    sub.plot(T,dg,linestyle='--',c='xkcd:red',label=r"$\dfrac{d g_{\rm eff}}{dT} (T)$")
    sub.plot(T,dh,linestyle=':',c='xkcd:black',label=r"$\dfrac{d h_{\rm eff}}{dT} (T)$")
    sub.set_xlabel(r'$T ~ [{\rm GeV}]$')
    sub.legend(bbox_to_anchor=(1, 0.5),borderaxespad=0.,
    borderpad=0.05,ncol=1,loc='lower right',fontsize=14,framealpha=0)
    # symlog because the derivatives change sign
    sub.set_yscale('symlog')
    sub.set_xscale('log')
    fig.savefig('drdofsdT-T_examplePlot.pdf',bbox_inches='tight')
    #########-----dh-----#########
    fig=plt.figure(figsize=(9,4))
    fig.subplots_adjust(bottom=0.15, left=0.15, top = 0.95, right=0.9,wspace=0.0,hspace=0.0)
    fig.suptitle('')
    sub = fig.add_subplot(1,1,1)
    T=logspace(-5,5,500)
    dht=[cosmo.dh(i) for i in T]
    sub.plot(T,dht,linestyle='-',c='xkcd:black')
    sub.set_xlabel(r'$T ~ [{\rm GeV}]$')
    sub.set_ylabel(r'$\delta_h = 1 + \dfrac{1}{3} \dfrac{d \log h_{\rm eff} }{d \log T}$')
    sub.set_yscale('linear')
    sub.set_xscale('log')
    fig.savefig('dh-T_examplePlot.pdf',bbox_inches='tight')
| 2.03125 | 2 |
src/psion/oauth2/endpoints/revocation.py | revensky/psion | 2 | 17220 | <filename>src/psion/oauth2/endpoints/revocation.py
from __future__ import annotations
from typing import Optional
from psion.oauth2.exceptions import InvalidClient, OAuth2Error, UnsupportedTokenType
from psion.oauth2.models import JSONResponse, Request
from .base import BaseEndpoint
class RevocationEndpoint(BaseEndpoint):
    """
    Endpoint used by the `Client` to revoke a token in its possession.

    If the Client succeeds to authenticate but provides a token that was
    not issued to itself, the `Provider` **DOES NOT** revoke the token,
    since the Client is not authorized to operate the token.

    If the token is already invalid, does not exist within the Provider
    or is otherwise unknown or invalid, it is also considered "revoked".

    :cvar `__authentication_methods__`: Allowed Client Authentication methods.
    :cvar `__supported_tokens__`: Token types supported by the endpoint.
    """

    __endpoint__: str = "revocation"
    __authentication_methods__: list[str] = None
    __supported_tokens__: list[str] = ["access_token", "refresh_token"]

    async def __call__(self, request: Request) -> JSONResponse:
        """
        Revokes a previously issued Token (RFC 7009).

        The request body ("application/x-www-form-urlencoded") carries:

        ``token`` (REQUIRED)
            The token that the client wants to get revoked.

        ``token_type_hint`` (OPTIONAL)
            A hint about the type of the submitted token, one of
            ``access_token`` or ``refresh_token`` (RFC 7009, Section 2.1).
            An unknown hint value raises :class:`UnsupportedTokenType`.

        Per RFC 7009, Section 2.2, the server responds with HTTP 200 both
        when the token was revoked and when the client submitted an invalid
        or unknown token, since the purpose of the request - invalidating
        the particular token - is already achieved. Invalid tokens therefore
        never produce an error response.

        :raises UnsupportedTokenType: The provided token_type_hint is not supported.
        """
        try:
            client = await self.authenticate(request, self.__authentication_methods__)

            data = request.form()

            token: str = data.get("token")
            token_type_hint: Optional[str] = data.get("token_type_hint")

            # A missing or malformed token is treated as already revoked;
            # RFC 7009 mandates a 200 response here as well. (Bug fix: the
            # original bare ``return`` yielded None instead of a response.)
            if not token or not isinstance(token, str):
                return JSONResponse()

            if token_type_hint:
                if token_type_hint not in self.__supported_tokens__:
                    raise UnsupportedTokenType

            await self.adapter.revoke_token(client, token, token_type_hint)

            # Empty 200: all the client needs to know is in the status code.
            return JSONResponse()
        except InvalidClient as exc:
            # Failed client authentication: 401 plus any challenge headers
            # carried by the exception.
            return JSONResponse(401, exc.headers, exc.dump())
        except OAuth2Error as exc:
            return JSONResponse(400, exc.headers, exc.dump())
| 2.75 | 3 |
experiments/Browser/browser.py | rajKarra69420/bento | 3 | 17221 | #!/usr/bin/env python3
import argparse
import logging
import sys
import zlib
sys.path.append("../..")
from bento.client.api import ClientConnection
from bento.common.protocol import *
import bento.common.util as util
function_name= "browser"
function_code= """
import requests
import zlib
import os
def browser(url, padding):
body= requests.get(url, timeout=1).content
compressed= zlib.compress(body)
final= compressed
if padding - len(final) > 0:
final= final + (os.urandom(padding - len(final)))
else:
final= final + (os.urandom((len(final) + padding) % padding))
api.send(final)
"""
@util.timeit
def main():
    """Parse CLI args, upload the ``browser`` function to the Bento server,
    execute it remotely, and print the decompressed page body."""
    logging.basicConfig(format='%(levelname)s:\t%(message)s',
            level=logging.DEBUG)
    parser = argparse.ArgumentParser(
        description='Fetch a website and pad response with dummy bytes')
    parser.add_argument('host', help="server's IPv4 address")
    parser.add_argument('port', type=int, help="server's port")
    parser.add_argument('url', help="URL to fetch")
    # NOTE(review): help text looks truncated ("to ne") -- probably meant
    # "pad URL body to n bytes".
    parser.add_argument('padding', help="pad URL body to ne")
    args = parser.parse_args()
    conn= ClientConnection(args.host, args.port)
    # Store the function source on the server; the token identifies it.
    token, errmsg= conn.send_store_request(function_name, function_code)
    if errmsg is not None:
        util.fatal(f"Error message from server {errmsg}")
    logging.debug(f"Got token: {token}")
    call= f"{function_name}('{args.url}', {args.padding})"
    session_id, errmsg= conn.send_execute_request(call, token)
    if errmsg is not None:
        util.fatal(f"Error message from server {errmsg}")
    logging.debug(f"Got session_id: {session_id}")
    logging.debug("Getting output...")
    # Open the session channel and read the padded, compressed body back.
    conn.send_open_request(session_id)
    data, session_id, err= conn.get_sessionmsg()
    print(zlib.decompress(data))
if __name__ == '__main__':
    main()
| 2.515625 | 3 |
app/views/v1/search.py | daghan/Ostrich | 0 | 17222 | from app import webapp, mysql
from app.models import Search , Utils, Collection, WebUtils
from flask import request, jsonify
from flask.ext.jsonpify import jsonify as jsonp
import json
'''
Generic search call
@params
q: search query
page: the page number of search results (default 0)
type: type of search: {default: free(all fields), category, isbn}
@response
List of search result objects(ES)
'''
@webapp.route('/search')
def searchString():
    """Generic /search endpoint.

    Query params: q (required), page (1-based; converted to 0-based for the
    Search backend), type (free/category/collections/isbn/auto/custom),
    userId, flow, gcm_id, distinct_id, ref (web requests short-circuit to
    WebUtils).  Returns JSON (or JSONP for the admin flow).
    """
    response = {'status': 'False'}
    results = {}
    query = Utils.getParam(request.args, 'q')
    page = Utils.getParam(request.args, 'page', var_type='int', default=1)
    search_type = Utils.getParam(request.args, 'type', default='free')
    user_id = Utils.getParam(request.args, 'userId', 'int')
    flow = Utils.getParam(request.args, 'flow', default='borrow')
    gcm_id = Utils.getParam(request.args, 'gcm_id', default=None)
    uuid = Utils.getParam(request.args, 'distinct_id', default=None)
    ref = Utils.getParam(request.args, 'ref', default='mobile')
    if not query:
        return Utils.errorResponse(response, 'HTTP_STATUS_CODE_DATA_MISSING')
    # Web clients are served by a separate result formatter.
    if ref == 'web':
        return jsonify(WebUtils.fetchSearchResults(query, search_type, page))
    user_info = {'user_id': user_id, 'gcm_id': gcm_id, 'uuid': uuid}
    search = Search(query, user_info, flow)
    if search_type == 'free':
        results = search.basicSearch(page=page-1)
    elif search_type == 'category':
        results = search.categorySearch(page=page-1)
    elif search_type == 'collections':
        results = search.collectionsSearch(page=page-1)
    elif search_type == 'isbn':
        results = search.isbnSearch(page=page-1)
    elif search_type == 'auto':
        results = search.autoComplete()
    elif search_type == 'custom':
        results = search.customQuery()
        # NOTE(review): 'custom' returns the raw results (no jsonify) and
        # skips the search logging below -- confirm this is intentional.
        return results
    #log searches from non-admin users only
    if user_id not in Utils.getAdmins():
        Search.logSearch({_:request.args.get(_) for _ in request.args}, search_type)
    return jsonify(results) if flow != 'admin' else jsonp(results)
@webapp.route('/getCategories')
def getCategories():
    """Return the search categories shown in the mobile app."""
    return jsonify(Search.getSearchCategoriesForApp())
@webapp.route('/getCollectionCategory')
def getCollectionCategory():
    """Return collections grouped by category."""
    grouped = Collection.getByCategory()
    return jsonify(grouped)
@webapp.route('/searchFail', methods=['POST'])
def searchFail():
    """Acknowledge a failed-search report from the client.

    Deprecated: failed searches are now logged directly from the backend,
    so the request payload is ignored and this endpoint only returns a
    success acknowledgement for backward compatibility.  (The original
    body after the early return was unreachable and has been removed.)
    """
    return jsonify(status='true')
@webapp.route('/recommended', methods=['GET'])
def recommended():
    """Return the most recommended items."""
    results = Search([]).mostRecommended()
    return jsonify(results)
@webapp.route('/mostSearched', methods=['GET'])
def mostSearched():
    """Return the most searched items."""
    results = Search([]).mostSearched()
    return jsonify(results)
@webapp.route('/getMultiplePanels')
def getMultiplePanels():
    """Return full objects for every active collection flagged with
    partial_order, newest first."""
    # NOTE(review): the connection/cursor are never closed -- verify whether
    # mysql.connect() hands out pooled connections or this leaks one per call.
    cursor = mysql.connect().cursor()
    cursor.execute("""SELECT collection_id FROM collections WHERE active = 1 AND
                    partial_order = 1 ORDER BY collection_id DESC""")
    panels = []
    # NOTE(review): fetchall() yields 1-tuples, so Collection() receives a
    # tuple rather than a bare id -- confirm Collection handles this.
    for col_id in cursor.fetchall():
        panels.append(Collection(col_id).getObj())
    return jsonify(panels)
| 2.359375 | 2 |
run.py | orest-d/pointcloud-viewer-rs | 0 | 17223 | from flask import Flask, make_response
app = Flask(__name__)


@app.route("/")
@app.route("/index.html")
def index():
    """Serve the viewer's main HTML page."""
    # Context manager closes the handle instead of leaking it until GC.
    with open("assets/index.html") as f:
        return f.read()
@app.route("/assets/<name>")
def wasm(name):
r = make_response(open(f"assets/{name}","rb").read())
if name.endswith(".wasm"):
r.headers.set('Content-Type', "application/wasm")
return r
@app.route("/data.csv")
def csv():
print("GET CSV")
html = open("data.csv").read()
return html
if __name__ == "__main__":
app.run(debug=True,port=8080)
| 2.875 | 3 |
engine/audio/audio_director.py | codehearts/pickles-fetch-quest | 3 | 17224 | from .audio_source import AudioSource
from engine import disk
import pyglet.media
class AudioDirector(object):
    """Director for loading audio and controlling playback.

    Attributes:
        attenuation_distance (int): The default attenuation distance for newly
            loaded audio. Existing audio will retain its attenuation distance,
            see :fn:`set_attenuation_distance` for setting distance on existing
            sources.
        master_volume (float): The master volume for audio playback.
            0 for silence, 1 for nominal volume. A value of 1 disables
            audio attenuation and ignore the position of audio sources.
            To avoid this, set volume to 0.99 or lower.
        position (tuple of int): The location of the audio listener in
            two-dimensional space. Listeners close to this position will be
            louder than those further away.
    """

    def __init__(self, master_volume=1, position=(0, 0)):
        """Creates a director for grouping and controlling audio playback.

        Kwargs:
            master_volume (float, optional): Master volume for audio playback.
                0 for silence, 1 for nominal volume. A value of 1 will disable
                audio attenuation and ignore the position of audio sources.
                To avoid this, set volume to 0.99 or lower. Defaults to 1.
            position (tuple of int, optional): The location of the audio
                listener in two-dimensional space. Listeners close to this
                position will be louder than those farther. Defaults to (0, 0).
        """
        super(AudioDirector, self).__init__()

        self.attenuation_distance = 1
        self.master_volume = master_volume
        self.position = position

        # Cache of loaded resources from disk
        self._disk_cache = {}

        # Groupings for audio sources
        self._groups = {
            'all': set()
        }

    def load(self, filepath, streaming=True):
        """Loads and audio file from disk.

        The loaded audio will be added to the 'all' group for this director.
        A cached object will be returned if the file has already been loaded.

        Streaming should be used for large audio sources, such as music.
        Only one instance of a streaming audio source can be played at a time.

        Args:
            filepath (str): Path to audio, relative to the resource directory.

        Kwargs:
            streaming (bool, optional): Streams the audio from disk rather
                than loading the entire file into memory. Defaults to True.

        Returns:
            An :obj:`audio.AudioSource` object for the resource on disk.
        """
        # Load the file from disk and cache it if necessary
        # NOTE(review): if a path was first loaded with a different
        # ``streaming`` flag, the cached source keeps the original flag.
        if filepath not in self._disk_cache:
            disk_file = disk.DiskLoader.load_audio(filepath, streaming)
            new_source = AudioSource(disk_file, streaming)

            # Cache the new source
            self._disk_cache[filepath] = new_source

            # Apply the default attenuation distance
            new_source.attenuation_distance = self.attenuation_distance

            # Add this audio source to the default group
            self.add(new_source)

        return self._disk_cache[filepath]

    def add(self, audio_source, group='all'):
        """Adds an audio source to a group.

        Grouping audio allows you to control the playback of the entire group
        rather than an individual source instance. By default, the audio source
        is added to the 'all' group.

        Args:
            audio_source (:obj:`audio.AudioSource`): The audio source to add.

        Kwargs:
            group (str, optional): The group to add the audio to.
                Defaults to 'all'.
        """
        self._groups.setdefault(group, set()).add(audio_source)

    def _filter_sources(self, group='all', states=None):
        """Returns all sources in the group matching the given states.

        Kwargs:
            group (str, optional): Name of group to filter. Defaults to 'all'.
            states (list of int, optional): List of :cls:`AudioSource` states
                to filter on. If the list is not empty and a source's state is
                not in the list, it will be excluded from the return value.

        Returns:
            An iterator containing sources in the group matching the states.
        """
        # If the group does not exist, return an empty iterator
        if group not in self._groups:
            return iter(())

        # If there are no states to filter on, return all sources in the group
        if not states:
            return iter(self._groups[group])

        # Return sources in the group matching the states to filter on
        return filter(lambda src: src.state in states, self._groups[group])

    def play(self, group='all'):
        """Plays all audio sources in a group.

        Kwargs:
            group (str, optional): Name of group to play. Defaults to 'all'.
        """
        for audio_source in self._filter_sources(group=group):
            audio_source.play()

    def pause(self, group='all'):
        """Pauses all playing audio sources in a group.

        Audio sources which are not currently playing will be left alone.

        Kwargs:
            group (str, optional): Name of group to pause. Defaults to 'all'.
        """
        states = [AudioSource.PLAY]
        for audio_source in self._filter_sources(group=group, states=states):
            audio_source.pause()

    def stop(self, group='all'):
        """Stops all audio sources in a group.

        Kwargs:
            group (str, optional): Name of group to stop. Defaults to 'all'.
        """
        states = [AudioSource.PLAY, AudioSource.PAUSE]
        for audio_source in self._filter_sources(group=group, states=states):
            audio_source.stop()

    def resume(self, group='all'):
        """Resumes playback of all paused audio sources in a group.

        Audio sources which are not currently paused will be left alone.

        Kwargs:
            group (str, optional): Name of group to resume. Defaults to 'all'.
        """
        states = [AudioSource.PAUSE]
        for audio_source in self._filter_sources(group=group, states=states):
            audio_source.play()

    def set_volume(self, level, group='all'):
        """Sets the volume of all audio sources in a group.

        Args:
            level (float): 0 for silence, 1 for nominal volume.

        Kwargs:
            group (str, optional): Group to set volume of. Defaults to 'all'.
        """
        for audio_source in self._filter_sources(group=group):
            audio_source.volume = level

    def set_attenuation_distance(self, distance, group='all'):
        """Sets the distance from the listener before player volumes attenuate.

        Args:
            distance (int): The distance from the listener before the source
                volume attenuates. Within this distance, the volume remains
                nominal. Outside this distance, the volume approaches zero.

        Kwargs:
            group (str, optional): Group to set distance of. Defaults to 'all'.
        """
        for audio_source in self._filter_sources(group=group):
            audio_source.attenuation_distance = distance

    @property
    def position(self):
        """The position of the listener in 2d space as a tuple-like type."""
        return self._position

    @position.setter
    def position(self, position):
        """Sets the listener location in 2d space with a tuple-like object."""
        self._position = position

        # Pyglet uses 3d coordinates, convert 2d to a 3d tuple
        listener = pyglet.media.get_audio_driver().get_listener()
        listener.position = (position[0], position[1], 0)

    @property
    def master_volume(self):
        """Returns the master audio volume as a float between 0 and 1."""
        listener = pyglet.media.get_audio_driver().get_listener()
        return listener.volume

    @master_volume.setter
    def master_volume(self, level):
        """Sets the master audio playback volume.

        0 for silence, 1 for nominal volume. Setting this to 1 disables audio
        attenuation, ignoring the position of listeners. Set to 0.99 to
        allow for audio positioning.
        """
        listener = pyglet.media.get_audio_driver().get_listener()
        listener.volume = level
| 3.09375 | 3 |
source/windows10 system repair tool.py | programmer24680/windows10-system-repair-tool | 1 | 17225 | import os
import time
print("=====================================================================")
print(" ")
print(" STARTING SYSTEM REPAIR ")
print(" ")
print("=====================================================================")
print(" ")
print("These are the jobs this application can do for you.")
print("1.Clean The DISM Component Store")
print("2.Repair Corrupted Windows Files Using SFC")
print("3.Repair Corrupted Windows Files Using DISM")
choice = input("Enter the serial number of the job which you want this application to do (1/2/3): ")
if choice == "1":
print("Analyzing Component Store")
os.system("dism.exe /Online /Cleanup-Image /AnalyzeComponentStore")
time.sleep(3)
print("Warning: You have to cleanup component store only if necessary.")
time.sleep(3)
Confirmation = input("Do you want to cleanup the component store?(y/n): ")
if Confirmation.upper() == "Y":
os.system("dism.exe /Online /Cleanup-Image /StartComponentCleanup")
time.sleep(3)
print("Now Exiting!")
elif Confirmation.upper() == "N":
print("Skipping Component Cleanup As Per The User's Instructions")
time.sleep(3)
print("Now Exiting!")
time.sleep(1)
else:
print('You have to enter only "y" or "n"')
time.sleep(3)
print("Now Exiting!")
time.sleep(1)
elif choice == "2":
print("Starting SFC Repair Job")
os.system("SFC /SCANNOW")
time.sleep(3)
print("Operation Cpmpleted Successfully!")
time.sleep(3)
print("Now Exiting!")
elif choice == "3":
Internet_Connection = input("Do you have an active internet connection?(y/n): ")
if Internet_Connection.upper() == "N":
iso_file = input("Do you have windows10 wim file?(y/n): ")
if iso_file.upper() == "Y":
Location = input("Enter the location of the wim file: ")
print("Starting DISM")
os.system("dism.exe /Online /Cleanup-Image /RestoreHealth /Source:" + Location + " /LimitAccess")
time.sleep(3)
print("Now Exiting!")
else:
print("Sorry but you need either internet connection or wim file in order to run Dism")
time.sleep(3)
print("Now Exiting!")
elif Internet_Connection.upper() == "Y":
print("Starting DISM")
os.system("dism.exe /Online /Cleanup-Image /RestoreHealth")
time.sleep(3)
print("Now Exiting")
else:
print("You have to enter only Y/N")
time.sleep(3)
else:
print("Choice Not Valid")
time.sleep(3)
print("Now Exiting!")
| 3.765625 | 4 |
tests/test_fid_score.py | jwblangley/pytorch-fid | 1,732 | 17226 | <filename>tests/test_fid_score.py
import numpy as np
import pytest
import torch
from PIL import Image
from pytorch_fid import fid_score, inception
@pytest.fixture
def device():
    # All tests run on CPU so the suite works without CUDA.
    return torch.device('cpu')
def test_calculate_fid_given_statistics(mocker, tmp_path, device):
    """With equal covariances, FID must reduce to ||mu1 - mu2||^2; the
    statistics computation is mocked out to return fixed moments per path."""
    dim = 2048
    m1, m2 = np.zeros((dim,)), np.ones((dim,))
    sigma = np.eye(dim)

    # Return m1 for the path ending in '1' and m2 for the one ending in '2'.
    def dummy_statistics(path, model, batch_size, dims, device, num_workers):
        if path.endswith('1'):
            return m1, sigma
        elif path.endswith('2'):
            return m2, sigma
        else:
            raise ValueError

    mocker.patch('pytorch_fid.fid_score.compute_statistics_of_path',
                 side_effect=dummy_statistics)

    dir_names = ['1', '2']
    paths = []
    for name in dir_names:
        path = tmp_path / name
        path.mkdir()
        paths.append(str(path))

    fid_value = fid_score.calculate_fid_given_paths(paths,
                                                    batch_size=dim,
                                                    device=device,
                                                    dims=dim,
                                                    num_workers=0)

    # Given equal covariance, FID is just the squared norm of difference
    assert fid_value == np.sum((m1 - m2)**2)
def test_compute_statistics_of_path(mocker, tmp_path, device):
model = mocker.MagicMock(inception.InceptionV3)()
model.side_effect = lambda inp: [inp.mean(dim=(2, 3), keepdim=True)]
size = (4, 4, 3)
arrays = [np.zeros(size), np.ones(size) * 0.5, np.ones(size)]
images = [(arr * 255).astype(np.uint8) for arr in arrays]
paths = []
for idx, image in enumerate(images):
paths.append(str(tmp_path / '{}.png'.format(idx)))
Image.fromarray(image, mode='RGB').save(paths[-1])
stats = fid_score.compute_statistics_of_path(str(tmp_path), model,
batch_size=len(images),
dims=3,
device=device,
num_workers=0)
assert np.allclose(stats[0], np.ones((3,)) * 0.5, atol=1e-3)
assert np.allclose(stats[1], np.ones((3, 3)) * 0.25)
def test_compute_statistics_of_path_from_file(mocker, tmp_path, device):
model = mocker.MagicMock(inception.InceptionV3)()
mu = np.random.randn(5)
sigma = np.random.randn(5, 5)
path = tmp_path / 'stats.npz'
with path.open('wb') as f:
np.savez(f, mu=mu, sigma=sigma)
stats = fid_score.compute_statistics_of_path(str(path), model,
batch_size=1,
dims=5,
device=device,
num_workers=0)
assert np.allclose(stats[0], mu)
assert np.allclose(stats[1], sigma)
def test_image_types(tmp_path):
in_arr = np.ones((24, 24, 3), dtype=np.uint8) * 255
in_image = Image.fromarray(in_arr, mode='RGB')
paths = []
for ext in fid_score.IMAGE_EXTENSIONS:
paths.append(str(tmp_path / 'img.{}'.format(ext)))
in_image.save(paths[-1])
dataset = fid_score.ImagePathDataset(paths)
for img in dataset:
assert np.allclose(np.array(img), in_arr)
| 2.125 | 2 |
run/client.py | withcouragetol/codebee-10l | 6 | 17227 | <filename>run/client.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import time
class emsc_client:
def __init__(self):
self.host = "10.10.83.174"
self.port = 5000
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def run(self):
try:
self.conn.connect((self.host, self.port))
while True:
self.conn.send(("来自客户端发送的数据 : " + str(time.time())).encode())
data = self.conn.recv(1024).decode()
print("来自服务端数据 :" + data + "|" + str(time.time()))
time.sleep(100)
except:
print("服务器连接异常,尝试重新连接 (5s) ...")
self.conn.close()
time.sleep(5) # 断开连接后,每5s重新连接一次
emsc_client().run()
finally:
print("客户端已关闭 ...")
if __name__=="__main__":
emsc = emsc_client()
emsc.run() | 3.140625 | 3 |
src/sonic_ax_impl/main.py | stepanblyschak/sonic-snmpagent | 13 | 17228 | """
SNMP subagent entrypoint.
"""
import asyncio
import functools
import os
import signal
import sys
import ax_interface
from sonic_ax_impl.mibs import ieee802_1ab
from . import logger
from .mibs.ietf import rfc1213, rfc2737, rfc2863, rfc3433, rfc4292, rfc4363
from .mibs.vendor import dell, cisco
# Background task update frequency ( in seconds )
DEFAULT_UPDATE_FREQUENCY = 5
event_loop = asyncio.get_event_loop()
shutdown_task = None
class SonicMIB(
rfc1213.InterfacesMIB,
rfc1213.IpMib,
rfc1213.SysNameMIB,
rfc2737.PhysicalTableMIB,
rfc3433.PhysicalSensorTableMIB,
rfc2863.InterfaceMIBObjects,
rfc4363.QBridgeMIBObjects,
rfc4292.IpCidrRouteTable,
ieee802_1ab.LLDPLocalSystemData,
ieee802_1ab.LLDPLocalSystemData.LLDPLocPortTable,
ieee802_1ab.LLDPLocalSystemData.LLDPLocManAddrTable,
ieee802_1ab.LLDPRemTable,
ieee802_1ab.LLDPRemManAddrTable,
dell.force10.SSeriesMIB,
cisco.bgp4.CiscoBgp4MIB,
cisco.ciscoPfcExtMIB.cpfcIfTable,
cisco.ciscoPfcExtMIB.cpfcIfPriorityTable,
cisco.ciscoSwitchQosMIB.csqIfQosGroupStatsTable,
cisco.ciscoEntityFruControlMIB.cefcFruPowerStatusTable,
):
"""
If SONiC was to create custom MIBEntries, they may be specified here.
"""
def shutdown(signame, agent):
# FIXME: If the Agent dies, the background tasks will zombie.
global event_loop, shutdown_task
logger.info("Recieved '{}' signal, shutting down...".format(signame))
shutdown_task = event_loop.create_task(agent.shutdown())
def main(update_frequency=None):
global event_loop
try:
# initialize handler and set update frequency (or use the default)
agent = ax_interface.Agent(SonicMIB, update_frequency or DEFAULT_UPDATE_FREQUENCY, event_loop)
# add "shutdown" signal handlers
# https://docs.python.org/3.5/library/asyncio-eventloop.html#set-signal-handlers-for-sigint-and-sigterm
for signame in ('SIGINT', 'SIGTERM'):
event_loop.add_signal_handler(getattr(signal, signame),
functools.partial(shutdown, signame, agent))
# start the agent, wait for it to come back.
logger.info("Starting agent with PID: {}".format(os.getpid()))
event_loop.run_until_complete(agent.run_in_event_loop())
except Exception:
logger.exception("Uncaught exception in {}".format(__name__))
sys.exit(1)
finally:
if shutdown_task is not None:
# make sure shutdown has completed completely before closing the loop
event_loop.run_until_complete(shutdown_task)
# the agent runtime has exited, close the event loop and exit.
event_loop.close()
logger.info("Goodbye!")
sys.exit(0)
| 1.828125 | 2 |
btk_server.py | bedrin/keyboard_mouse_emulate_on_raspberry | 0 | 17229 | <reponame>bedrin/keyboard_mouse_emulate_on_raspberry<filename>btk_server.py
#!/usr/bin/python3
from __future__ import absolute_import, print_function
from optparse import OptionParser, make_option
import os
import sys
import uuid
import dbus
import dbus.service
import dbus.mainloop.glib
import time
import socket
from gi.repository import GLib
from dbus.mainloop.glib import DBusGMainLoop
import logging
from logging import debug, info, warning, error
import keymap
logging.basicConfig(level=logging.DEBUG)
class BTKbDevice():
# change these constants
MY_ADDRESS = "B8:27:EB:87:15:DC"
MY_DEV_NAME = "Raspberry_Keyboard"
errorCount = 0
# define some constants
P_CTRL = 17 # Service port - must match port configured in SDP record
P_INTR = 19 # Service port - must match port configured in SDP record#Interrrupt port
# dbus path of the bluez profile we will create
# file path of the sdp record to load
SDP_RECORD_PATH = sys.path[0] + "/sdp_record.xml"
UUID = "00001124-0000-1000-8000-00805f9b34fb"
def __init__(self):
logging.info("2. Setting up BT device")
self.init_bt_device()
self.init_bluez_profile()
self.set_bt_class()
# configure the bluetooth hardware device
def init_bt_device(self):
logging.info("3. Configuring Device name " + BTKbDevice.MY_DEV_NAME)
# set the device class to a keybord and set the name
os.system("hciconfig hci0 up")
os.system("hciconfig hci0 class 0x0025C0")
os.system("hciconfig hci0 name " + BTKbDevice.MY_DEV_NAME)
# make the device discoverable
os.system("hciconfig hci0 piscan")
def set_bt_class(self):
logging.info("workaround. Setting bluetooth class again")
os.system("hciconfig hci0 class 0x0025C0")
# set up a bluez profile to advertise device capabilities from a loaded service record
def init_bluez_profile(self):
logging.info("4. Configuring Bluez Profile")
# setup profile options
service_record = self.read_sdp_service_record()
opts = {
"AutoConnect": True,
"ServiceRecord": service_record
}
# retrieve a proxy for the bluez profile interface
bus = dbus.SystemBus()
manager = dbus.Interface(bus.get_object(
"org.bluez", "/org/bluez"), "org.bluez.ProfileManager1")
manager.RegisterProfile("/org/bluez/hci0", BTKbDevice.UUID, opts)
logging.info("6. Profile registered ")
os.system("hciconfig hci0 -a")
# read and return an sdp record from a file
def read_sdp_service_record(self):
logging.info("5. Reading service record")
try:
fh = open(BTKbDevice.SDP_RECORD_PATH, "r")
except:
sys.exit("Could not open the sdp record. Exiting...")
return fh.read()
# listen for incoming client connections
def listen(self):
logging.info("\033[0;33m7. Waiting for connections\033[0m")
self.scontrol = socket.socket(
socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_L2CAP) # BluetoothSocket(L2CAP)
self.sinterrupt = socket.socket(
socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_L2CAP) # BluetoothSocket(L2CAP)
self.scontrol.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sinterrupt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind these sockets to a port - port zero to select next available
self.scontrol.bind((socket.BDADDR_ANY, self.P_CTRL))
self.sinterrupt.bind((socket.BDADDR_ANY, self.P_INTR))
# Start listening on the server sockets
self.scontrol.listen(5)
self.sinterrupt.listen(5)
self.ccontrol, cinfo = self.scontrol.accept()
print (
"\033[0;32mGot a connection on the control channel from %s \033[0m" % cinfo[0])
self.cinterrupt, cinfo = self.sinterrupt.accept()
print (
"\033[0;32mGot a connection on the interrupt channel from %s \033[0m" % cinfo[0])
# send a string to the bluetooth host machine
def send_string(self, message):
global errorCount
try:
self.cinterrupt.send(bytes(message))
errorCount = 0
except OSError as err:
error(err)
errorCount += 1
if errorCount > 50 :
sys.exit()
class BTKbService(dbus.service.Object):
def __init__(self):
logging.info("1. Setting up service")
# create and setup our device
self.device = BTKbDevice()
# start listening for connections
self.device.listen()
self.scancodes = {
" ": "KEY_SPACE",
"→": "KEY_RIGHT",
"↵": "KEY_ENTER"
}
# the structure for a bt keyboard input report (size is 10 bytes)
self.interimstate = [
0xA1, # this is an input report
0x01, # Usage report = Keyboard
# Bit array for Modifier keys
[0x01, # Right GUI - Windows Key
0, # Right ALT
0, # Right Shift
0, # Right Control
0, # Left GUI
0, # Left ALT
0, # Left Shift
0], # Left Control
0x00, # Vendor reserved
0x00, # rest is space for 6 keys
0x00,
0x00,
0x00,
0x00,
0x00]
# start infinite loop
while True:
for x in range(0,12):
logging.info("sending ENTER (↵)")
self.send_string(0, "↵")
logging.info("sent ENTER (↵)")
time.sleep(10)
logging.info("sending RIGHT (→)")
self.send_string(0, "→")
logging.info("sent RIGHT (→)")
time.sleep(1)
logging.info("sending ENTER (↵)")
self.send_string(0, "↵")
logging.info("sent ENTER (↵)")
time.sleep(10)
logging.info("sending CTRL+R")
self.send_string(0x01, "R")
logging.info("sent CTRL+R")
time.sleep(10)
def send_key_state(self):
"""sends a single frame of the current key state to the emulator server"""
bin_str = ""
element = self.interimstate[2]
for bit in element:
bin_str += str(bit)
self.send_keys(int(bin_str, 2), self.interimstate[4:10])
def send_key_down(self, modifier, scancode):
"""sends a key down event to the server"""
self.interimstate[2] = [modifier, 0, 0, 0, 0, 0, 0, 0]
self.interimstate[4] = scancode
self.send_key_state()
def send_key_up(self):
"""sends a key up event to the server"""
self.interimstate[2] = [0,0,0,0,0,0,0,0]
self.interimstate[4] = 0
self.send_key_state()
def send_string(self, modifier, string_to_send):
for c in string_to_send:
cu = c.upper()
if(cu in self.scancodes):
scantablekey = self.scancodes[cu]
else:
scantablekey = "KEY_"+c.upper()
logging.info(scantablekey)
scancode = keymap.keytable[scantablekey]
self.send_key_down(modifier, scancode)
time.sleep(0.01)
self.send_key_up()
time.sleep(0.01)
def send_keys(self, modifier_byte, keys):
logging.info("Get send_keys request through dbus")
logging.info("key msg: %s", keys)
state = [ 0xA1, 1, 0, 0, 0, 0, 0, 0, 0, 0 ]
state[2] = int(modifier_byte)
count = 4
for key_code in keys:
if(count < 10):
state[count] = int(key_code)
count += 1
self.device.send_string(state)
# main routine
if __name__ == "__main__":
try:
DBusGMainLoop(set_as_default=True)
myservice = BTKbService()
loop = GLib.MainLoop()
loop.run()
except KeyboardInterrupt:
sys.exit()
| 2.640625 | 3 |
ai2thor/util/visualize_3D_bbox.py | KuoHaoZeng/ai2thor-1 | 0 | 17230 | <reponame>KuoHaoZeng/ai2thor-1
import ai2thor.controller
import numpy as np
from PIL import Image, ImageDraw
def get_rotation_matrix(agent_rot):
#######
# Construct the rotation matrix. Ref: https://en.wikipedia.org/wiki/Rotation_matrix
#######
r_y = np.array([[np.cos(np.radians(agent_rot["y"])), 0, np.sin(np.radians(agent_rot["y"]))],
[0, 1, 0],
[-np.sin(np.radians(agent_rot["y"])), 0, np.cos(np.radians(agent_rot["y"]))]])
r_x = np.array([[1, 0, 0],
[0, np.cos(np.radians(agent_rot["x"])), -np.sin(np.radians(agent_rot["x"]))],
[0, np.sin(np.radians(agent_rot["x"])), np.cos(np.radians(agent_rot["x"]))]])
r = r_x @ r_y
return r
def project_to_agent_coordinate(pos, agent_pos, r):
#######
# Project a position from the world coordinate to the agent coordinate.
#######
pos_diff = pos - agent_pos
# since AI2THOR is left-handed coordinate system, we need to turn it to the right-handed to use the rotation matrix
pos_diff[2] *= -1
new_pos = r @ pos_diff
# turn back to the left-handed coordinate system
new_pos[2] *= -1
return new_pos
def project_to_2d(pos, half_fov, w, h):
#######
# Project a given 3D position to 2D space.
#######
pos_2d = [pos[0] / (pos[2] * np.tan(np.radians(half_fov))),
pos[1] / (pos[2] * np.tan(np.radians(half_fov)))]
# x-axis
x = int(w * ((pos_2d[0] + 1.0) / 2.0))
# y-axis
y = int(h * (1 - ((pos_2d[1] + 1.0) / 2.0)))
return [x, y]
def draw_3d_bbox(event):
#######
# Draw the 3D bbox in 2D RGB image by first construct the rotation matrix and get agent position by the agent pose,
# then filter out the objects which are not visible to the agent.
# Finally, project the 3D bbox to 2D space and draw it on the 2D RGB image and return the event dict with image.
#######
# get the 2D image width and height
w, h = event.metadata["screenWidth"], event.metadata["screenHeight"]
# get the FOV
half_fov = event.metadata["fov"] / 2
# get the camera rotation matrix
agent_rot = event.metadata["agent"]["rotation"]
agent_rot["x"] = event.metadata["agent"]["cameraHorizon"]
rotation_matrix = get_rotation_matrix(agent_rot)
# get the camera 3D position
agent_pos = np.array([event.metadata["cameraPosition"]["x"],
event.metadata["cameraPosition"]["y"],
event.metadata["cameraPosition"]["z"]])
# get the 2D RGB image and allocate a drawer
img = Image.fromarray(event.frame, "RGB")
draw = ImageDraw.Draw(img)
# iterate over all objects in the scene
# first classify if the object is in the view by rotated z position and instance segmentation
# then draw the 3D bbox in the 2D RGB image
for obj in event.metadata["objects"]:
# get object 3D position and rotate it to the agent coordinate
pos = np.array([obj["position"]["x"], obj["position"]["y"], obj["position"]["z"]])
new_pos = project_to_agent_coordinate(pos, agent_pos, rotation_matrix)
# classify is the object is in front of the agent
if new_pos[2] > 0:
# classify if the object is seen by the agent (not occluded by other objects)
if obj["objectId"] in event.instance_masks.keys():
# don't draw the floor and ceiling objects
if "Floor" in obj["objectId"] or "Ceiling" in obj["objectId"]:
if "Lamp" not in obj["objectId"]:
continue
# get the object color from the instance segmentation
color = event.object_id_to_color[obj["objectId"]]
# get the 3D bbox center and size
vertices, valid = [], []
if not isinstance(obj["objectOrientedBoundingBox"], type(None)):
# get the 3D bbox 8 vertices
corner_points = obj["objectOrientedBoundingBox"]["cornerPoints"]
# project vertices to 2D image coordinate
for point in corner_points:
new_point = project_to_agent_coordinate(point, agent_pos, rotation_matrix)
if new_point[2] > 0:
valid.append(True)
else:
valid.append(False)
new_point_2d = project_to_2d(new_point, half_fov, w, h)
vertices.append(new_point_2d)
# get the 3D bbox 12 lines
lines = [[vertices[0], vertices[1]],
[vertices[2], vertices[3]],
[vertices[0], vertices[3]],
[vertices[1], vertices[2]],
[vertices[4], vertices[5]],
[vertices[6], vertices[7]],
[vertices[4], vertices[7]],
[vertices[5], vertices[6]],
[vertices[2], vertices[6]],
[vertices[3], vertices[7]],
[vertices[1], vertices[5]],
[vertices[0], vertices[4]]]
valid_lines = [valid[0] * valid[1],
valid[2] * valid[3],
valid[0] * valid[3],
valid[1] * valid[2],
valid[4] * valid[5],
valid[6] * valid[7],
valid[4] * valid[7],
valid[5] * valid[6],
valid[2] * valid[6],
valid[3] * valid[7],
valid[1] * valid[5],
valid[0] * valid[4]]
else:
if "cornerPoints" in obj["axisAlignedBoundingBox"].keys():
# get the 3D bbox 8 vertices
corner_points = obj["axisAlignedBoundingBox"]["cornerPoints"]
else:
# get the 3D bbox 8 vertices from bbox center and size
center = np.array([obj["axisAlignedBoundingBox"]["center"]["x"],
obj["axisAlignedBoundingBox"]["center"]["y"],
obj["axisAlignedBoundingBox"]["center"]["z"]])
size = np.array([obj["axisAlignedBoundingBox"]["size"]["x"],
obj["axisAlignedBoundingBox"]["size"]["y"],
obj["axisAlignedBoundingBox"]["size"]["z"]])
corner_points = []
for i in range(2):
pos_x = np.array(center)
pos_x[0] = pos_x[0] - (size[0] / 2) + (i * size[0])
for j in range(2):
pos_y = np.array(pos_x)
pos_y[1] = pos_y[1] - (size[1] / 2) + (j * size[1])
for k in range(2):
pos_z = np.array(pos_y)
pos_z[2] = pos_z[2] - (size[2] / 2) + (k * size[2])
corner_points.append(pos_z)
# project vertices to 2D image coordinate
for point in corner_points:
new_point = project_to_agent_coordinate(point, agent_pos, rotation_matrix)
if new_point[2] > 0:
valid.append(True)
else:
valid.append(False)
new_point_2d = project_to_2d(new_point, half_fov, w, h)
vertices.append(new_point_2d)
# get the 3D bbox 12 lines
lines = [[vertices[0], vertices[1]],
[vertices[2], vertices[3]],
[vertices[0], vertices[2]],
[vertices[1], vertices[3]],
[vertices[4], vertices[5]],
[vertices[6], vertices[7]],
[vertices[4], vertices[6]],
[vertices[5], vertices[7]],
[vertices[2], vertices[6]],
[vertices[3], vertices[7]],
[vertices[1], vertices[5]],
[vertices[0], vertices[4]]]
valid_lines = [valid[0] * valid[1],
valid[2] * valid[3],
valid[0] * valid[2],
valid[1] * valid[3],
valid[4] * valid[5],
valid[6] * valid[7],
valid[4] * valid[6],
valid[5] * valid[7],
valid[2] * valid[6],
valid[3] * valid[7],
valid[1] * valid[5],
valid[0] * valid[4]]
lines = np.array(lines)
lines = np.reshape(lines, (-1, 4))
valid_lines = np.array(valid_lines)
valid_lines = np.reshape(valid_lines, (-1, 1))
# draw the 3D bbox 12 lines in the 2D RGB image
for iii, line in enumerate(lines):
if valid_lines[iii]:
draw.line((line[0], line[1], line[2], line[3]), fill=color, width=2)
# store the result back to the event
bbox_frame = np.array(img)
event.bbox_3d_frame = bbox_frame
return event
if __name__ == "__main__":
# give the height and width of the 2D image and scene id
w, h = 900, 900
scene = "FloorPlan2{:02d}_physics".format(1)
# allocate controller and initialize the scene and agent
# local_path = "src/ai2thor/unity/builds/thor-local-OSXIntel64.app/Contents/MacOS/AI2-Thor"
local_path = ""
controller = ai2thor.controller.Controller(local_path=local_path)
_ = controller.start(width=w, height=h)
_ = controller.reset(scene)
event = controller.step(dict(action='Initialize',
gridSize=0.25,
renderClassImage=True,
renderObjectImage=True,
renderDepthImage=True,
fieldOfView=90))
# do something then draw the 3D bbox in 2D image
event = controller.step(dict(action="MoveAhead"))
event = controller.step(dict(action="MoveAhead"))
event = controller.step(dict(action="Rotate", rotation=dict(x=0, y=30, z=0)))
event = draw_3d_bbox(event)
img = Image.fromarray(event.bbox_3d_frame, "RGB")
img.save("./output1.png")
event = controller.step(dict(action="LookDown"))
event = draw_3d_bbox(event)
img = Image.fromarray(event.bbox_3d_frame, "RGB")
img.save("./output2.png")
event = controller.step(dict(action="LookDown"))
event = draw_3d_bbox(event)
img = Image.fromarray(event.bbox_3d_frame, "RGB")
img.save("./output3.png")
| 3.46875 | 3 |
sa/profiles/ElectronR/KO01M/get_metrics.py | prorevizor/noc | 84 | 17231 | <filename>sa/profiles/ElectronR/KO01M/get_metrics.py
# ---------------------------------------------------------------------
# ElectronR.KO01M.get_metrics
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.sa.profiles.Generic.get_metrics import Script as GetMetricsScript, metrics
class Script(GetMetricsScript):
name = "ElectronR.KO01M.get_metrics"
@metrics(["Environment | Sensor Status"], volatile=False, access="S") # SNMP version
def get_sensor_status(self, metrics):
for metric in metrics:
value = 1
if metric.ifindex == 100:
continue
elif metric.ifindex == 140:
temp = self.snmp.get("1.3.6.1.4.1.35419.192.168.127.12", cached=True)
if -55 < temp < 600:
value = 0
elif metric.ifindex == 160:
impulse = self.snmp.get("1.3.6.1.4.1.35419.192.168.127.12", cached=True)
if impulse != 0:
value = 0
else:
res = self.snmp.get("1.3.6.1.4.1.35419.20.1.10%s.0" % metric.ifindex)
if res == 1:
value = 0
port = metric.labels[0].rsplit("::", 1)[-1]
self.set_metric(
id=("Environment | Sensor Status", metric.labels),
labels=[f"noc::sensor::{port}"],
value=value,
)
@metrics(["Environment | Temperature"], volatile=False, access="S") # SNMP version
def get_temperature(self, metrics):
for metric in metrics:
if metric.ifindex == 140:
value = self.snmp.get("1.3.6.1.4.1.35419.20.1.%s.0" % metric.ifindex, cached=True)
port = metric.labels[0].rsplit("::", 1)[-1]
self.set_metric(
id=("Environment | Temperature", metric.labels),
labels=[f"noc::module::{port}", f"noc::sensor::{port}"],
value=value,
multi=True,
)
@metrics(["Environment | Voltage"], volatile=False, access="S") # SNMP version
def get_voltage(self, metrics):
for metric in metrics:
value = self.snmp.get("1.3.6.1.4.1.35419.20.1.%s.0" % metric.ifindex)
port = metric.labels[0].rsplit("::", 1)[-1]
self.set_metric(
id=("Environment | Voltage", metric.labels),
labels=[f"noc::module::{port}", f"noc::sensor::{port}"],
value=value,
multi=True,
)
@metrics(["Environment | Pulse"], volatile=False, access="S") # SNMP version
def get_pulse(self, metrics):
for metric in metrics:
if metric.ifindex == 160:
value = self.snmp.get("1.3.6.1.4.1.35419.20.1.%s.0" % metric.ifindex, cached=True)
port = metric.labels[0].rsplit("::", 1)[-1]
self.set_metric(
id=("Environment | Pulse", metric.labels),
labels=[f"noc::sensor::{port}"],
value=value,
)
@metrics(["Environment | Power | Input | Status"], volatile=False, access="S") # SNMP version
def get_power_input_status(self, metrics):
for metric in metrics:
value = self.snmp.get("1.3.6.1.4.1.35419.20.1.10%s.0" % metric.ifindex, cached=True)
port = metric.labels[0].rsplit("::", 1)[-1]
self.set_metric(
id=("Environment | Power | Input | Status", metric.labels),
labels=[f"noc::sensor::{port}"],
value=0 if value == 1 else 1,
)
| 2 | 2 |
clustering.py | t20100/ccCluster | 0 | 17232 | <reponame>t20100/ccCluster
from __future__ import print_function
__author__ = "<NAME>"
__copyright__ = "Copyright 20150-2019"
__credits__ = ["<NAME>, <NAME>"]
__license__ = ""
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Beta"
from scipy.cluster import hierarchy
import scipy
import matplotlib.pyplot as plt
import os
import numpy as np
import subprocess
import collections
import operator
import stat
import json
import random
class Clustering():
"""
parse cc_calc output and perform HCA
at each call, it generates the distance matrix
You get the dendrogram through Clustering.tree()
"""
    def __init__(self, ccCalcOutput):
        """
        Parameters
        ----------
        ccCalcOutput : str
            Path to the ccCalc output file holding the dataset labels and
            the pairwise correlation coefficients.
        """
        self.ccFile= ccCalcOutput
        # all output folders and the hidden log live under the launch directory
        self.CurrentDir = os.getcwd()
        # pairwise CC table (numpy array of [i, j, cc] rows) and dataset count
        self.ccTable, self.Dimension = self.parseCCFile()
        # populates self.labelList with the input file names, in table order
        self.createLabels()
        # populates self.alreadyDone from previous runs' .cc_cluster.log
        self.previousProcess()
def previousProcess(self):
"""
Lists all the clusters which have already been processed from a log file.
Updates the global variable alreadyDone
"""
self.alreadyDone= []
if os.path.isfile(os.getcwd()+'/.cc_cluster.log'):
with open(os.getcwd()+'/.cc_cluster.log') as log:
for line in log:
L = line.split(',')
self.alreadyDone.append([L[1], L[2].strip(), L[3].strip()])
def parseCCFile(self):
"""
Gets data from ccCalc ouput file and populates a numpy array with the distances
"""
with open(self.ccFile, 'r') as f:
dataArr = None
data=[]
Index = []
for line in f:
if line.strip() == 'Correlation coefficients':
break
for line in f:
dataline= line.rstrip().split()
data.append(dataline)
Index.append(int(dataline[0])+1)
Index.append(int(dataline[1])+1)
Dimension=max(Index)
dataArr = np.array(data,dtype=(float))
return dataArr, Dimension
def createLabels(self):
"""
Gets the labels from the ccCalc output with the input file names
"""
self.labelList= []
with open(self.ccFile) as f:
for line in f:
if line.strip() == 'Labels':
break
for line in f:
if line.strip() == 'Correlation coefficients':
break
goodLine = line.split()
self.labelList.append("%s"%(goodLine[2].strip('\n')))
return self.labelList
def inputType(self):
"""
return input file type. Either mtz or HLK
"""
element = self.labelList[0]
extension = element.split('.')[-1]
print(extension)
return extension
def tree(self):
"""
Returns the HCA dendrogrm, using the complete linkage method
"""
data = self.ccTable
Matrix=np.zeros((self.Dimension,self.Dimension))
reducedArray=[]
for line in data:
#print line
if line is not None and len(line) is not 0:
Matrix[int(line[0]),int(line[1])]= line[2]
Matrix[int(line[1]),int(line[0])]= line[2]
for x in range(0,self.Dimension):
for y in range(x+1,self.Dimension):
reducedArray.append(Matrix[x,y])
Distances = np.array(reducedArray, dtype=(float))
self.Tree =hierarchy.linkage(Distances, 'complete')
return self.Tree
def avgTree(self):
"""
Returns the HCA dendrogrm, using the average linkage method
"""
data = self.ccTable
Matrix=np.zeros((self.Dimension,self.Dimension))
reducedArray=[]
for line in data:
#print line
if line is not None and len(line) is not 0:
Matrix[int(line[0]),int(line[1])]= line[2]
Matrix[int(line[1]),int(line[0])]= line[2]
for x in range(0,self.Dimension):
for y in range(x+1,self.Dimension):
reducedArray.append(Matrix[x,y])
Distances = np.array(reducedArray, dtype=(float))
self.Tree =hierarchy.linkage(Distances, 'average')
return self.Tree
def flatClusterPrinter(self, thr, labelsList, anomFlag):
"""
Prints the flat cluster at a chosen threshold to a .json file
"""
FlatC=hierarchy.fcluster(self.Tree, thr, criterion='distance')
counter=collections.Counter(FlatC)
clusterToJson={}
clusterToJson['HKL']=[]
Best = max(counter.items(), key=operator.itemgetter(1))[0]
clusterFile = open(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/flatCluster.json'%(float(thr),Best, anomFlag), 'w')
for cluster, hkl in zip(FlatC, labelsList):
clusterToJson['HKL'].append({
'input_file':hkl,
'cluster':str(cluster)
})
print(clusterToJson)
j = json.dumps(clusterToJson, indent=4)
print(j, file=clusterFile)
def thrEstimation(self):
"""
Estimates the threshold for optimal clustering, based on the multiplicity of the biggest cluster
"""
x = 0.00
dx = 0.05
countsList = []
x_list = []
while x < 1:
FlatC = hierarchy.fcluster(self.Tree, x, criterion='distance')
counter=collections.Counter(FlatC)
Best = max(counter.items(), key=operator.itemgetter(1))[0]
countsList.append(counter[Best])
x+= dx
x_list.append(x)
dy = np.diff(countsList)
for a, b in zip (x_list, dy):
if b == max(dy):
return a
def checkMultiplicity(self, thr):
"""
Prints the multiplicity of the biggest cluster at a given threshold
"""
FlatC = hierarchy.fcluster(self.Tree, thr, criterion='distance')
counter=collections.Counter(FlatC)
Best = max(counter.items(), key=operator.itemgetter(1))[0]
print('You are clustering with a threshold of %s'%(thr))
print('The biggest cluster contains %s datasets from a total of %s'%(counter[Best], len(self.labelList)))
    def completenessEstimation(self):
        """
        Unfinished stub.

        NOTE(review): with x initialised to 0.00 the ``while x > 1`` condition
        is never true, so the loop body never executes; the condition was
        presumably meant to be ``x < 1`` with an ``x += dx`` step inside the
        loop — confirm the intent before enabling. The computed
        FlatC/counter/Best values are also never used or returned.
        """
        x = 0.00
        dx = 0.05
        while x > 1:
            FlatC = hierarchy.fcluster(self.Tree, x, criterion='distance')
            counter=collections.Counter(FlatC)
            Best = max(counter.items(), key=operator.itemgetter(1))[0]
# the list self.ToProcess is needed by the scaling routines
# fix all this new mess!
def whatToProcess(self):
FlatC = hierarchy.fcluster(self.Tree, thr, criterion='distance')
counter=collections.Counter(FlatC)
Best = max(counter.items(), key=operator.itemgetter(1))[0]
Process = True
#change checkboxes to standard variables
if Process:
self.ToProcess = [Best]
else:
self.ToProcess = set(Clusters)
for key in self.ToProcess:
if counter[key]==1:
self.ToProcess = [x for x in self.ToProcess if x != key]
return self.ToProcess
#Run XSCALE to merge the biggest cluster
#input files
#!!!! Will need to define the processes to run externally
#renaming function! Edit the calls in ccCluster accordingly
    def prepareXSCALE(self, anomFlag, thr):
        """
        Create one cc_Cluster_<thr>_<cluster>_<anomFlag> output folder per
        cluster to process and write its XSCALE.INP header plus the first
        part of launch_pointless.sh, then append every dataset belonging to
        the selected cluster(s) to both files.

        NOTE(review): the ``else`` branch references an undefined name
        ``Clusters``; it is unreachable because Process is hard-coded True.
        """
        # flat cluster assignment at the requested threshold
        FlatC = hierarchy.fcluster(self.Tree, thr, criterion='distance')
        counter=collections.Counter(FlatC)
        # id of the most populated cluster
        Best = max(counter.items(), key=operator.itemgetter(1))[0]
        Process = True
        #change checkboxes to standard variables
        if Process:
            self.ToProcess = [Best]
        else:
            self.ToProcess = set(Clusters)
        # drop singleton clusters: there is nothing to merge in them
        for key in self.ToProcess:
            if counter[key]==1:
                self.ToProcess = [x for x in self.ToProcess if x != key]
        for x in self.ToProcess:
            # skip threshold/cluster/flag combinations handled in earlier runs
            if [thr,x, anomFlag] not in self.alreadyDone:
                os.mkdir(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s'%(float(thr),x, anomFlag))
                # write the headers of the XSCALE input and pointless launcher
                Xscale=open(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/XSCALE.INP'%(float(thr),x, anomFlag), 'a')
                Pointless=open(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/launch_pointless.sh'%(float(thr),x,anomFlag ), 'a')
                print('OUTPUT_FILE=scaled.hkl',file=Xscale)
                print('MERGE= TRUE', file=Xscale)
                print('pointless hklout clustered.mtz << eof', file=Pointless)
                # keep or discard the anomalous signal while scaling
                if anomFlag=='ano':
                    print('FRIEDEL\'S_LAW= FALSE', file=Xscale)
                elif anomFlag=='no_ano':
                    print('FRIEDEL\'S_LAW= TRUE', file=Xscale)
                Xscale.close()
                Pointless.close()
        # append every dataset of the selected cluster(s) to both input files
        for cluster, filename in zip(FlatC,self.labelList):
            if cluster in self.ToProcess:
                OUT = open(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/XSCALE.INP'%(float(thr),cluster,anomFlag), 'a')
                Pointless=open(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/launch_pointless.sh'%(float(thr),cluster,anomFlag), 'a')
                print ('INPUT_FILE= ../%s'%(filename), file=OUT)
                #print ('INCLUDE_RESOLUTION_RANGE=20, 1.8', file=OUT)
                print ('MINIMUM_I/SIGMA= 0', file=OUT)
                print ('XDSIN ../%s'%(filename), file= Pointless)
                OUT.close()
                Pointless.close()
    def preparePointless(self, anomFlag, thr):
        """
        Create one cc_Cluster_<thr>_<cluster>_<anomFlag> output folder per
        cluster to process (mtz workflow) and start writing its
        launch_pointless.sh script, listing every dataset of the selected
        cluster(s) as HKLIN input.

        NOTE(review): the ``else`` branch references an undefined name
        ``Clusters``; it is unreachable because Process is hard-coded True.
        """
        # flat cluster assignment at the requested threshold
        FlatC = hierarchy.fcluster(self.Tree, thr, criterion='distance')
        counter=collections.Counter(FlatC)
        # id of the most populated cluster
        Best = max(counter.items(), key=operator.itemgetter(1))[0]
        Process = True
        #change checkboxes to standard variables
        if Process:
            self.ToProcess = [Best]
        else:
            self.ToProcess = set(Clusters)
        # drop singleton clusters: there is nothing to merge in them
        for key in self.ToProcess:
            if counter[key]==1:
                self.ToProcess = [x for x in self.ToProcess if x != key]
        for x in self.ToProcess:
            # skip threshold/cluster/flag combinations handled in earlier runs
            if [thr,x, anomFlag] not in self.alreadyDone:
                os.mkdir(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s'%(float(thr),x, anomFlag))
                # write the header of the pointless launcher script
                Pointless=open(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/launch_pointless.sh'%(float(thr),x,anomFlag ), 'a')
                print('pointless hklout clustered.mtz << eof', file=Pointless)
                print('XMLOUT pointlessLog.xml', file=Pointless)
                Pointless.close()
        # append every dataset of the selected cluster(s) as HKLIN input
        for cluster, filename in zip(FlatC,self.labelList):
            if cluster in self.ToProcess:
                Pointless=open(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/launch_pointless.sh'%(float(thr),cluster,anomFlag), 'a')
                print ('HKLIN ../%s'%(filename), file= Pointless)
                Pointless.close()
#Run XSCALE in the pre-determined folders.
def scaleAndMerge(self, anomFlag, thr):
newProcesses=[]
for x in self.ToProcess:
if [thr,x, anomFlag] not in self.alreadyDone:
#self.createDendrogram(thr)
#plt.savefig(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/Dendrogram.png'%(float(thr),x,anomFlag))
P= subprocess.Popen('xscale_par',cwd=self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/'%(float(thr), x, anomFlag))
P.wait()
# print('Cluster, %s , %s , %s'%(float(thr),x, anomFlag), file=Log)
newProcesses.append([thr,x, anomFlag])
#run Pointless in each folder from the processing List
def pointlessRun(self, anomFlag, thr):
newProcesses=[]
for x in self.ToProcess:
if [thr,x, anomFlag] not in self.alreadyDone:
Pointless=open(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/launch_pointless.sh'%(float(thr),x,anomFlag), 'a')
print('COPY \n bg\n TOLERANCE 4 \n eof', file= Pointless)
Pointless.close()
st = os.stat(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/launch_pointless.sh'%(float(thr),x,anomFlag ))
os.chmod(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/launch_pointless.sh'%(float(thr),x,anomFlag ), st.st_mode | 0o111)
P = subprocess.Popen(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/launch_pointless.sh > pointless.log')
P.wait()
#run aimless on the output from pointless
#will run in folders with clustered.mtz file available.
#TBD: fix directories paths into the aimless.inp file
#also set all the proper input values into the function call
#path to aimless executable to be verified.
def aimlessRun(self, anomFlag, thr):
for x in self.toProcess:
if [thr,x, anomFlag] not in self.alreadyDone:
f1= open("aimless.inp", 'w')
runScript='''#!/bin/bash
source /opt/pxsoft/ccp4/vdefault/linux-x86_64/ccp4-7.0/setup-scripts/ccp4.setup-sh
aimless HKLIN {infile} << EOF
HKLOUT {setname}_aimless.mtz'
RESOLUTION LOW {resLow} HIGH {resHigh}
OUTPUT MERGED
anomalous {anomflag}
EOF
#truncate: generate Fs
truncate hklin {setname}_aimless.mtz hklout {setname}_tr.mtz <<EOF-trunc
truncate yes
EOF-trunc
#unique: generate unique reflection set for rfree
unique HKLOUT x_unq.mtz << EOF
CELL {cell}
SYMMETRY '{SpaceGroup}'
LABOUT F=FUNI SIGF=SIGFUNI
RESOLUTION {resHigh}
EOF
#freerflag: generate free reflections
freerflag HKLIN x_unq.mtz HKLOUT x_FreeR_unq.mtz <<EOF
FREERFRAC 0.05
END
EOF
#cad: combine free reflections with data
cad HKLIN1 x_FreeR_unq.mtz HKLIN2 {setname}_tr.mtz HKLOUT {setname}_cad.mtz<<EOF
LABI FILE 1 E1=FreeR_flag
LABI FILE 2 ALLIN
END
EOF
freerflag HKLIN {setname}_cad.mtz HKLOUT {setname}_scaled.mtz <<EOF
COMPLETE FREE=FreeR_flag
END
EOF
'''.format(infile = 'clustered.mtz', setname = 'clustered', resHigh = '1.0', resLow = '60', anomflag = 'ON', cell = cell, SpaceGroup = SpaceGroup)
f1.write(runScript)
f1.close()
os.chmod(CurrentDir + '/aimless.inp', st.st_mode | 0o111)
subprocess.call('./aimless.inp > aimless.log', cwd=CurrentDir, shell=True)
# A function to investigate the influence of reference file in merging results
    def shuffleXscale(self, anomFlag, thr):
        """Probe the influence of input-file order on XSCALE merging.

        Picks the most populated cluster at threshold ``thr``, collects its
        dataset filenames, then runs XSCALE 20 times, each time with the
        input files in a freshly shuffled order (one ``thr_<thr>_run_<n>``
        directory per run).

        :param anomFlag: anomalous-processing tag -- NOTE(review): accepted
            but never used in this method; verify against callers.
        :param thr: clustering distance threshold for ``fcluster``
        """
        # flat cluster assignment for every dataset at this threshold
        FlatC = hierarchy.fcluster(self.Tree, thr, criterion='distance')
        # NOTE(review): `Log` is opened here but never written to or closed
        # in this method -- looks like leftover code; confirm before removal.
        Log = open(self.CurrentDir+'/.cc_cluster.log', 'a')
        counter=collections.Counter(FlatC)
        # cluster id with the largest membership
        Best = max(counter.items(), key=operator.itemgetter(1))[0]
        print(Best)
        Process = True
        xscaleInputFiles=[]
        #change checkboxes to standard variables
        if Process:
            self.ToProcess = [Best]
        else:
            # NOTE(review): dead branch (`Process` is hard-coded True) and
            # `Clusters` is undefined here -- would raise NameError if taken.
            self.ToProcess = set(Clusters)
        # drop singleton clusters: nothing to merge
        for key in self.ToProcess:
            if counter[key]==1:
                self.ToProcess = [x for x in self.ToProcess if x != key]
        #Prepare list of filenames to shuffle over
        for cluster, filename in zip(FlatC, self.labelList):
            if cluster in self.ToProcess:
                xscaleInputFiles.append(filename)
        print(xscaleInputFiles)
        #run XSCALE with random ordered files 20 times
        for x in range(0,20):
            os.mkdir(self.CurrentDir+'/thr_%.2f_run_%s'%(float(thr),x))
            Xscale=open(self.CurrentDir+'/thr_%.2f_run_%s/XSCALE.INP'%(float(thr),x), 'a')
            print('OUTPUT_FILE=scaled.hkl',file=Xscale)
            print('MERGE= TRUE', file=Xscale)
            print('FRIEDEL\'S_LAW=TRUE', file=Xscale )
            # shuffle in place: each run sees a different input order
            random.shuffle(xscaleInputFiles)
            for hkl in xscaleInputFiles:
                print ('INPUT_FILE= ../%s'%(hkl), file=Xscale)
            print ('MINIMUM_I/SIGMA= 0', file=Xscale)
            # NOTE(review): `Xscale` is never closed/flushed before launching
            # xscale_par -- buffered lines may not be on disk yet; confirm.
            P= subprocess.Popen('xscale_par',cwd=self.CurrentDir+'/thr_%.2f_run_%s'%(float(thr),x))
            P.wait()
def main():
    """Command-line entry point: cluster datasets, then merge or just count."""
    from optparse import OptionParser
    parser = OptionParser(usage="%prog --XSCALEfile=<LP filename> --outname=<output dendogram>")
    parser.add_option("-o","--outname", dest="outname", default='Dendrogram', help="output dendogram file name")
    parser.add_option("-t", "--threshold", dest="threshold", default='0.4', help="Distance threshold for clustering")
    parser.add_option("-c", "--count",action="store_true", dest="count", default=False, help="Counts datasets in the biggest cluster and exit")
    opts, _args = parser.parse_args()

    threshold = float(opts.threshold)
    clustering = Clustering('Cluster_log.txt')
    clustering.tree()  # build the linkage tree the steps below rely on
    # multiplicity check happens in both modes, so do it once up front
    clustering.checkMultiplicity(threshold)
    if opts.count:
        print(clustering.thrEstimation())
    else:
        clustering.merge('ano', threshold)


if __name__== '__main__':
    main()
#!/usr/bin/env python
# Author: <NAME>
# Date: 2015oct31
from __future__ import print_function
import os
import sys
import stat
import errno
import shutil
import optparse
import traceback
import subprocess
wrappaconda_name_string = 'Wr[App]-A-Conda'
class AppAtizer(object):
    """Builds a macOS .app bundle that wraps a Miniconda-installed target.

    The bundle is created under /Applications; it embeds a private Miniconda
    installation in Contents/Resources/miniconda and hard-links the requested
    target from that environment's bin directory into Contents/MacOS.
    """

    def __init__(self):
        """Parse the command line and pre-compute every bundle path."""
        # tmp paths: prefer ~/Downloads, fall back to the cwd
        self._downloads_prefix = os.path.expanduser('~/Downloads')
        if not os.path.isdir(self._downloads_prefix):
            self._downloads_prefix = './' # use cwd

        # try for wget or curl
        self._get = self._getDownloaderCommand()

        # cli input
        self._parseUserInput()

        # .app paths
        self._apppath = '/Applications/'+self._name+'.app'
        self._contents_prefix = self._apppath + "/Contents"
        self._resource_prefix = self._contents_prefix + "/Resources"
        self._info_plist_path = self._contents_prefix + "/Info.plist"
        self._pkg_info_path = self._contents_prefix + "/PkgInfo"
        self._macos_prefix = self._contents_prefix + "/MacOS"
        self._cfbundle_icon_filename = 'app.icns'

        # Wr[App]-A-Conda paths (marker file proving we generated the bundle)
        self._id_file_path = self._resource_prefix + "/wrappaconda"

        # miniconda paths
        self._miniconda_prefix = self._resource_prefix + "/miniconda"
        self._python_path = self._miniconda_prefix + "/bin/python"
        self._conda_path = self._miniconda_prefix + "/bin/conda"

    def _parseUserInput(self):
        """Define, parse and validate the CLI options; store them on self.

        Prints the help and re-raises on a missing/invalid required option.
        """
        parser = optparse.OptionParser()
        parser.add_option("-n", "--name", dest='name', help="[REQUIRED] The name of this app.")
        parser.add_option("-t", "--target", dest='target', help="[REQUIRED] The binary or script found in Anaconda\'s $PREFIX/bin.")
        parser.add_option("-v", "--version", dest='version', help="The version of this app.", default='0.1')
        parser.add_option("-i", "--icon", dest='icon_file', help="Icon file to be used in the bundle.")
        parser.add_option("-c", "--channel", dest='channel', help="The Anaconda.org package channel(s), or url(s) separated by commas (e.g. nckz,https://conda.anaconda.org/gpi/channel/rc) (defaults to \'defaults\')", default='defaults')
        parser.add_option("-p", "--package", dest='package', help="The package name(s) separated by commas (e.g. scipy=0.15.0,curl=7.26.0,pip).")
        parser.add_option("-r", "--rootenv", dest='rootenv', help="A root environment file (created using: \'conda list --export\').")
        parser.add_option("--py", dest='py_ver', help="Choose the distro python version using the major and minor version numbers (defaults to 3.5).", default='3.5')
        parser.add_option("-o", "--overwrite", action="store_true", dest='overwrite', help="Overwrite an existing app with the same \'name\'. Use caution!!!")
        options, args = parser.parse_args()

        try:
            # check for input errors
            assert options.name is not None
            assert options.target is not None
            if options.icon_file is not None:
                assert os.path.isfile(options.icon_file)
                assert options.icon_file.endswith(".icns")
            if options.rootenv is not None:
                assert os.path.isfile(options.rootenv)
        except Exception:  # was a bare except; keep the help+re-raise behavior
            parser.print_help()
            raise

        self._name = options.name
        self._version = options.version
        self._target = options.target
        self._icon_file = options.icon_file
        self._channel = options.channel
        self._package = options.package
        self._root_env = options.rootenv
        self._py_ver = options.py_ver
        self._overwrite = options.overwrite

    def _getDownloaderCommand(self):
        """Return a download command template with one ``{}`` slot for the URL.

        Prefers wget, falls back to curl; raises when neither is installed.
        """
        try:
            subprocess.check_output('command -v wget >/dev/null 2>&1;', shell=True)
            return 'wget --directory-prefix ' + self._downloads_prefix + ' -c {}'
        except subprocess.CalledProcessError:
            try:
                subprocess.check_output('command -v curl >/dev/null 2>&1;', shell=True)
                return 'cd '+self._downloads_prefix+' && curl --fail -O -C - {} '
            except subprocess.CalledProcessError:
                print("This script requires \'wget\' or \'curl\' and neither were found.")
                raise

    def appPath(self):
        """Return the absolute path of the bundle being built."""
        return self._apppath

    def deleteExistingApp(self):
        """Remove a previous bundle at the target path.

        Only bundles carrying the Wr[App]-A-Conda marker file are deleted,
        and only when --overwrite was given; otherwise the build is aborted.
        """
        if not os.path.exists(self._apppath):
            return
        if not self._overwrite:
            # BUGFIX: the old code only *printed* "exiting..." and carried on,
            # so buildAppSkeleton() then collided with the existing bundle.
            raise SystemExit("The app \'"+self._apppath+"\' already exists, exiting...")
        print("Removing existing path: "+self._apppath)
        try:
            # refuse to delete anything this tool didn't generate itself
            with open(self._id_file_path, 'r') as f:
                assert f.read().count(wrappaconda_name_string) > 0
            shutil.rmtree(self._apppath)
        except Exception:  # was a bare except; keep the best-effort message
            print("The app \'"+self._apppath+"\' cannot be verified for deletion. You may have to remove it manually. Skipping...")

    def buildAppSkeleton(self):
        """Create the bare .app directory tree (Contents, MacOS, Resources)."""
        try:
            os.mkdir(self._apppath)
            os.mkdir(self._contents_prefix)
            os.mkdir(self._macos_prefix)
            os.mkdir(self._resource_prefix)
        except OSError as e:
            if e.errno == errno.EPERM:
                print("You must have root permissions to write to /Applications.")
            raise  # always re-raise: a half-built skeleton is unusable

    def copyIconFile(self):
        """Copy the user-supplied .icns file into Resources (if given)."""
        if self._icon_file is not None:
            shutil.copy(self._icon_file, self._resource_prefix + '/' + self._cfbundle_icon_filename)

    def writeInfoPList(self):
        """Write Contents/Info.plist describing the bundle to macOS."""
        # http://stackoverflow.com/questions/7404792/how-to-create-mac-application-bundle-for-python-script-via-python
        CFBundleName = self._name
        CFBundleVersion = self._version
        CFBundleIconFile = self._cfbundle_icon_filename
        CFBundleGetInfoString = CFBundleName + " " + CFBundleVersion
        CFBundleShortVersionString = CFBundleGetInfoString
        CFBundleIdentifier = "com.gpilab."+CFBundleName
        CFBundleExecutable = self._target
        info_plist = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>CFBundleDevelopmentRegion</key>
    <string>English</string>
    <key>CFBundleExecutable</key>
    <string>%s</string>
    <key>CFBundleGetInfoString</key>
    <string>%s</string>
    <key>CFBundleIconFile</key>
    <string>%s</string>
    <key>CFBundleIdentifier</key>
    <string>%s</string>
    <key>CFBundleInfoDictionaryVersion</key>
    <string>6.0</string>
    <key>CFBundleName</key>
    <string>%s</string>
    <key>CFBundlePackageType</key>
    <string>APPL</string>
    <key>CFBundleShortVersionString</key>
    <string>%s</string>
    <key>CFBundleSignature</key>
    <string>????</string>
    <key>CFBundleVersion</key>
    <string>%s</string>
    <key>NSAppleScriptEnabled</key>
    <string>YES</string>
    <key>NSMainNibFile</key>
    <string>MainMenu</string>
    <key>NSPrincipalClass</key>
    <string>NSApplication</string>
</dict>
</plist>
"""
        with open(self._info_plist_path, "w") as f:
            f.write(info_plist % (CFBundleExecutable, CFBundleGetInfoString, CFBundleIconFile, CFBundleIdentifier, CFBundleName, CFBundleShortVersionString, CFBundleVersion))

    def writePkgInfo(self):
        """Write the fixed Contents/PkgInfo marker ("APPL????")."""
        with open(self._pkg_info_path, "w") as f:
            f.write("APPL????")

    def writeWrappacondaIDFile(self):
        """Drop the marker file proving this bundle was generated by us."""
        with open(self._id_file_path, "w") as f:
            f.write("This app was generated by " + wrappaconda_name_string)

    def setupMiniconda(self):
        """Download and install Miniconda into the bundle, then install the
        requested conda packages (best-effort: failures are printed)."""
        # anaconda website and miniconda package info
        # -python 3 is the default miniconda
        MINICONDA_NAME='Miniconda3'
        if float(self._py_ver) < 3:
            MINICONDA_NAME='Miniconda'
        MINICONDA_WEB='https://repo.continuum.io/miniconda/'
        MINICONDA_OSX=MINICONDA_NAME+'-latest-MacOSX-x86_64.sh'

        # download miniconda (best-effort; the install step reports failure too)
        try:
            cmd = self._get.format(MINICONDA_WEB+MINICONDA_OSX)
            print(cmd)
            subprocess.check_output(cmd, shell=True)
        except Exception:  # was a bare except
            print("Failed to download miniconda.")

        # install miniconda in batch mode into the bundle's Resources
        try:
            os.chmod(self._downloads_prefix+'/'+MINICONDA_OSX, 0o777)
            cmd = self._downloads_prefix+'/'+MINICONDA_OSX+' -b -p '+self._miniconda_prefix
            print(cmd)
            subprocess.check_output(cmd, shell=True)
        except Exception:  # was a bare except
            print("Failed to run miniconda.")

        # install central conda package
        if self._package:
            try:
                python = ' python=='+self._py_ver+' '
                conda_cmd = self._conda_path+' install -y -c '+' -c '.join(self._channel.split(','))+' '+' '.join(self._package.split(',')) + python
                if self._root_env:
                    conda_cmd += ' --file '+self._root_env
                print(conda_cmd)
                subprocess.check_output(conda_cmd, shell=True)
                # shrink the bundle: drop caches, tarballs and indexes
                subprocess.check_output(self._conda_path+' clean -t -i -p -l -y', shell=True)
            except Exception:  # was a bare except
                print("Failed to run conda.")
                raise

    def linkTarget(self):
        """Hard-link the requested target from miniconda/bin into MacOS/."""
        try:
            # check for the existence of the target first
            assert os.path.isfile(self._miniconda_prefix + '/bin/' + self._target)
            os.link(self._miniconda_prefix + '/bin/' + self._target, self._macos_prefix + '/' + self._target)
        except Exception:  # was a bare except
            print(self._target, ' doesn\'t exist in Miniconda bin.')
            raise
def main():
    """Build the .app bundle end to end and report where it was created."""
    app = AppAtizer()
    app.deleteExistingApp()
    app.buildAppSkeleton()
    app.writeWrappacondaIDFile()
    app.copyIconFile()
    app.setupMiniconda()
    app.linkTarget()
    app.writeInfoPList()
    app.writePkgInfo()
    print(app.appPath() + " has been created.")


if __name__ == '__main__':
    main()
import requests
class Wallet(object):
    """Thin client for the Watchtower.cash REST API."""

    def __init__(self, testnet=False):
        # pick the API root for the requested network
        if testnet:
            api_root = 'https://testnet.watchtower.cash/api/'
        else:
            api_root = 'https://watchtower.cash/api/'
        self.base_url = api_root

    def _get_utxos(self, wallet_hash, amount):
        # fetch the wallet's UTXO set and dump the raw response
        endpoint = self.base_url + f'utxo/wallet/{wallet_hash}'
        response = requests.get(endpoint)
        print(response.status_code)
        print(response.json())

    def send(self, amount):
        # demo flow with a hard-coded wallet hash
        self._get_utxos('abcd0123456', amount)
        print(f"Sending {amount} BCH...")
# test/unit/test_finalize.py
from scripts.test import shared
from . import utils
class EmscriptenFinalizeTest(utils.BinaryenTestCase):
    """Checks wasm-emscripten-finalize output handling."""

    def do_output_test(self, args):
        # without any output file specified, don't error, don't write the wasm,
        # but do emit metadata
        cmd = shared.WASM_EMSCRIPTEN_FINALIZE + [
            self.input_path('empty_lld.wat'), '--global-base=1024'
        ] + args
        proc = shared.run_process(cmd, capture_output=True)
        # metadata is always present
        self.assertIn('{', proc.stdout)
        self.assertIn('}', proc.stdout)
        return proc.stdout

    def test_no_output(self):
        out = self.do_output_test([])
        # module is not present
        self.assertNotIn('(module', out)

    def test_text_output(self):
        out = self.do_output_test(['-S'])
        # module is present
        self.assertIn('(module', out)
from PyQt5.QtWidgets import QTreeWidget
from PyQt5.Qt import pyqtSignal
from PyQt5.QtWidgets import QTreeWidgetItem
from PyQt5.Qt import Qt
class TreeWidget(QTreeWidget):
    '''QTreeWidget that keeps a cached flat list of all its items and adds
    string filtering plus Ctrl-click / middle-click signals.
    '''
    # enum ItemShowMode
    ItemsCollapsed = 0
    ItemsExpanded = 1

    def __init__(self, parent=None):
        super().__init__(parent)
        self._refreshAllItemsNeeded = True
        self._allTreeItems = []  # QList<QTreeWidgetItem>
        # BUGFIX: was `self.itemCollapsed`, which is the inherited QTreeWidget
        # *signal*, not the ItemsCollapsed enum constant declared above.
        self._showMode = self.ItemsCollapsed  # ItemShowMode

        self.itemChanged.connect(self._scheduleRefresh)

    def defaultItemShowMode(self):
        '''
        @return: ItemShowMode applied to parents when the filter is cleared
        '''
        return self._showMode

    def setDefaultItemShowMode(self, mode):
        '''
        @param: mode ItemShowMode
        '''
        self._showMode = mode

    def allItems(self):
        '''Return every item in the tree, rebuilding the cache when stale.

        @return: QList<QTreeWidgetItem>
        '''
        if self._refreshAllItemsNeeded:
            self._allTreeItems.clear()
            self.iterateAllItems(None)
            self._refreshAllItemsNeeded = False

        return self._allTreeItems

    def appendToParentItemByText(self, parentText, item):
        '''Append item under the first top-level item matching parentText.

        @param: parentText QString
        @param: item QTreeWidgetItem
        @return: bool - False when no matching parent exists
        '''
        list_ = self.findItems(parentText, Qt.MatchExactly)
        if len(list_) == 0:
            return False

        # QTreeWidgetItem
        parentItem = list_[0]
        if not parentItem:
            return False

        self._allTreeItems.append(item)
        parentItem.addChild(item)
        return True

    def appendToParentItemByItem(self, parent, item):
        '''Append item under parent; parent must belong to this tree.'''
        if not parent or parent.treeWidget() != self:
            return False

        self._allTreeItems.append(item)
        # BUGFIX: QTreeWidgetItem has no appendChild(); the correct Qt API
        # is addChild() (appendChild would raise AttributeError).
        parent.addChild(item)
        return True

    def prependToParentItemByText(self, parentText, item):
        '''Insert item as the first child of the item matching parentText.'''
        list_ = self.findItems(parentText, Qt.MatchExactly)
        if len(list_) == 0:
            return False

        # QTreeWidgetItem
        parentItem = list_[0]
        if not parentItem:
            return False

        self._allTreeItems.append(item)
        parentItem.insertChild(0, item)
        return True

    def prependToParentItemByItem(self, parent, item):
        '''Insert item as the first child of parent (must be in this tree).'''
        if not parent or parent.treeWidget() != self:
            return False

        self._allTreeItems.append(item)
        parent.insertChild(0, item)
        return True

    def addTopLevelItem(self, item):
        '''
        @param: item QTreeWidgetItem
        '''
        self._allTreeItems.append(item)
        super().addTopLevelItem(item)

    def addTopLevelItems(self, items):
        '''
        @param: items QList<QTreeWidgetItem>
        '''
        self._allTreeItems.extend(items)
        super().addTopLevelItems(items)

    def insertTopLevelItem(self, index, item):
        '''
        @param: index int
        @param: item QTreeWidgetItem
        '''
        self._allTreeItems.append(item)
        super().insertTopLevelItem(index, item)

    def insertTopLevelItems(self, index, items):
        '''
        @param: index int
        @param: items QList<QTreeWidgetItem>
        '''
        self._allTreeItems.extend(items)
        super().insertTopLevelItems(index, items)

    def deleteItem(self, item):
        '''Drop item from the cache and mark the cache for rebuild.

        @param: item QTreeWidgetItem
        '''
        if item in self._allTreeItems:
            self._allTreeItems.remove(item)

        self._refreshAllItemsNeeded = True

    def deleteItems(self, items):
        '''Drop several items from the cache and mark it for rebuild.

        @param: items QList<QTreeWidgetItem>
        '''
        for item in items:
            if item in self._allTreeItems:
                self._allTreeItems.remove(item)

        self._refreshAllItemsNeeded = True

    # Q_SIGNALS:
    itemControlClicked = pyqtSignal(QTreeWidgetItem)  # item
    itemMiddleButtonClicked = pyqtSignal(QTreeWidgetItem)  # item

    # public Q_SLOTS:
    def filterString(self, string):
        '''Hide items whose first-column text does not contain `string`
        (case-insensitive); parents of matches stay visible and expanded.'''
        # QList<QTreeWidgetItem>
        _allItems = self.allItems()
        # QList<QTreeWidgetItem>
        parents = []
        stringIsEmpty = not string
        strLower = string.lower()
        for item in _allItems:
            if stringIsEmpty:
                containsString = True
            else:
                text = item.text(0).lower()
                containsString = strLower in text
            if containsString:
                item.setHidden(False)
                itemParent = item.parent()
                if itemParent and itemParent not in parents:
                    parents.append(itemParent)
            else:
                item.setHidden(True)
                itemParent = item.parent()
                if itemParent:
                    itemParent.setHidden(True)

        for parentItem in parents:
            parentItem.setHidden(False)
            if stringIsEmpty:
                # BUGFIX: compared against the `itemExpanded` *signal*;
                # the enum constant is ItemsExpanded.
                parentItem.setExpanded(self._showMode == self.ItemsExpanded)
            else:
                parentItem.setExpanded(True)
            # keep ancestors visible too (parents list grows while iterating)
            parentOfParentItem = parentItem.parent()
            if parentOfParentItem and parentOfParentItem not in parents:
                parents.append(parentOfParentItem)

    def clear(self):
        super().clear()
        self._allTreeItems.clear()

    # private Q_SLOTS:
    def _scheduleRefresh(self):
        # lazily rebuilt on the next allItems() call
        self._refreshAllItemsNeeded = True

    # private:
    def mousePressEvent(self, event):
        '''
        @param: event QMouseEvent
        '''
        if event.modifiers() == Qt.ControlModifier:
            self.itemControlClicked.emit(self.itemAt(event.pos()))

        if event.buttons() == Qt.MiddleButton:
            self.itemMiddleButtonClicked.emit(self.itemAt(event.pos()))

        super().mousePressEvent(event)

    def iterateAllItems(self, parent):
        '''Depth-first walk appending every leaf item to the cache.

        @param: parent QTreeWidgetItem or None for the top level
        '''
        if parent:
            count = parent.childCount()
        else:
            count = self.topLevelItemCount()

        for idx in range(count):
            if parent:
                item = parent.child(idx)
            else:
                item = self.topLevelItem(idx)

            if item.childCount() == 0:
                self._allTreeItems.append(item)

            self.iterateAllItems(item)
| 2.75 | 3 |
# app/api/v2/models/sales.py
import os
import sys
LOCALPATH = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, LOCALPATH + '/../../../../')
from app.api.v2.db import Db
def format_sale(sale):
    """Convert a raw sales row into a response dictionary.

    Expects an indexable row laid out as
    (id, books, total, created_by, created_at, attendant_name).
    """
    sale_id, books, total, created_by, created_at, attendant = sale[:6]
    return {
        "id": sale_id,
        "books": books,
        "total": total,
        "created_by": created_by,
        "attendant_name": attendant,
        "created_at": str(created_at),
    }
class SalesModel(Db):
    """Sales Model. Read and write sales records through the Db helper."""

    def get_all_sales(self):
        """Return every sales record, formatted as response dictionaries."""
        return [format_sale(sale) for sale in Db().get_query('sales')]

    def get_single_sale(self, param, this_col):
        """Return the first sale whose column `this_col` equals `param`,
        formatted as a dictionary, or None when no row matches."""
        records = [row for row in Db().get_query(
            'sales') if row[this_col] == param]
        if records:
            return format_sale(records[0])
        return None  # explicit: no matching sale

    def add_new_record(self, new_sale):
        """Insert a new sale row.

        `new_sale` is a sequence of (book_ids, total, created_by, attendant).
        Returns ("Failed to add", 500) on failure, None on success.
        """
        # SECURITY(review): values are interpolated straight into the SQL
        # string; if any element of new_sale can come from user input this is
        # an SQL-injection vector -- switch to a parameterized query as soon
        # as the Db helper supports one.
        try:
            Db().db_query(f"""
      INSERT INTO sales (book_id, total, created_by, attendant)
      VALUES (ARRAY{new_sale[0]}, {new_sale[1]}, {new_sale[2]}, '{new_sale[3]}');
      """)
        except Exception:  # was a bare except: don't swallow SystemExit etc.
            return "Failed to add", 500
# ubirch/linux/bleManager.py
from bleSuite import bleConnectionManager, bleServiceManager
from bluepy.btle import Scanner
from ubirch.linux.bleServiceManager import BLEServiceManager
class BLEManager(object):
    """ BLE network manager: wraps connection, discovery and GATT I/O. """

    def __init__(self, address, adapter, addressType, securityLevel, createRequester, psm=0, mtu=0):
        """ Create an instance of BLE Manager.

        NOTE(review): `createRequester`, `psm` and `mtu` are accepted but not
        used anywhere in this class; kept for interface compatibility.
        """
        self.address = address
        self.cm = bleConnectionManager.BLEConnectionManager(address, adapter, addressType, securityLevel)
        self.sm = BLEServiceManager(self.cm, self.address)

    def connectDevice(self):
        """ Connect to the BLE device whose address was given at construction. """
        self.cm.connect()

    def disconnectDevice(self):
        """ Drop the connection to the BLE device. """
        self.cm.disconnect()

    def discoverDevice(self, name, timeout=5):
        """ Scan for `timeout` seconds and return the MAC address (str) of the
        first device advertising `name`; raises when none is found. """
        sm = BLEScanDevices()
        deviceList = sm.scan(timeout)
        for device in deviceList:
            for values in device.getScanData():
                # scan-data values may carry trailing NUL padding
                if values[2].rstrip('\x00') == name:
                    return str(device.addr)
        raise Exception("NO DEVICE FOUND")

    def discoverServices(self):
        """ Run GATT service discovery and return the device description. """
        return bleServiceManager.bleServiceDiscovery(self.address, self.cm)

    def discoverCharacteristics(self):
        """ Return the characteristic UUIDs of all non-GAP/GATT services. """
        # GAP (0x1800) and GATT (0x1801) services are skipped
        ignoreUUID = ["00001800-0000-1000-8000-00805f9b34fb", "00001801-0000-1000-8000-00805f9b34fb"]
        devServices = self.discoverServices()
        devCharList = []
        for service in devServices.services:
            if service.uuid not in ignoreUUID:
                for characteristic in service.characteristics:
                    devCharList.append(characteristic.uuid)
        # BUGFIX: an unreachable `raise Exception("No Services Found")` used
        # to follow this return; removed as dead code.
        return devCharList

    def write(self, handle, data):
        """ Write `data` to the characteristic at `handle`. """
        bleServiceManager.bleServiceWriteToHandle(self.cm, handle, data)

    def read(self, handle):
        """ Read and return the value of the characteristic at `handle`. """
        # TODO add a function getHandlebyUUID to get handle using uuid
        # helps to read data on both mac n linux with ease
        return bleServiceManager.bleServiceReadByHandle(self.cm, handle)

    def isConnected(self):
        """ Return whether the connection manager reports a live link. """
        return self.cm.isConnected()

    # Services and Characteristics
    def bleServiceWriteToHandle(self, handle, data):
        """ Write `data` to `handle` and return the underlying result. """
        return bleServiceManager.bleServiceWriteToHandle(self.cm, handle, data)

    def bleServiceReadByHandle(self, handle):
        """ Read the characteristic value at `handle`. """
        return bleServiceManager.bleServiceReadByHandle(self.cm, handle)

    def bleServiceReadByUUID(self, uuid):
        """ Read the characteristic value identified by `uuid`. """
        return bleServiceManager.bleServiceReadByUUID(self.cm, uuid)

    def bleDiscoverServices(self):
        """ Same as discoverServices(); kept for interface compatibility. """
        return bleServiceManager.bleServiceDiscovery(self.address, self.cm)

    def showServices(self):
        """ Print the device's service/characteristic structure. """
        bledevice = bleServiceManager.bleServiceDiscovery(self.address, self.cm)
        bledevice.printDeviceStructure()

    def bleGetHandlefromUUID(self, uuid):
        """ Return the value handle for `uuid`, or -1 when not found. """
        bledevice = bleServiceManager.bleServiceDiscovery(self.address, self.cm)
        for service in bledevice.services:
            for characteristic in service.characteristics:
                if uuid == characteristic.uuid:
                    return characteristic.valueHandle
        return -1

    def bleServiceWriteByUUID(self, uuid, data):
        """ Resolve `uuid` to a handle and write `data` to it. """
        handle = self.bleGetHandlefromUUID(uuid)
        return self.bleServiceWriteToHandle(handle, data)
class BLEScanDevices(object):
    """Wrapper around bluepy's Scanner for discovering BLE devices."""

    def __init__(self):
        # underlying bluepy scanner
        self.sm = Scanner()

    def scan(self, timeOut=10):
        """Scan for advertising devices for `timeOut` seconds; return them."""
        return self.sm.scan(timeOut)

    def stopScan(self):
        # not implemented yet
        pass

    def isScanning(self):
        # not implemented yet
        pass

    def getDeviceAddress(self, deviceName, timeOut=10):
        """Return the MAC address (str) of the first device advertising
        `deviceName`; raises when no match is found within `timeOut` seconds.

        NOTE(review): the original docstring claimed a (name, address) tuple
        was returned, but the code below clearly returns only the address.
        """
        deviceList = self.scan(timeOut)
        for device in deviceList:
            for values in device.getScanData():
                # scan-data values may carry trailing NUL padding
                if values[2].rstrip('\x00') == deviceName:
                    return str(device.addr)
        raise Exception("NO DEVICE FOUND")
# sysinv/cgts-client/cgts-client/cgtsclient/v1/load.py
# Copyright (c) 2015-2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from cgtsclient.common import base
from cgtsclient import exc
CREATION_ATTRIBUTES = ['software_version', 'compatible_version',
'required_patches']
IMPORT_ATTRIBUTES = ['path_to_iso', 'path_to_sig', 'active']
class Load(base.Resource):
    """Resource wrapper for a single software load entry."""

    def __repr__(self):
        return "<loads {}>".format(self._info)
class LoadManager(base.Manager):
    """API manager for software load resources (/v1/loads)."""
    resource_class = Load

    def list(self):
        """Return all loads known to the system."""
        return self._list('/v1/loads/', "loads")

    def get(self, load_id):
        """Return a single load by id, or None when it does not exist."""
        path = '/v1/loads/%s' % load_id
        try:
            return self._list(path)[0]
        except IndexError:
            return None

    def _create_load(self, load, path):
        # reject payloads whose keys don't exactly match the creation schema
        if set(load.keys()) != set(CREATION_ATTRIBUTES):
            raise exc.InvalidAttribute()
        return self._create(path, load)

    def create(self, load):
        """Create a new load record and return the created resource.

        BUGFIX: the result of _create_load was previously dropped, unlike
        the sibling import_load_metadata which returns it.
        """
        path = '/v1/loads/'
        return self._create_load(load, path)

    def import_load_metadata(self, load):
        """Create a load record from imported metadata."""
        path = '/v1/loads/import_load_metadata'
        return self._create_load(load, path)

    def import_load(self, **kwargs):
        """Upload an ISO/signature pair and import it as a new load.

        Accepts only the IMPORT_ATTRIBUTES keyword arguments
        (path_to_iso, path_to_sig, active); raises InvalidAttribute on
        anything else.
        """
        path = '/v1/loads/import_load'
        active = None
        load_info = {}
        for (key, value) in kwargs.items():
            if key in IMPORT_ATTRIBUTES:
                # 'active' travels in the form data, not in the file body
                if key == 'active':
                    active = value
                else:
                    load_info[key] = value
            else:
                raise exc.InvalidAttribute(key)

        json_data = self._upload_multipart(
            path, body=load_info, data={'active': active}, check_exceptions=True)
        return self.resource_class(self, json_data)

    def delete(self, load_id):
        """Delete the load with the given id."""
        path = '/v1/loads/%s' % load_id
        return self._delete(path)

    def update(self, load_id, patch):
        """Apply a JSON patch to the load with the given id."""
        path = '/v1/loads/%s' % load_id
        return self._update(path, patch)
# tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py
# Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
from model_compression_toolkit.tpc_models.default_tp_model import get_op_quantization_configs
from model_compression_toolkit.tpc_models.keras_tp_models.keras_default import generate_keras_default_tpc
from tests.common_tests.helpers.generate_test_tp_model import generate_mixed_precision_test_tp_model
from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
import model_compression_toolkit as mct
from model_compression_toolkit.common.mixed_precision.kpi import KPI
from model_compression_toolkit.common.mixed_precision.mixed_precision_quantization_config import \
MixedPrecisionQuantizationConfig
from model_compression_toolkit.common.user_info import UserInformation
from tests.common_tests.base_feature_test import BaseFeatureNetworkTest
from tests.common_tests.helpers.tensors_compare import cosine_similarity
keras = tf.keras
layers = keras.layers
tp = mct.target_platform
class MixedPercisionBaseTest(BaseKerasFeatureNetworkTest):
    """Shared fixture for the weights mixed-precision tests: a two-conv
    network quantized under a mixed-precision configuration."""

    def __init__(self, unit_test):
        super().__init__(unit_test)

    def get_quantization_config(self):
        base_qc = mct.QuantizationConfig(mct.QuantizationErrorMethod.MSE,
                                         mct.QuantizationErrorMethod.MSE,
                                         relu_bound_to_power_of_2=True,
                                         weights_bias_correction=True,
                                         weights_per_channel_threshold=True,
                                         input_scaling=True,
                                         activation_channel_equalization=True)
        return MixedPrecisionQuantizationConfig(base_qc, num_of_images=1)

    def get_input_shapes(self):
        return [[self.val_batch_size, 224, 244, 3]]

    def create_networks(self):
        inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
        net = layers.Conv2D(30, 40)(inputs)
        net = layers.BatchNormalization()(net)
        net = layers.Conv2D(50, 40)(net)
        net = layers.ReLU()(net)
        return keras.Model(inputs=inputs, outputs=net)

    def compare(self, quantized_model, float_model, input_x=None, quantization_info: UserInformation = None):
        # This is a base test, so it does not check a thing. Only actual tests of mixed precision
        # compare things to test.
        raise NotImplementedError
class MixedPercisionManuallyConfiguredTest(MixedPercisionBaseTest):
    """Mixed precision with a manually chosen (2, 1) bit-width assignment."""

    def get_tpc(self):
        base_config, _ = get_op_quantization_configs()
        mp_tp_model = generate_mixed_precision_test_tp_model(base_cfg=base_config,
                                                             mp_bitwidth_candidates_list=[(8, 8), (2, 8), (3, 8)])
        return generate_keras_default_tpc(name="mp_test", tp_model=mp_tp_model)

    def get_quantization_config(self):
        qc = mct.QuantizationConfig(mct.QuantizationErrorMethod.MSE, mct.QuantizationErrorMethod.MSE,
                                    relu_bound_to_power_of_2=True, weights_bias_correction=True,
                                    weights_per_channel_threshold=False, input_scaling=True,
                                    activation_channel_equalization=True)
        return MixedPrecisionQuantizationConfig(qc)

    def get_kpi(self):
        # Return some KPI (it does not really matter the value here as search_methods is not done,
        # and the configuration is
        # set manually)
        return KPI(1)

    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        # BUGFIX: use the test-case assertion instead of a bare `assert`
        # (bare asserts disappear under python -O and are inconsistent with
        # the unit_test.assertTrue calls below).
        self.unit_test.assertTrue(quantization_info.mixed_precision_cfg == [2, 1])
        # 2-bit layer -> at most 4 distinct weight values; 3-bit -> at most 8
        self.unit_test.assertTrue(np.unique(quantized_model.layers[2].weights[0]).flatten().shape[0] <= 4)
        self.unit_test.assertTrue(np.unique(quantized_model.layers[4].weights[0]).flatten().shape[0] <= 8)
class MixedPercisionSearchTest(MixedPercisionBaseTest):
    """Mixed-precision search with an unbounded KPI: expects 8-bit weights."""

    def __init__(self, unit_test):
        super().__init__(unit_test)

    def get_kpi(self):
        # kpi is infinity -> should give best model - 8bits
        return KPI(np.inf)

    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        # BUGFIX: use the test-case assertion instead of a bare `assert`
        # (bare asserts disappear under python -O).
        # kpi is infinity -> should give best model - 8bits
        self.unit_test.assertTrue((quantization_info.mixed_precision_cfg == [0, 0]).all())
        for i in range(30):  # quantized per channel
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[2].weights[0][:, :, :, i]).flatten().shape[0] <= 256)
        for i in range(50):  # quantized per channel
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[4].weights[0][:, :, :, i]).flatten().shape[0] <= 256)
class MixedPercisionSearchKPI4BitsAvgTest(MixedPercisionBaseTest):
    """Mixed-precision search with a 4-bit-average KPI budget."""

    def __init__(self, unit_test):
        super().__init__(unit_test)

    def get_kpi(self):
        # kpi is for 4 bits on average
        return KPI(2544140 * 4 / 8)

    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        # BUGFIX: use the test-case assertion instead of a bare `assert`
        # (bare asserts disappear under python -O).
        self.unit_test.assertTrue((quantization_info.mixed_precision_cfg == [1, 1]).all())
        for i in range(30):  # quantized per channel, 4 bits -> <= 16 values
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[2].weights[0][:, :, :, i]).flatten().shape[0] <= 16)
        for i in range(50):  # quantized per channel, 4 bits -> <= 16 values
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[4].weights[0][:, :, :, i]).flatten().shape[0] <= 16)
class MixedPercisionSearchKPI2BitsAvgTest(MixedPercisionBaseTest):
    """Mixed-precision search with a 2-bit-average KPI budget."""

    def __init__(self, unit_test):
        super().__init__(unit_test)

    def get_kpi(self):
        # kpi is for 2 bits on average
        return KPI(2544200 * 2 / 8)

    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        # BUGFIX: use the test-case assertion instead of a bare `assert`
        # (bare asserts disappear under python -O).
        self.unit_test.assertTrue((quantization_info.mixed_precision_cfg == [2, 2]).all())
        for i in range(30):  # quantized per channel, 2 bits -> <= 4 values
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[2].weights[0][:, :, :, i]).flatten().shape[0] <= 4)
        for i in range(50):  # quantized per channel, 2 bits -> <= 4 values
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[4].weights[0][:, :, :, i]).flatten().shape[0] <= 4)
class MixedPercisionDepthwiseTest(MixedPercisionBaseTest):
    """Mixed precision on a depthwise-conv network: with an unbounded KPI the
    quantized model should stay numerically close to the float model."""

    def __init__(self, unit_test):
        super().__init__(unit_test)

    def get_kpi(self):
        return KPI(np.inf)

    def create_networks(self):
        inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
        net = layers.DepthwiseConv2D(30)(inputs)
        net = layers.BatchNormalization()(net)
        net = layers.ReLU()(net)
        return keras.Model(inputs=inputs, outputs=net)

    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        expected = float_model.predict(input_x)
        actual = quantized_model.predict(input_x)
        cs = cosine_similarity(expected, actual)
        self.unit_test.assertTrue(np.isclose(cs, 1), msg=f'fail cosine similarity check:{cs}')

    def get_tpc(self):
        base_config, _ = get_op_quantization_configs()
        base_config = base_config.clone_and_edit(weights_n_bits=16,
                                                 activation_n_bits=16)
        mp_tp_model = generate_mixed_precision_test_tp_model(base_cfg=base_config,
                                                             mp_bitwidth_candidates_list=[(8, 16), (2, 16), (4, 16),
                                                                                          (16, 16)])
        return generate_keras_default_tpc(name="mp_dw_test", tp_model=mp_tp_model)

    def get_quantization_config(self):
        quant_cfg = mct.QuantizationConfig(mct.QuantizationErrorMethod.MSE,
                                           mct.QuantizationErrorMethod.MSE,
                                           relu_bound_to_power_of_2=False,
                                           weights_bias_correction=False,
                                           weights_per_channel_threshold=True,
                                           input_scaling=False,
                                           activation_channel_equalization=False)
        return MixedPrecisionQuantizationConfig(quant_cfg)
class MixedPrecisionActivationDisabled(MixedPercisionBaseTest):
    """Weights-only mixed precision: activation quantization disabled."""

    def __init__(self, unit_test):
        super().__init__(unit_test)

    def get_quantization_config(self):
        qc = mct.QuantizationConfig(mct.QuantizationErrorMethod.MSE,
                                    mct.QuantizationErrorMethod.MSE,
                                    relu_bound_to_power_of_2=True,
                                    weights_bias_correction=True,
                                    weights_per_channel_threshold=True,
                                    input_scaling=False,
                                    activation_channel_equalization=False)
        return MixedPrecisionQuantizationConfig(qc, num_of_images=1)

    def get_tpc(self):
        base_config, _ = get_op_quantization_configs()
        activation_disabled_config = base_config.clone_and_edit(enable_activation_quantization=False)
        mp_tp_model = generate_mixed_precision_test_tp_model(base_cfg=activation_disabled_config,
                                                             mp_bitwidth_candidates_list=[(8, 8), (4, 8), (2, 8)])
        return generate_keras_default_tpc(name="mp_weights_only_test", tp_model=mp_tp_model)

    def get_kpi(self):
        # kpi is infinity -> should give best model - 8bits
        return KPI(np.inf)

    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        # BUGFIX: use the test-case assertion instead of a bare `assert`
        # (bare asserts disappear under python -O).
        # kpi is infinity -> should give best model - 8bits
        self.unit_test.assertTrue((quantization_info.mixed_precision_cfg == [0, 0]).all())
        for i in range(30):  # quantized per channel
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[1].weights[0][:, :, :, i]).flatten().shape[0] <= 256)
        for i in range(50):  # quantized per channel
            self.unit_test.assertTrue(
                np.unique(quantized_model.layers[2].weights[0][:, :, :, i]).flatten().shape[0] <= 256)
src/ansible_navigator/actions/run.py | NaincyKumariKnoldus/ansible-navigator | 0 | 17241 | """:run
"""
import curses
import datetime
import json
import logging
import os
import re
import shlex
import shutil
import time
import uuid
from math import floor
from queue import Queue
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from ..action_base import ActionBase
from ..action_defs import RunStdoutReturn
from ..app_public import AppPublic
from ..configuration_subsystem import ApplicationConfiguration
from ..runner import CommandAsync
from ..steps import Step
from ..ui_framework import CursesLine
from ..ui_framework import CursesLinePart
from ..ui_framework import CursesLines
from ..ui_framework import Interaction
from ..ui_framework import dict_to_form
from ..ui_framework import form_to_dict
from ..ui_framework import nonblocking_notification
from ..ui_framework import warning_notification
from ..utils.functions import abs_user_path
from ..utils.functions import human_time
from ..utils.functions import remove_ansi
from ..utils.functions import round_half_up
from ..utils.serialize import json_dump
from . import _actions as actions
from . import run_action
# (regex, curses color number) pairs mapping a task result word to its color.
RESULT_TO_COLOR = [
    ("(?i)^failed$", 9),
    ("(?i)^ok$", 10),
    ("(?i)^ignored$", 13),
    ("(?i)^skipped$", 14),
    ("(?i)^in_progress$", 8),
]


def get_color(word: str) -> int:
    """Map a result word (case-insensitive) to its curses color number.

    Replaces the previous lambda assignment (PEP 8 E731) with a ``def``.

    :param word: A result word, e.g. ``FAILED`` or ``ok``
    :return: The matching color number, or 0 when the word is unknown
    """
    return next((color for pattern, color in RESULT_TO_COLOR if re.match(pattern, word)), 0)


def color_menu(_colno: int, colname: str, entry: Dict[str, Any]) -> Tuple[int, int]:
    # pylint: disable=too-many-branches
    """Find matching color for word

    :param _colno: The column number (unused)
    :param colname: A word to match
    :param entry: The menu entry (play row or task row) being rendered
    :return: The (color, decoration) pair for the cell
    """
    colval = entry[colname]
    color = 0
    decoration = 0
    if "__play_name" in entry:
        if not colval:
            color = 8
        elif colname in ["__task_count", "__play_name", "__progress"]:
            failures = entry["__failed"] + entry["__unreachable"]
            if failures:
                color = 9
            elif entry["__ok"]:
                color = 10
            else:
                color = 8
        elif colname == "__changed":
            color = 11
        else:
            # strip the "__" prefix and color by the stat name itself
            color = get_color(colname[2:])
        if colname == "__progress" and entry["__progress"].strip().lower() == "complete":
            decoration = curses.A_BOLD
    elif "task" in entry:
        # BUGFIX: this previously compared against "__in_progress", which
        # ``__result`` (e.g. "IN_PROGRESS") can never equal after ``.lower()``,
        # leaving the branch unreachable; in-progress rows now color uniformly.
        if entry["__result"].lower() == "in_progress":
            color = get_color(entry["__result"])
        elif colname in ["__result", "__host", "__number", "__task", "__task_action"]:
            color = get_color(entry["__result"])
        elif colname == "__changed":
            if colval is True:
                color = 11
            else:
                color = get_color(entry["__result"])
        elif colname == "__duration":
            color = 12
    return color, decoration
def content_heading(obj: Any, screen_w: int) -> Union[CursesLines, None]:
    """Build the three-line heading (play, task, result) for a task content view.

    :param obj: The content going to be shown
    :param screen_w: The current screen width
    :return: The heading lines, or ``None`` when the object is not a task
    """
    if not (isinstance(obj, dict) and "task" in obj):
        return None

    def banner(detail: str) -> CursesLine:
        """Pad a detail string with '*' out to the screen width."""
        padded = detail + "*" * (screen_w - len(detail))
        return CursesLine(
            (CursesLinePart(column=0, string=padded, color=0, decoration=0),),
        )

    play_line = banner(f"PLAY [{obj['play']}:{obj['__number']}] ")
    task_line = banner(f"TASK [{obj['task']}] ")

    if obj["__changed"] is True:
        result_color = 11
        result = "CHANGED"
    else:
        result_color = next(
            (x[1] for x in RESULT_TO_COLOR if re.match(x[0], obj["__result"])), 0
        )
        result = obj["__result"]

    msg = ""
    if "res" in obj and "msg" in obj["res"]:
        msg = str(obj["res"]["msg"]).replace("\n", " ").replace("\r", "")

    summary = f"{result}: [{obj['__host']}] {msg}"
    summary = summary + (" " * (screen_w - len(summary) + 1))
    result_line = CursesLine(
        (
            CursesLinePart(
                column=0, string=summary, color=result_color, decoration=curses.A_UNDERLINE
            ),
        ),
    )
    return CursesLines((play_line, task_line, result_line))
def filter_content_keys(obj: Dict[Any, Any]) -> Dict[Any, Any]:
    """Drop internal bookkeeping keys before content is shown.

    Keys with a leading underscore or a ``uuid`` suffix are removed.

    :param obj: The content about to be shown
    :return: The content without internal keys
    """
    filtered = {}
    for key, value in obj.items():
        if key.startswith("_") or key.endswith("uuid"):
            continue
        filtered[key] = value
    return filtered
# Column order for the top-level plays menu; these keys are computed on each
# play dict by Action._play_stats and color_menu.
PLAY_COLUMNS = [
    "__play_name",
    "__ok",
    "__changed",
    "__unreachable",
    "__failed",
    "__skipped",
    "__ignored",
    "__in_progress",
    "__task_count",
    "__progress",
]

# Column order for the per-play task-list menu; these keys are set on each
# task dict by Action._handle_message.
TASK_LIST_COLUMNS = [
    "__result",
    "__host",
    "__number",
    "__changed",
    "__task",
    "__task_action",
    "__duration",
]
@actions.register
class Action(ActionBase):
    # pylint: disable=too-many-instance-attributes
    """:run"""

    # Regex routing the ``:run`` (or ``:r``) command plus optional parameters.
    KEGEX = r"""(?x)
            ^
            (?P<run>r(?:un)?
            (\s(?P<params_run>.*))?)
            $"""

    def __init__(self, args: ApplicationConfiguration):
        """Initialize the ``:run`` action.

        :param args: The current settings for the application
        """
        super().__init__(args=args, logger_name=__name__, name="run")
        self._subaction_type: str
        # (message, curses color) raised from play output, e.g. ("ERROR", 9)
        self._msg_from_plays: Tuple[Optional[str], Optional[int]] = (None, None)
        # Events produced by ansible-runner are drained from this queue
        self._queue: Queue = Queue()
        self.runner: CommandAsync
        self._runner_finished: bool
        self._auto_scroll = False
        #: Flag when the first message is received from runner
        self._first_message_received: bool = False
        # The top-level plays menu; each play dict carries its nested tasks
        self._plays = Step(
            name="plays",
            step_type="menu",
            columns=PLAY_COLUMNS,
            value=[],
            show_func=self._play_stats,
            select_func=self._task_list_for_play,
        )
        self._task_list_columns: List[str] = TASK_LIST_COLUMNS
        self._content_key_filter: Callable = filter_content_keys

    @property
    def mode(self):
        """Determine the effective mode for this action.

        if mode == stdout and playbook artifact creation is enabled
        run in interactive mode, but print stdout
        """
        if all(
            (
                self._args.mode == "stdout",
                self._args.playbook_artifact_enable,
                self._args.app != "replay",
            ),
        ):
            return "stdout_w_artifact"
        return self._args.mode

    def run_stdout(self) -> RunStdoutReturn:
        """Execute the ``run`` (or ``replay``) request for mode stdout.

        :returns: The return code from the runner invocation, along with a message to review the
            logs if not 0.
        """
        if self._args.app == "replay":
            successful: bool = self._init_replay()
            if successful:
                return RunStdoutReturn(message="", return_code=0)
            return RunStdoutReturn(message="Please review the log for errors.", return_code=1)
        self._logger.debug("playbook requested in interactive mode")
        self._subaction_type = "playbook"
        self._logger = logging.getLogger(f"{__name__}_{self._subaction_type}")
        self._run_runner()
        while True:
            self._dequeue()
            if self.runner.finished:
                if self._args.playbook_artifact_enable:
                    self.write_artifact()
                self._logger.debug("runner finished")
                break
            # Sleep briefly to prevent 100% CPU utilization
            # in mode stdout, the delay introduced by the curses key read is not present
            time.sleep(0.01)
        return_code = self.runner.ansible_runner_instance.rc
        if return_code != 0:
            return RunStdoutReturn(
                message="Please review the log for errors.",
                return_code=return_code,
            )
        return RunStdoutReturn(message="", return_code=return_code)

    def run(self, interaction: Interaction, app: AppPublic) -> Union[Interaction, None]:
        """run :run or :replay

        :param interaction: The interaction from the user
        :param app: The app instance
        :return: The pending interaction or none
        """
        self._prepare_to_run(app, interaction)
        if interaction.action.match.groupdict().get("run"):
            self._logger.debug("run requested in interactive mode")
            self._subaction_type = "run"
            str_uuid = str(uuid.uuid4())
            self._logger = logging.getLogger(f"{__name__}_{str_uuid[-4:]}")
            self._name = f"run_{str_uuid[-4:]}"
            initialized = self._init_run()
        elif interaction.action.match.groupdict().get("replay"):
            self._logger.debug("replay requested in interactive mode")
            self._subaction_type = "replay"
            self._name = "replay"
            self._logger = logging.getLogger(f"{__name__}_{self._subaction_type}")
            initialized = self._init_replay()

        # NOTE(review): ``initialized`` is unbound if neither group matched;
        # the KEGEX routing presumably guarantees one branch runs — confirm.
        if not initialized:
            self._prepare_to_exit(interaction)
            return None

        self.steps.append(self._plays)

        # Show a notification until the first message from the queue is processed
        if self._subaction_type == "run":
            messages = ["Preparing for automation, please wait..."]
            notification = nonblocking_notification(messages=messages)
            interaction.ui.show(notification)
            while not self._first_message_received:
                self.update()

        while True:
            self.update()
            self._take_step()

            if not self.steps:
                if not self._runner_finished:
                    self._logger.error("Can not step back while playbook in progress, :q! to exit")
                    self.steps.append(self._plays)
                else:
                    self._logger.debug(
                        "No steps remaining for '%s' returning to calling app",
                        self._name,
                    )
                    break

            if self.steps.current.name == "quit":
                if self._args.app == "replay":
                    self._prepare_to_exit(interaction)
                    return self.steps.current
                done = self._prepare_to_quit(self.steps.current)
                if done:
                    self._prepare_to_exit(interaction)
                    return self.steps.current
                self.steps.back_one()

        self._prepare_to_exit(interaction)
        return None

    # pylint: disable=too-many-branches
    def _init_run(self) -> bool:
        """in the case of :run, check the user input

        :return: True when the playbook was started, False when the user cancelled
        """
        # Ensure the playbook and inventory are valid
        self._update_args(
            ["run"] + shlex.split(self._interaction.action.match.groupdict()["params_run"] or ""),
        )
        if isinstance(self._args.playbook, str):
            playbook_valid = os.path.exists(self._args.playbook)
        else:
            playbook_valid = False

        if isinstance(self._args.inventory, list):
            inventory_valid = all((os.path.exists(inv) for inv in self._args.inventory))
        else:
            # Permit running without an inventory
            inventory_valid = True

        if not all((playbook_valid, inventory_valid)):
            # Fall back to an interactive form so the user can correct paths
            populated_form = self._prompt_for_playbook()
            if populated_form["cancelled"]:
                return False

            new_cmd = ["run"]
            new_cmd.append(populated_form["fields"]["playbook"]["value"])
            for field in populated_form["fields"].values():
                if field["name"].startswith("inv_") and field["value"] != "":
                    new_cmd.extend(["-i", field["value"]])
            if populated_form["fields"]["cmdline"]["value"]:
                new_cmd.extend(shlex.split(populated_form["fields"]["cmdline"]["value"]))

            # Parse as if provided from the cmdline
            self._update_args(new_cmd)

        self._run_runner()
        self._logger.info("Run initialized and playbook started.")
        return True

    def _init_replay(self) -> bool:
        """in the case of :replay, replay the artifact

        check for a version, to be safe
        copy the calling app args as our own so they can be updated safely
        with a uuid attached to the name

        :return: True when the artifact was loaded, False otherwise
        """
        self._logger.debug("Starting replay artifact request with mode %s", self.mode)

        if self.mode == "interactive":
            self._update_args(
                ["replay"]
                + shlex.split(self._interaction.action.match.groupdict()["params_replay"] or ""),
            )

        artifact_file = self._args.playbook_artifact_replay

        if isinstance(self._args.playbook_artifact_replay, str):
            artifact_valid = os.path.exists(self._args.playbook_artifact_replay)
        else:
            artifact_valid = False

        if not artifact_valid and self.mode == "interactive":
            populated_form = self._prompt_for_artifact(artifact_file=artifact_file)
            if populated_form["cancelled"]:
                return False
            artifact_file = populated_form["fields"]["artifact_file"]["value"]

        try:
            with open(artifact_file, encoding="utf-8") as fh:
                data = json.load(fh)
        except json.JSONDecodeError as exc:
            self._logger.debug("json decode error: %s", str(exc))
            self._logger.error("Unable to parse artifact file")
            return False

        version = data.get("version", "")
        if version.startswith("1."):
            try:
                stdout = data["stdout"]
                if self.mode == "interactive":
                    self._plays.value = data["plays"]
                    self._interaction.ui.update_status(data["status"], data["status_color"])
                    self.stdout = stdout
                else:
                    for line in data["stdout"]:
                        if self._args.display_color is True:
                            print(line)
                        else:
                            print(remove_ansi(line))
            except KeyError as exc:
                self._logger.debug("missing keys from artifact file")
                self._logger.debug("error was: %s", str(exc))
                return False
        else:
            self._logger.error(
                "Incompatible artifact version, got '%s', compatible = '1.y.z'",
                version,
            )
            return False

        # No runner involved when replaying; mark as finished immediately
        self._runner_finished = True
        self._logger.debug("Completed replay artifact request with mode %s", self.mode)
        return True

    def _prompt_for_artifact(self, artifact_file: str) -> Dict[Any, Any]:
        """prompt for a valid artifact file

        :param artifact_file: The initial path used to pre-populate the form
        :return: The form contents, keyed on field name
        """
        if not isinstance(artifact_file, str):
            artifact_file = ""

        FType = Dict[str, Any]
        form_dict: FType = {
            "title": "Artifact file not found, please confirm the following",
            "fields": [],
        }
        form_field = {
            "name": "artifact_file",
            "prompt": "Path to artifact file",
            "type": "text_input",
            "validator": {"name": "valid_file_path"},
            "pre_populate": artifact_file,
        }
        form_dict["fields"].append(form_field)
        form = dict_to_form(form_dict)
        self._interaction.ui.show(form)
        populated_form = form_to_dict(form, key_on_name=True)
        return populated_form

    def _prompt_for_playbook(self) -> Dict[Any, Any]:
        """prepopulate a form to confirm the playbook details

        :return: The form contents, keyed on field name
        """
        self._logger.debug("Inventory/Playbook not set, provided, or valid, prompting")

        if isinstance(self._args.playbook, str):
            playbook = self._args.playbook
        else:
            playbook = ""

        if isinstance(self._args.inventory, list):
            inventory = self._args.inventory
        else:
            # Offer three empty inventory slots by default
            inventory = ["", "", ""]

        if isinstance(self._args.cmdline, list):
            cmdline = " ".join(self._args.cmdline)
        else:
            cmdline = ""

        FType = Dict[str, Any]
        form_dict: FType = {
            "title": "Inventory and/or playbook not found, please confirm the following",
            "fields": [],
        }
        form_field = {
            "name": "playbook",
            "pre_populate": playbook,
            "prompt": "Path to playbook",
            "type": "text_input",
            "validator": {"name": "valid_file_path"},
        }
        form_dict["fields"].append(form_field)
        for idx, inv in enumerate(inventory):
            form_field = {
                "name": f"inv_{idx}",
                "pre_populate": inv,
                "prompt": "Inventory source",
                "type": "text_input",
                "validator": {"name": "none"},
            }
            form_dict["fields"].append(form_field)
        form_field = {
            "name": "cmdline",
            "pre_populate": cmdline,
            "prompt": "Additional command line parameters",
            "type": "text_input",
            "validator": {"name": "none"},
        }
        form_dict["fields"].append(form_field)
        form = dict_to_form(form_dict)
        self._interaction.ui.show(form)
        populated_form = form_to_dict(form, key_on_name=True)
        return populated_form

    def _take_step(self) -> None:
        """run the current step on the stack"""
        result = None
        if isinstance(self.steps.current, Interaction):
            result = run_action(self.steps.current.name, self.app, self.steps.current)
        elif isinstance(self.steps.current, Step):
            if self.steps.current.show_func:
                self.steps.current.show_func()

            if self.steps.current.type == "menu":
                new_scroll = len(self.steps.current.value)
                if self._auto_scroll:
                    self._interaction.ui.scroll(new_scroll)
                result = self._interaction.ui.show(
                    obj=self.steps.current.value,
                    columns=self.steps.current.columns,
                    color_menu_item=color_menu,
                )
                # Toggle auto-scroll based on whether the user scrolled away
                # from (or back to) the bottom of the menu
                if self._interaction.ui.scroll() < new_scroll and self._auto_scroll:
                    self._logger.debug("autoscroll disabled")
                    self._auto_scroll = False
                elif self._interaction.ui.scroll() >= new_scroll and not self._auto_scroll:
                    self._logger.debug("autoscroll enabled")
                    self._auto_scroll = True
            elif self.steps.current.type == "content":
                result = self._interaction.ui.show(
                    obj=self.steps.current.value,
                    index=self.steps.current.index,
                    content_heading=content_heading,
                    filter_content_keys=self._content_key_filter,
                )

        if result is None:
            self.steps.back_one()
        else:
            self.steps.append(result)

    def _run_runner(self) -> None:
        """spin up runner"""
        executable_cmd: Optional[str]

        # stdout_w_artifact still drives runner interactively so events are
        # captured for the artifact; stdout is printed by _handle_message
        if self.mode == "stdout_w_artifact":
            mode = "interactive"
        else:
            mode = self.mode

        if isinstance(self._args.set_environment_variable, dict):
            set_env_vars = {**self._args.set_environment_variable}
        else:
            set_env_vars = {}

        if self._args.display_color is False:
            set_env_vars["ANSIBLE_NOCOLOR"] = "1"

        kwargs = {
            "container_engine": self._args.container_engine,
            "host_cwd": os.getcwd(),
            "execution_environment_image": self._args.execution_environment_image,
            "execution_environment": self._args.execution_environment,
            "inventory": self._args.inventory,
            "navigator_mode": mode,
            "pass_environment_variable": self._args.pass_environment_variable,
            "set_environment_variable": set_env_vars,
            "private_data_dir": self._args.ansible_runner_artifact_dir,
            "rotate_artifacts": self._args.ansible_runner_rotate_artifacts_count,
            "timeout": self._args.ansible_runner_timeout,
        }
        if isinstance(self._args.playbook, str):
            kwargs.update({"playbook": self._args.playbook})
        if isinstance(self._args.execution_environment_volume_mounts, list):
            kwargs.update(
                {"container_volume_mounts": self._args.execution_environment_volume_mounts},
            )
        if isinstance(self._args.container_options, list):
            kwargs.update({"container_options": self._args.container_options})

        if self._args.execution_environment:
            # Inside the execution environment the plain name is resolvable
            executable_cmd = "ansible-playbook"
        else:
            executable_cmd = shutil.which("ansible-playbook")
        if not executable_cmd:
            msg = "'ansible-playbook' executable not found"
            self._logger.error(msg)
            raise RuntimeError(msg)

        pass_through_arg = []
        if self._args.help_playbook is True:
            pass_through_arg.append("--help")
        if isinstance(self._args.cmdline, list):
            pass_through_arg.extend(self._args.cmdline)
        kwargs.update({"cmdline": pass_through_arg})

        self.runner = CommandAsync(executable_cmd=executable_cmd, queue=self._queue, **kwargs)
        self.runner.run()
        self._runner_finished = False
        self._logger.debug("runner requested to start")

    def _dequeue(self) -> None:
        """Drain the runner queue"""
        drain_count = 0
        while not self._queue.empty():
            if not self._first_message_received:
                self._first_message_received = True
            message = self._queue.get()
            self._handle_message(message)
            drain_count += 1
        if drain_count:
            self._logger.debug("Drained %s events", drain_count)

    def _handle_message(self, message: dict) -> None:
        # pylint: disable=too-many-branches
        # pylint: disable=too-many-nested-blocks
        # pylint: disable=too-many-statements
        """Handle a runner message

        :param message: The message from runner
        :type message: dict
        """
        try:
            event = message["event"]
        except KeyError:
            error = f"Unhandled message from runner queue, discarded: {message}"
            self._logger.critical(error)
        else:
            if "stdout" in message and message["stdout"]:
                self.stdout.extend(message["stdout"].splitlines())
                if self.mode == "stdout_w_artifact":
                    print(message["stdout"])

            if event in ["verbose", "error"]:
                if "ERROR!" in message["stdout"]:
                    self._msg_from_plays = ("ERROR", 9)
                    if self.mode == "interactive":
                        self._notify_error(message["stdout"])
                elif "WARNING" in message["stdout"]:
                    self._msg_from_plays = ("WARNINGS", 13)

            if event == "playbook_on_play_start":
                play = message["event_data"]
                play["__play_name"] = play["name"]
                play["tasks"] = []
                self._plays.value.append(play)

            if event.startswith("runner_on_"):
                runner_event = event.split("_")[2]
                task = message["event_data"]
                # NOTE(review): assumes the play event for ``play_uuid`` was
                # seen first; ``next`` raises StopIteration otherwise — confirm
                play_id = next(
                    idx for idx, p in enumerate(self._plays.value) if p["uuid"] == task["play_uuid"]
                )
                if runner_event in ["ok", "skipped", "unreachable", "failed"]:
                    if runner_event == "failed" and task["ignore_errors"]:
                        result = "ignored"
                    else:
                        result = runner_event
                    task["__task"] = task["task"]
                    task["__result"] = result.upper()
                    task["__changed"] = task.get("res", {}).get("changed", False)
                    if isinstance(task["duration"], (int, float)):
                        task["__duration"] = human_time(seconds=round_half_up(task["duration"]))
                    else:
                        msg = (
                            f"Task duration for '{task['task']}' was type {type(task['duration'])},"
                            " set to 0"
                        )
                        self._logger.debug(msg)
                        task["__duration"] = 0
                    # Merge into the matching in-progress entry (same task
                    # uuid AND same host) created by the "start" event
                    task_id = None
                    for idx, play_task in enumerate(self._plays.value[play_id]["tasks"]):
                        if task["task_uuid"] == play_task["task_uuid"]:
                            if task["host"] == play_task["host"]:
                                task_id = idx
                                break
                    if task_id is not None:
                        self._plays.value[play_id]["tasks"][task_id].update(task)

                elif runner_event == "start":
                    task["__host"] = task["host"]
                    task["__result"] = "IN_PROGRESS"
                    task["__changed"] = "unknown"
                    task["__duration"] = None
                    task["__number"] = len(self._plays.value[play_id]["tasks"])
                    task["__task"] = task["task"]
                    task["__task_action"] = task["task_action"]
                    self._plays.value[play_id]["tasks"].append(task)

    def _play_stats(self) -> None:
        """Calculate the play's stats based
        on its tasks
        """
        for idx, play in enumerate(self._plays.value):
            total = ["__ok", "__skipped", "__failed", "__unreachable", "__ignored", "__in_progress"]
            self._plays.value[idx].update(
                {
                    tot: len([t for t in play["tasks"] if t["__result"].lower() == tot[2:]])
                    for tot in total
                },
            )
            self._plays.value[idx]["__changed"] = len(
                [t for t in play["tasks"] if t["__changed"] is True],
            )
            task_count = len(play["tasks"])
            self._plays.value[idx]["__task_count"] = task_count
            completed = task_count - self._plays.value[idx]["__in_progress"]
            if completed:
                new = floor((completed / task_count * 100))
                current = self._plays.value[idx].get("__percent_complete", 0)
                # Progress is monotonic: never display a lower percentage
                self._plays.value[idx]["__percent_complete"] = max(new, current)
                self._plays.value[idx]["__progress"] = str(max(new, current)) + "%"
            else:
                self._plays.value[idx]["__progress"] = "0%"

    def _prepare_to_quit(self, interaction: Interaction) -> bool:
        """Looks like we're headed out of here

        :param interaction: the quit interaction
        :return: a bool indicating whether or not it's safe to exit
        """
        self.update()
        if self.runner is not None and not self.runner.finished:
            if interaction.action.match.groupdict()["exclamation"]:
                self._logger.debug("shutting down runner")
                self.runner.cancelled = True
                # Busy-wait for the runner to acknowledge the cancellation
                while not self.runner.finished:
                    pass
                self.write_artifact()
                return True
            self._logger.warning("Quit requested but playbook running, try q! or quit!")
            return False
        self._logger.debug("runner not running")
        return True

    def _task_list_for_play(self) -> Step:
        """generate a menu of task for the currently selected play

        :return: The menu step
        """
        value = self.steps.current.selected["tasks"]
        step = Step(
            name="task_list",
            step_type="menu",
            columns=self._task_list_columns,
            select_func=self._task_from_task_list,
            value=value,
        )
        return step

    def _task_from_task_list(self) -> Step:
        """generate task content for the selected task

        :return: content which show a task
        """
        value = self.steps.current.value
        index = self.steps.current.index
        step = Step(name="task", step_type="content", index=index, value=value)
        return step

    def update(self) -> None:
        """Drain the queue, set the status and write the artifact if needed"""
        # let the calling app update as well
        self._calling_app.update()
        if hasattr(self, "runner"):
            self._dequeue()
            self._set_status()

            if self.runner.finished and not self._runner_finished:
                self._logger.debug("runner finished")
                self._logger.info("Playbook complete")
                self.write_artifact()
                self._runner_finished = True

    def _get_status(self) -> Tuple[str, int]:
        """Get the status and color

        :return: status string, status color
        """
        status = ""
        status_color = 0
        if self.runner.status:
            if self.runner and self.runner.finished and self.runner.status:
                status = self.runner.status
                if self.runner.status == "failed":
                    status_color = 9
                else:
                    # Successful run: surface any play-level warning color
                    status_color = self._msg_from_plays[1] or 10
            else:
                # Still running: a play-level ERROR/WARNING takes precedence
                if self._msg_from_plays[0] is not None and self._msg_from_plays[1] is not None:
                    status = self._msg_from_plays[0]
                    status_color = self._msg_from_plays[1]
                else:
                    status = self.runner.status
                    status_color = 10
        return status, status_color

    def _set_status(self) -> None:
        """Set the UI status"""
        status, status_color = self._get_status()
        self._interaction.ui.update_status(status, status_color)

    def write_artifact(self, filename: Optional[str] = None) -> None:
        """Write the artifact

        :param filename: The file to write to
        :type filename: str
        """
        # ``and`` binds tighter than ``or``: an explicit ``filename`` always
        # wins; otherwise artifacts must be enabled and this not be --help
        if (
            filename
            or self._args.playbook_artifact_enable is True
            and self._args.help_playbook is not True
        ):
            filename = filename or self._args.playbook_artifact_save_as
            filename = filename.format(
                playbook_dir=os.path.dirname(self._args.playbook),
                playbook_name=os.path.splitext(os.path.basename(self._args.playbook))[0],
                ts_utc=datetime.datetime.now(tz=datetime.timezone.utc).isoformat(),
            )
            self._logger.debug("Formatted artifact file name set to %s", filename)
            filename = abs_user_path(filename)
            self._logger.debug("Resolved artifact file name set to %s", filename)

            status, status_color = self._get_status()

            try:
                os.makedirs(os.path.dirname(filename), exist_ok=True)
                with open(filename, "w", encoding="utf-8") as fh:
                    artifact = {
                        "version": "1.0.0",
                        "plays": self._plays.value,
                        "stdout": self.stdout,
                        "status": status,
                        "status_color": status_color,
                    }
                    json_dump(artifact, fh)
                self._logger.info("Saved artifact as %s", filename)

            except (IOError, OSError) as exc:
                # NOTE(review): the literal "f" before {str(exc)} renders in
                # the log message — likely a typo for f"...error: {str(exc)}"
                error = (
                    f"Saving the artifact file failed, resulted in the following error: f{str(exc)}"
                )
                self._logger.error(error)

    def rerun(self) -> None:
        """rerun the current playbook

        since we're not reinstating run,
        drain the queue, clear the steps, reset the index, etc
        """
        if self._subaction_type == "run":
            if self.runner.finished:
                self._plays.value = []
                self._plays.index = None
                self._msg_from_plays = (None, None)
                self._queue.queue.clear()
                self.stdout = []
                self._run_runner()
                self.steps.clear()
                self.steps.append(self._plays)
                self._logger.debug("Playbook rerun triggered")
            else:
                self._logger.warning("Playbook rerun ignored, current playbook not complete")
        elif self._subaction_type == "replay":
            self._logger.error("No rerun available when artifact is loaded")
        else:
            self._logger.error("sub-action type '%s' is invalid", self._subaction_type)

    def _notify_error(self, message: str) -> None:
        """show a blocking warning

        :param message: The raw (possibly ANSI-colored) error text from runner
        """
        warn_msg = ["Errors were encountered while running the playbook:"]
        messages = remove_ansi(message).splitlines()
        messages[-1] += "..."
        warn_msg.extend(messages)
        warn_msg += ["[HINT] To see the full error message try ':stdout'"]
        warn_msg += ["[HINT] After it's fixed, try to ':rerun' the playbook"]
        warning = warning_notification(warn_msg)
        self._interaction.ui.show(warning)
| 2.234375 | 2 |
storagetest/pkgs/pts/__init__.py | liufeng-elva/storage-test2 | 0 | 17242 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@file : __init__.py.py
@Time : 2020/11/12 13:37
@Author: Tao.Xu
@Email : <EMAIL>
"""
"""
phoronix-test-suite: Main for Performance Test
===================
https://github.com/phoronix-test-suite/phoronix-test-suite
The Phoronix Test Suite is the most comprehensive testing and
benchmarking platform available for Linux, Solaris, macOS, Windows,
and BSD operating systems.
"""
# Package marker only: executing the module directly does nothing.
if __name__ == '__main__':
    pass
| 0.855469 | 1 |
owlbot.py | rahul2393/python-spanner | 0 | 17243 | <gh_stars>0
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
from pathlib import Path
from typing import List, Optional
import synthtool as s
from synthtool import gcp
from synthtool.languages import python
common = gcp.CommonTemplates()
# This is a customized version of the s.get_staging_dirs() function
# from synthtool to # cater for copying 3 different folders from
# googleapis-gen:
# spanner, spanner/admin/instance and spanner/admin/database.
# Source:
# https://github.com/googleapis/synthtool/blob/master/synthtool/transforms.py#L280
def get_staging_dirs(
    default_version: Optional[str] = None,
    sub_directory: Optional[str] = None,
) -> List[Path]:
    """Returns the list of directories, one per version, copied from
    https://github.com/googleapis/googleapis-gen. Will return in lexical sorting
    order with the exception of the default_version which will be last (if specified).

    Args:
        default_version (str): the default version of the API. The directory for this version
            will be the last item in the returned list if specified.
        sub_directory (str): if a `sub_directory` is provided, only the directories within the
            specified `sub_directory` will be returned.

    Returns: the empty list if no file were copied.
    """
    staging = Path("owl-bot-staging")
    if sub_directory:
        staging = staging / sub_directory

    if not staging.is_dir():
        return []

    # Lexically sorted versions, with the default version forced to the end
    # so its files win any copy conflicts.
    versions = sorted(
        entry.name
        for entry in staging.iterdir()
        if entry.is_dir() and entry.name != default_version
    )
    if default_version is not None:
        versions.append(default_version)

    version_dirs = [staging / version for version in versions]
    for version_dir in version_dirs:
        s._tracked_paths.add(version_dir)
    return version_dirs
# Default (latest) API version per generated client; the matching staging
# directory is copied last so its files win on conflicts.
spanner_default_version = "v1"
spanner_admin_instance_default_version = "v1"
spanner_admin_database_default_version = "v1"
# Post-process and copy each staged spanner version, excluding handwritten files.
for library in get_staging_dirs(spanner_default_version, "spanner"):
    # Work around gapic generator bug https://github.com/googleapis/gapic-generator-python/issues/902
    s.replace(
        library / f"google/cloud/spanner_{library.name}/types/transaction.py",
        r""".
    Attributes:""",
        r""".\n
    Attributes:""",
    )

    # Work around gapic generator bug https://github.com/googleapis/gapic-generator-python/issues/902
    # NOTE(review): this call is identical to the one above — confirm the
    # duplication is intentional before removing it.
    s.replace(
        library / f"google/cloud/spanner_{library.name}/types/transaction.py",
        r""".
    Attributes:""",
        r""".\n
    Attributes:""",
    )

    # Remove headings from docstring. Requested change upstream in cl/377290854 due to https://google.aip.dev/192#formatting.
    s.replace(
        library / f"google/cloud/spanner_{library.name}/types/transaction.py",
        """\n ==.*?==\n""",
        ":",
    )

    # Remove headings from docstring. Requested change upstream in cl/377290854 due to https://google.aip.dev/192#formatting.
    s.replace(
        library / f"google/cloud/spanner_{library.name}/types/transaction.py",
        """\n --.*?--\n""",
        ":",
    )

    s.move(
        library,
        excludes=[
            "google/cloud/spanner/**",
            "*.*",
            "docs/index.rst",
            "google/cloud/spanner_v1/__init__.py",
        ],
    )
# Copy the staged admin-instance client, keeping handwritten files.
for library in get_staging_dirs(
    spanner_admin_instance_default_version, "spanner_admin_instance"
):
    s.move(
        library,
        excludes=["google/cloud/spanner_admin_instance/**", "*.*", "docs/index.rst"],
    )

# Copy the staged admin-database client, keeping handwritten files.
for library in get_staging_dirs(
    spanner_admin_database_default_version, "spanner_admin_database"
):
    s.move(
        library,
        excludes=["google/cloud/spanner_admin_database/**", "*.*", "docs/index.rst"],
    )

# Clean up the owl-bot-staging tree now that everything is copied.
s.remove_staging_dirs()
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
templated_files = common.py_library(
microgenerator=True, samples=True, cov_level=99, split_system_tests=True,
)
s.move(templated_files,
excludes=[
".coveragerc",
".github/workflows", # exclude gh actions as credentials are needed for tests
]
)
# Ensure CI runs on a new instance each time
s.replace(
".kokoro/build.sh",
"# Remove old nox",
"""\
# Set up creating a new instance for each system test run
export GOOGLE_CLOUD_TESTS_CREATE_SPANNER_INSTANCE=true
# Remove old nox""",
)
# Update samples folder in CONTRIBUTING.rst
s.replace("CONTRIBUTING.rst", "samples/snippets", "samples/samples")
# ----------------------------------------------------------------------------
# Samples templates
# ----------------------------------------------------------------------------
python.py_samples()
# ----------------------------------------------------------------------------
# Customize noxfile.py
# ----------------------------------------------------------------------------
def place_before(path, text, *before_text, escape=None):
    """Insert *before_text* lines immediately before *text* in *path*.

    ``text`` is used as the regex search pattern for ``s.replace``; any
    characters listed in *escape* are backslash-escaped in the pattern
    (but not in the replacement, which stays literal).
    """
    new_content = "\n".join(before_text) + "\n" + text
    pattern = text
    if escape:
        for ch in escape:
            pattern = pattern.replace(ch, "\\" + ch)
    s.replace([path], pattern, new_content)
open_telemetry_test = """
# XXX Work around Kokoro image's older pip, which borks the OT install.
session.run("pip", "install", "--upgrade", "pip")
session.install("-e", ".[tracing]", "-c", constraints_path)
# XXX: Dump installed versions to debug OT issue
session.run("pip", "list")
# Run py.test against the unit tests with OpenTelemetry.
session.run(
"py.test",
"--quiet",
"--cov=google.cloud.spanner",
"--cov=google.cloud",
"--cov=tests.unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=0",
os.path.join("tests", "unit"),
*session.posargs,
)
"""
place_before(
"noxfile.py",
"@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)",
open_telemetry_test,
escape="()",
)
skip_tests_if_env_var_not_set = """# Sanity check: Only run tests if the environment variable is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", "") and not os.environ.get(
"SPANNER_EMULATOR_HOST", ""
):
session.skip(
"Credentials or emulator host must be set via environment variable"
)
"""
place_before(
"noxfile.py",
"# Install pyopenssl for mTLS testing.",
skip_tests_if_env_var_not_set,
escape="()",
)
s.replace(
"noxfile.py",
"""f"--junitxml=unit_{session.python}_sponge_log.xml",
"--cov=google",
"--cov=tests/unit",""",
"""\"--cov=google.cloud.spanner",
"--cov=google.cloud",
"--cov=tests.unit",""",
)
s.replace(
"noxfile.py",
r"""session.install\("-e", "."\)""",
"""session.install("-e", ".[tracing]")""",
)
s.replace(
"noxfile.py",
r"""# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install\("mock", "pytest", "google-cloud-testutils", "-c", constraints_path\)
session.install\("-e", ".", "-c", constraints_path\)""",
"""# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path)
session.install("-e", ".[tracing]", "-c", constraints_path)""",
)
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
| 1.765625 | 2 |
k_means.py | sokrutu/imagemean | 0 | 17244 | <reponame>sokrutu/imagemean
from random import randint
def k_means(data, K):
    """
    k-Means clustering (Lloyd's algorithm).

    Cluster centres are initialised with random integer coordinates in
    [0, 255], matching the original RGB use case, but the algorithm itself
    works for any dimensionality D.

    :param data: NxD array of numbers (N non-empty)
    :param K: The number of clusters (K >= 1)
    :return: Tuple of cluster means (KxD list of floats) and cluster
             assignments (length-N list with values from 0 to K-1)
    """
    def _distance(a, b):
        # Euclidean distance between two D-dimensional points.
        return sum((x - y) ** 2 for x, y in zip(a, b)) ** 0.5

    def _mean(points):
        # Component-wise mean of a non-empty list of D-dimensional points.
        n = len(points)
        return [sum(coords) / float(n) for coords in zip(*points)]

    N = len(data)
    D = len(data[0])
    # Random initialisation, generalised to D dimensions (the original
    # hard-coded three RGB components regardless of D).
    means = [[randint(0, 255) for _ in range(D)] for _ in range(K)]
    assignments = [None] * N
    changed = True
    while changed:
        # Snapshot the current centres for the convergence test.
        # BUGFIX: the original did "old_means = means", aliasing the same
        # list object, so the comparison below always saw identical values
        # and the loop terminated after a single iteration.
        old_means = [list(m) for m in means]
        # Assignment step: attach every point to its nearest centre.
        for n in range(N):
            best_dist = float("inf")  # no artificial cap (was 442.0, the max RGB distance)
            index = -1
            for k in range(K):
                d = _distance(data[n], means[k])
                if d <= best_dist:
                    best_dist = d
                    index = k
            assignments[n] = index
        # Update step: move every centre to the mean of its members.
        for k in range(K):
            members = [x for i, x in enumerate(data) if assignments[i] == k]
            if members:
                means[k] = _mean(members)
            # An empty cluster keeps its previous centre instead of
            # crashing with a division by zero.
        changed = any(old != new for old, new in zip(old_means, means))
    return (means, assignments)
def __distance(a, b, dim):
    """Return the Euclidean distance between the first *dim* components
    of points *a* and *b*.

    Uses the built-in sum over a generator instead of accumulating into a
    local named ``sum``, which shadowed the builtin in the original.
    """
    return sum((a[i] - b[i]) ** 2 for i in range(dim)) ** 0.5
def __mean(a, dim):
    """Return the component-wise mean (length *dim*) of the points in *a*.

    Raises ZeroDivisionError for an empty *a*, like the original.
    (The original shadowed the builtin ``sum`` and reused the parameter
    name ``a`` as the comprehension variable; both fixed here.)
    """
    n = len(a)
    totals = [0.0] * dim
    for point in a:
        for d in range(dim):
            totals[d] += point[d]
    return [t / n for t in totals]
| 3.203125 | 3 |
contrib/analysis_server/src/analysis_server/__init__.py | Kenneth-T-Moore/OpenMDAO-Framework | 3 | 17245 | <reponame>Kenneth-T-Moore/OpenMDAO-Framework
"""
Support for interacting with ModelCenter via the AnalysisServer protocol.
Client-mode access to an AnalysisServer is provided by the 'client', 'factory',
and 'proxy' modules. Server-mode access by ModelCenter is provided by the
'server' and 'wrapper' modules.
An extension to the protocol allows 'eggs' to be 'published': the egg is sent
to the server and made part of the server's set of supported components.
"""
from __future__ import absolute_import
from .client import Client
from .factory import ASFactory
from .server import Server, start_server, stop_server, DEFAULT_PORT
from .stream import Stream
from .units import have_translation, get_translation, set_translation
from .publish import publish_class, publish_object, publish_egg
| 1.609375 | 2 |
3rd party/YOLO_network.py | isaiasfsilva/ROLO | 962 | 17246 | import os
import numpy as np
import tensorflow as tf
import cv2
import time
import sys
import pickle
import ROLO_utils as util
class YOLO_TF:
    """YOLO_small detector (TensorFlow) used to generate ROLO training data.

    Builds the YOLO_small graph, restores pre-trained weights, and provides
    helpers to run detection on images/folders and to dump fc-layer features
    plus detected box locations (or 32x32 heatmaps) to .npy files that the
    downstream LSTM tracker trains on.  Written for Python 2 and a pre-1.0
    TensorFlow API (initialize_all_variables, etc.).
    """

    # --- I/O and display configuration (overridable via argv flags) ---
    fromfile = None
    tofile_img = 'test/output.jpg'
    tofile_txt = 'test/output.txt'
    imshow = True
    filewrite_img = False
    filewrite_txt = False
    disp_console = True
    # --- network / detection hyper-parameters ---
    weights_file = 'weights/YOLO_small.ckpt'
    alpha = 0.1  # leaky-ReLU negative slope
    threshold = 0.08  # minimum class confidence to keep a detection
    iou_threshold = 0.5  # IOU above which overlapping boxes are suppressed
    num_class = 20
    num_box = 2
    grid_size = 7
    classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train","tvmonitor"]
    w_img, h_img = [352, 240]  # default frame size; overwritten per input image
    num_feat = 4096  # length of the fc_30 feature vector exported for the LSTM
    num_predict = 6 # final output of LSTM 6 loc parameters
    num_heatmap = 1024  # 32*32 flattened heatmap length

    def __init__(self,argvs = []):
        # NOTE(review): mutable default argument; harmless here because argvs
        # is only read, never mutated.
        self.argv_parser(argvs)
        self.build_networks()
        if self.fromfile is not None: self.detect_from_file(self.fromfile)

    def argv_parser(self,argvs):
        """Parse alternating flag/value pairs from an argv-style list."""
        for i in range(1,len(argvs),2):
            if argvs[i] == '-fromfile' : self.fromfile = argvs[i+1]
            if argvs[i] == '-tofile_img' : self.tofile_img = argvs[i+1] ; self.filewrite_img = True
            if argvs[i] == '-tofile_txt' : self.tofile_txt = argvs[i+1] ; self.filewrite_txt = True
            if argvs[i] == '-imshow' :
                if argvs[i+1] == '1' :self.imshow = True
                else : self.imshow = False
            if argvs[i] == '-disp_console' :
                if argvs[i+1] == '1' :self.disp_console = True
                else : self.disp_console = False

    def build_networks(self):
        """Build the YOLO_small graph, start a session and restore weights."""
        if self.disp_console : print "Building YOLO_small graph..."
        self.x = tf.placeholder('float32',[None,448,448,3])
        self.conv_1 = self.conv_layer(1,self.x,64,7,2)
        self.pool_2 = self.pooling_layer(2,self.conv_1,2,2)
        self.conv_3 = self.conv_layer(3,self.pool_2,192,3,1)
        self.pool_4 = self.pooling_layer(4,self.conv_3,2,2)
        self.conv_5 = self.conv_layer(5,self.pool_4,128,1,1)
        self.conv_6 = self.conv_layer(6,self.conv_5,256,3,1)
        self.conv_7 = self.conv_layer(7,self.conv_6,256,1,1)
        self.conv_8 = self.conv_layer(8,self.conv_7,512,3,1)
        self.pool_9 = self.pooling_layer(9,self.conv_8,2,2)
        self.conv_10 = self.conv_layer(10,self.pool_9,256,1,1)
        self.conv_11 = self.conv_layer(11,self.conv_10,512,3,1)
        self.conv_12 = self.conv_layer(12,self.conv_11,256,1,1)
        self.conv_13 = self.conv_layer(13,self.conv_12,512,3,1)
        self.conv_14 = self.conv_layer(14,self.conv_13,256,1,1)
        self.conv_15 = self.conv_layer(15,self.conv_14,512,3,1)
        self.conv_16 = self.conv_layer(16,self.conv_15,256,1,1)
        self.conv_17 = self.conv_layer(17,self.conv_16,512,3,1)
        self.conv_18 = self.conv_layer(18,self.conv_17,512,1,1)
        self.conv_19 = self.conv_layer(19,self.conv_18,1024,3,1)
        self.pool_20 = self.pooling_layer(20,self.conv_19,2,2)
        self.conv_21 = self.conv_layer(21,self.pool_20,512,1,1)
        self.conv_22 = self.conv_layer(22,self.conv_21,1024,3,1)
        self.conv_23 = self.conv_layer(23,self.conv_22,512,1,1)
        self.conv_24 = self.conv_layer(24,self.conv_23,1024,3,1)
        self.conv_25 = self.conv_layer(25,self.conv_24,1024,3,1)
        self.conv_26 = self.conv_layer(26,self.conv_25,1024,3,2)
        self.conv_27 = self.conv_layer(27,self.conv_26,1024,3,1)
        self.conv_28 = self.conv_layer(28,self.conv_27,1024,3,1)
        # fc_30 is the 4096-d feature exported for LSTM training;
        # fc_32 is the final 1470-d detection output (7*7*(20+2+2*4)).
        self.fc_29 = self.fc_layer(29,self.conv_28,512,flat=True,linear=False)
        self.fc_30 = self.fc_layer(30,self.fc_29,4096,flat=False,linear=False)
        #skip dropout_31
        self.fc_32 = self.fc_layer(32,self.fc_30,1470,flat=False,linear=True)
        self.sess = tf.Session()
        self.sess.run(tf.initialize_all_variables())
        self.saver = tf.train.Saver()
        self.saver.restore(self.sess,self.weights_file)
        if self.disp_console : print "Loading complete!" + '\n'

    def conv_layer(self,idx,inputs,filters,size,stride):
        """Add a convolution (same-padded via explicit pad) + leaky ReLU."""
        channels = inputs.get_shape()[3]
        weight = tf.Variable(tf.truncated_normal([size,size,int(channels),filters], stddev=0.1))
        biases = tf.Variable(tf.constant(0.1, shape=[filters]))
        pad_size = size//2
        pad_mat = np.array([[0,0],[pad_size,pad_size],[pad_size,pad_size],[0,0]])
        inputs_pad = tf.pad(inputs,pad_mat)
        conv = tf.nn.conv2d(inputs_pad, weight, strides=[1, stride, stride, 1], padding='VALID',name=str(idx)+'_conv')
        conv_biased = tf.add(conv,biases,name=str(idx)+'_conv_biased')
        if self.disp_console : print '    Layer  %d : Type = Conv, Size = %d * %d, Stride = %d, Filters = %d, Input channels = %d' % (idx,size,size,stride,filters,int(channels))
        # Leaky ReLU: max(alpha*x, x).
        return tf.maximum(self.alpha*conv_biased,conv_biased,name=str(idx)+'_leaky_relu')

    def pooling_layer(self,idx,inputs,size,stride):
        """Add a max-pooling layer."""
        if self.disp_console : print '    Layer  %d : Type = Pool, Size = %d * %d, Stride = %d' % (idx,size,size,stride)
        return tf.nn.max_pool(inputs, ksize=[1, size, size, 1],strides=[1, stride, stride, 1], padding='SAME',name=str(idx)+'_pool')

    def fc_layer(self,idx,inputs,hiddens,flat = False,linear = False):
        """Add a fully connected layer; flattens conv output when flat=True,
        applies leaky ReLU unless linear=True."""
        input_shape = inputs.get_shape().as_list()
        if flat:
            dim = input_shape[1]*input_shape[2]*input_shape[3]
            inputs_transposed = tf.transpose(inputs,(0,3,1,2))
            inputs_processed = tf.reshape(inputs_transposed, [-1,dim])
        else:
            dim = input_shape[1]
            inputs_processed = inputs
        weight = tf.Variable(tf.truncated_normal([dim,hiddens], stddev=0.1))
        biases = tf.Variable(tf.constant(0.1, shape=[hiddens]))
        if self.disp_console : print '    Layer  %d : Type = Full, Hidden = %d, Input dimension = %d, Flat = %d, Activation = %d' % (idx,hiddens,int(dim),int(flat),1-int(linear))
        if linear : return tf.add(tf.matmul(inputs_processed,weight),biases,name=str(idx)+'_fc')
        ip = tf.add(tf.matmul(inputs_processed,weight),biases)
        return tf.maximum(self.alpha*ip,ip,name=str(idx)+'_fc')

    def detect_from_cvmat(self,img):
        """Run detection on an OpenCV BGR image and display/save results."""
        s = time.time()
        self.h_img,self.w_img,_ = img.shape
        # Network expects 448x448 RGB scaled to [-1, 1].
        img_resized = cv2.resize(img, (448, 448))
        img_RGB = cv2.cvtColor(img_resized,cv2.COLOR_BGR2RGB)
        img_resized_np = np.asarray( img_RGB )
        inputs = np.zeros((1,448,448,3),dtype='float32')
        inputs[0] = (img_resized_np/255.0)*2.0-1.0
        in_dict = {self.x: inputs}
        net_output = self.sess.run(self.fc_32,feed_dict=in_dict)
        self.result = self.interpret_output(net_output[0])
        self.show_results(img,self.result)
        strtime = str(time.time()-s)
        if self.disp_console : print 'Elapsed time : ' + strtime + ' secs' + '\n'

    def detect_from_file(self,filename):
        """Load an image from disk and run detection on it."""
        if self.disp_console : print 'Detect from ' + filename
        img = cv2.imread(filename)
        #img = misc.imread(filename)
        self.detect_from_cvmat(img)

    def detect_from_crop_sample(self):
        """Debug helper: run detection on a hard-coded pre-cropped sample
        stored as raw floats in person_crop.txt."""
        self.w_img = 640
        self.h_img = 420
        f = np.array(open('person_crop.txt','r').readlines(),dtype='float32')
        inputs = np.zeros((1,448,448,3),dtype='float32')
        # File stores channel-major values; rebuild the HWC tensor.
        for c in range(3):
            for y in range(448):
                for x in range(448):
                    inputs[0,y,x,c] = f[c*448*448+y*448+x]
        in_dict = {self.x: inputs}
        net_output = self.sess.run(self.fc_32,feed_dict=in_dict)
        self.boxes, self.probs = self.interpret_output(net_output[0])
        img = cv2.imread('person.jpg')
        self.show_results(self.boxes,img)

    def interpret_output(self,output):
        """Decode the raw 1470-d network output into detections.

        Returns a list of [class_name, x, y, w, h, confidence] where (x, y)
        is the box centre in image pixels, after thresholding and greedy
        non-maximum suppression.
        """
        probs = np.zeros((7,7,2,20))
        # Layout: 980 class probs | 98 box confidences | 392 box coords.
        class_probs = np.reshape(output[0:980],(7,7,20))
        scales = np.reshape(output[980:1078],(7,7,2))
        boxes = np.reshape(output[1078:],(7,7,2,4))
        # Box centres are offsets within their grid cell; add cell indices
        # and normalise to [0, 1], then scale to image pixels.  Width/height
        # are predicted as square roots, hence the squaring.
        offset = np.transpose(np.reshape(np.array([np.arange(7)]*14),(2,7,7)),(1,2,0))
        boxes[:,:,:,0] += offset
        boxes[:,:,:,1] += np.transpose(offset,(1,0,2))
        boxes[:,:,:,0:2] = boxes[:,:,:,0:2] / 7.0
        boxes[:,:,:,2] = np.multiply(boxes[:,:,:,2],boxes[:,:,:,2])
        boxes[:,:,:,3] = np.multiply(boxes[:,:,:,3],boxes[:,:,:,3])
        boxes[:,:,:,0] *= self.w_img
        boxes[:,:,:,1] *= self.h_img
        boxes[:,:,:,2] *= self.w_img
        boxes[:,:,:,3] *= self.h_img
        # Class-conditional probability * box confidence.
        for i in range(2):
            for j in range(20):
                probs[:,:,i,j] = np.multiply(class_probs[:,:,j],scales[:,:,i])
        filter_mat_probs = np.array(probs>=self.threshold,dtype='bool')
        filter_mat_boxes = np.nonzero(filter_mat_probs)
        boxes_filtered = boxes[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
        probs_filtered = probs[filter_mat_probs]
        classes_num_filtered = np.argmax(filter_mat_probs,axis=3)[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
        # Sort by confidence, then greedy NMS: zero out any box whose IOU
        # with a higher-confidence box exceeds iou_threshold.
        argsort = np.array(np.argsort(probs_filtered))[::-1]
        boxes_filtered = boxes_filtered[argsort]
        probs_filtered = probs_filtered[argsort]
        classes_num_filtered = classes_num_filtered[argsort]
        for i in range(len(boxes_filtered)):
            if probs_filtered[i] == 0 : continue
            for j in range(i+1,len(boxes_filtered)):
                if self.iou(boxes_filtered[i],boxes_filtered[j]) > self.iou_threshold :
                    probs_filtered[j] = 0.0
        filter_iou = np.array(probs_filtered>0.0,dtype='bool')
        boxes_filtered = boxes_filtered[filter_iou]
        probs_filtered = probs_filtered[filter_iou]
        classes_num_filtered = classes_num_filtered[filter_iou]
        result = []
        for i in range(len(boxes_filtered)):
            result.append([self.classes[classes_num_filtered[i]],boxes_filtered[i][0],boxes_filtered[i][1],boxes_filtered[i][2],boxes_filtered[i][3],probs_filtered[i]])
        return result

    def show_results(self,img,results):
        """Display and/or save the detections produced by interpret_output."""
        img_cp = img.copy()
        if self.filewrite_txt :
            ftxt = open(self.tofile_txt,'w')
        for i in range(len(results)):
            # (x, y) is the box centre; w, h below are half-extents.
            x = int(results[i][1])
            y = int(results[i][2])
            w = int(results[i][3])//2
            h = int(results[i][4])//2
            if self.disp_console : print '    class : ' + results[i][0] + ' , [x,y,w,h]=[' + str(x) + ',' + str(y) + ',' + str(int(results[i][3])) + ',' + str(int(results[i][4]))+'], Confidence = ' + str(results[i][5])
            if self.filewrite_img or self.imshow:
                cv2.rectangle(img_cp,(x-w,y-h),(x+w,y+h),(0,255,0),2)
                cv2.rectangle(img_cp,(x-w,y-h-20),(x+w,y-h),(125,125,125),-1)
                cv2.putText(img_cp,results[i][0] + ' : %.2f' % results[i][5],(x-w+5,y-h-7),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
            if self.filewrite_txt :
                ftxt.write(results[i][0] + ',' + str(x) + ',' + str(y) + ',' + str(w) + ',' + str(h)+',' + str(results[i][5]) + '\n')
        if self.filewrite_img :
            if self.disp_console : print '    image file writed : ' + self.tofile_img
            cv2.imwrite(self.tofile_img,img_cp)
        if self.imshow :
            cv2.imshow('YOLO_small detection',img_cp)
            cv2.waitKey(0)
        if self.filewrite_txt :
            if self.disp_console : print '    txt file writed : ' + self.tofile_txt
            ftxt.close()

    def iou(self,box1,box2):
        """Intersection-over-union of two centre-format boxes (x, y, w, h)."""
        tb = min(box1[0]+0.5*box1[2],box2[0]+0.5*box2[2])-max(box1[0]-0.5*box1[2],box2[0]-0.5*box2[2])
        lr = min(box1[1]+0.5*box1[3],box2[1]+0.5*box2[3])-max(box1[1]-0.5*box1[3],box2[1]-0.5*box2[3])
        if tb < 0 or lr < 0 : intersection = 0
        else : intersection = tb*lr
        return intersection / (box1[2]*box1[3] + box2[2]*box2[3] - intersection)

    # my addition
    def createFolder(self, path):
        """Create *path* (and parents) if it does not already exist."""
        if not os.path.exists(path):
            os.makedirs(path)

    def debug_location(self, img, location):
        """Draw one [class, x, y, w, h, prob] detection on a copy of img."""
        img_cp = img.copy()
        x = int(location[1])
        y = int(location[2])
        w = int(location[3])//2
        h = int(location[4])//2
        cv2.rectangle(img_cp,(x-w,y-h),(x+w,y+h),(0,255,0),2)
        cv2.rectangle(img_cp,(x-w,y-h-20),(x+w,y-h),(125,125,125),-1)
        cv2.putText(img_cp, str(location[0]) + ' : %.2f' % location[5],(x-w+5,y-h-7),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
        cv2.imshow('YOLO_small detection',img_cp)
        cv2.waitKey(1)

    def debug_locations(self, img, locations):
        """Like debug_location, but draws every detection in *locations*."""
        img_cp = img.copy()
        for location in locations:
            x = int(location[1])
            y = int(location[2])
            w = int(location[3])//2
            h = int(location[4])//2
            cv2.rectangle(img_cp,(x-w,y-h),(x+w,y+h),(0,255,0),2)
            cv2.rectangle(img_cp,(x-w,y-h-20),(x+w,y-h),(125,125,125),-1)
            cv2.putText(img_cp, str(location[0]) + ' : %.2f' % location[5],(x-w+5,y-h-7),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
        cv2.imshow('YOLO_small detection',img_cp)
        cv2.waitKey(1)

    def debug_gt_location(self, img, location):
        """Draw a ground-truth box given in top-left format (x1, y1, w, h)."""
        img_cp = img.copy()
        x = int(location[0])
        y = int(location[1])
        w = int(location[2])
        h = int(location[3])
        cv2.rectangle(img_cp,(x,y),(x+w,y+h),(0,255,0),2)
        cv2.imshow('gt',img_cp)
        cv2.waitKey(1)

    def file_to_img(self, filepath):
        """Read an image from disk (BGR, as cv2.imread returns)."""
        img = cv2.imread(filepath)
        return img

    def file_to_video(self, filepath):
        """Open a video file with OpenCV.

        NOTE(review): the ``else`` branch of a try/except runs on *success*,
        so this prints 'unknown error reading video file' every time the
        file opens cleanly; the message belongs in the except branch (and
        cv2.VideoCapture rarely raises IOError at all) — confirm intent.
        """
        try:
            video = cv2.VideoCapture(filepath)
        except IOError:
            print 'cannot open video file: ' + filepath
        else:
            print 'unknown error reading video file'
        return video

    # NOTE(review): duplicate definition — this shadows the identical iou()
    # defined earlier in the class; harmless but one copy should be removed.
    def iou(self,box1,box2):
        """Intersection-over-union of two centre-format boxes (x, y, w, h)."""
        tb = min(box1[0]+0.5*box1[2],box2[0]+0.5*box2[2])-max(box1[0]-0.5*box1[2],box2[0]-0.5*box2[2])
        lr = min(box1[1]+0.5*box1[3],box2[1]+0.5*box2[3])-max(box1[1]-0.5*box1[3],box2[1]-0.5*box2[3])
        if tb < 0 or lr < 0 : intersection = 0
        else : intersection = tb*lr
        return intersection / (box1[2]*box1[3] + box2[2]*box2[3] - intersection)

    def find_iou_cost(self, pred_locs, gts):
        # for each element in the batch, find its iou. output a list of ious.
        cost = 0
        batch_size= len(pred_locs)
        assert (len(gts)== batch_size)
        print("batch_size: ")
        ious = []
        for i in range(batch_size):
            pred_loc = pred_locs[i]
            gt = gts[i]
            iou_ = self.iou(pred_loc, gt)
            # NOTE(review): list.append takes a single argument; passing
            # (self, iou_) raises TypeError at runtime — should be
            # ious.append(iou_).  This method appears to be dead code.
            ious.append(self, iou_)
        return ious

    def load_folder(self, path):
        """Return the sorted list of file paths directly inside *path*."""
        paths = [os.path.join(path,fn) for fn in next(os.walk(path))[2]]
        #return paths
        return sorted(paths)

    def load_dataset_gt(self, gt_file):
        """Read the ground-truth file and return its lines."""
        txtfile = open(gt_file, "r")
        lines = txtfile.read().split('\n')  #'\r\n'
        return lines

    def find_gt_location(self, lines, id):
        """Parse line *id* of the ground-truth file into [x1, y1, w, h].

        Supports tab-separated (type 2) and comma-separated (type 1) files.
        """
        line = lines[id]
        elems = line.split('\t')  # for gt type 2
        if len(elems) < 4:
            elems = line.split(',')  #for gt type 1
        x1 = elems[0]
        y1 = elems[1]
        w = elems[2]
        h = elems[3]
        gt_location = [int(x1), int(y1), int(w), int(h)]
        return gt_location

    def find_best_location(self, locations, gt_location):
        """Return the detection with the highest IOU against gt_location.

        locations entries are (class, x, y, w, h, prob) with (x, y) the box
        centre; gt_location is (x1, y1, w, h) top-left format.  On a match
        the class name is replaced by its numeric index; with no overlap a
        zero row [0, 0, 0, 0, 0, 0] is returned.
        """
        # locations (class, x, y, w, h, prob); (x, y) is the middle pt of the rect
        # gt_location (x1, y1, w, h)
        x1 = gt_location[0]
        y1 = gt_location[1]
        w = gt_location[2]
        h = gt_location[3]
        gt_location_revised= [x1 + w/2, y1 + h/2, w, h]
        max_ious= 0
        for id, location in enumerate(locations):
            location_revised = location[1:5]
            print("location: ", location_revised)
            print("gt_location: ", gt_location_revised)
            ious = self.iou(location_revised, gt_location_revised)
            if ious >= max_ious:
                max_ious = ious
                index = id
        print("Max IOU: " + str(max_ious))
        if max_ious != 0:
            best_location = locations[index]
            class_index = self.classes.index(best_location[0])
            best_location[0]= class_index
            return best_location
        else:  # it means the detection failed, no intersection with the ground truth
            return [0, 0, 0, 0, 0, 0]

    def save_yolo_output(self, out_fold, yolo_output, filename):
        """Save one frame's feature/location vector as <out_fold>/<stem>.npy."""
        name_no_ext= os.path.splitext(filename)[0]
        output_name= name_no_ext
        path = os.path.join(out_fold, output_name)
        np.save(path, yolo_output)

    def location_from_0_to_1(self, wid, ht, location):
        """Normalise a (class, x, y, w, h, ...) detection to [0, 1] in place."""
        location[1] /= wid
        location[2] /= ht
        location[3] /= wid
        location[4] /= ht
        return location

    def gt_location_from_0_to_1(self, wid, ht, location):
        """Normalise a (x1, y1, w, h) ground-truth box to [0, 1] in place."""
        wid *= 1.0
        ht *= 1.0
        location[0] /= wid
        location[1] /= ht
        location[2] /= wid
        location[3] /= ht
        return location

    def locations_normal(self, wid, ht, locations):
        """Inverse of location_from_0_to_1: scale back to pixels in place."""
        wid *= 1.0
        ht *= 1.0
        locations[1] *= wid
        locations[2] *= ht
        locations[3] *= wid
        locations[4] *= ht
        return locations

    def cal_yolo_loss(self, location, gt_location):
        """Mean squared error (x100) between a detection and the ground truth.
        Mutates *location* from centre format to top-left format in place."""
        # Translate yolo's box mid-point (x0, y0) to top-left point (x1, y1), in order to compare with gt
        location[0] = location[0] - location[2]/2
        location[1] = location[1] - location[3]/2
        loss= sum([(location[i] - gt_location[i])**2 for i in range(4)]) * 100 / 4
        return loss

    def cal_yolo_IOU(self, location, gt_location):
        """IOU between a detection and the ground truth.
        Mutates *location* from centre format to top-left format in place."""
        # Translate yolo's box mid-point (x0, y0) to top-left point (x1, y1), in order to compare with gt
        location[0] = location[0] - location[2]/2
        location[1] = location[1] - location[3]/2
        loss = self.iou(location, gt_location)
        return loss

    def prepare_training_data(self, img_fold, gt_file, out_fold):  #[or]prepare_training_data(self, list_file, gt_file, out_fold):
        ''' Pass the data through YOLO, and get the fc_17 layer as features, and get the fc_19 layer as locations
        Save the features and locations into file for training LSTM'''
        # Reshape the input image
        paths= self.load_folder(img_fold)
        gt_locations= self.load_dataset_gt(gt_file)
        avg_loss = 0
        total= 0
        total_time= 0
        for id, path in enumerate(paths):
            filename= os.path.basename(path)
            print("processing: ", id, ": ", filename)
            img = self.file_to_img(path)
            # Pass through YOLO layers
            self.h_img,self.w_img,_ = img.shape
            img_resized = cv2.resize(img, (448, 448))
            img_RGB = cv2.cvtColor(img_resized,cv2.COLOR_BGR2RGB)
            img_resized_np = np.asarray( img_RGB )
            inputs = np.zeros((1,448,448,3),dtype='float32')
            inputs[0] = (img_resized_np/255.0)*2.0-1.0
            in_dict = {self.x : inputs}
            start_time = time.time()
            feature= self.sess.run(self.fc_30,feed_dict=in_dict)
            cycle_time = time.time() - start_time
            print('cycle time= ', cycle_time)
            total_time += cycle_time
            output = self.sess.run(self.fc_32,feed_dict=in_dict)  # make sure it does not run conv layers twice
            locations = self.interpret_output(output[0])
            gt_location = self.find_gt_location(gt_locations, id)
            location = self.find_best_location(locations, gt_location)  # find the ROI that has the maximum IOU with the ground truth
            self.debug_location(img, location)
            self.debug_gt_location(img, gt_location)
            # change location into [0, 1]
            loss= self.cal_yolo_IOU(location[1:5], gt_location)
            location = self.location_from_0_to_1(self.w_img, self.h_img, location)
            avg_loss += loss
            total += 1
            print("loss: ", loss)
            # One row per frame: [4096-d feature | 6-d location].
            yolo_output= np.concatenate(
                   ( np.reshape(feature, [-1, self.num_feat]),
                     np.reshape(location, [-1, self.num_predict]) ),
                   axis = 1)
            self.save_yolo_output(out_fold, yolo_output, filename)
        avg_loss = avg_loss/total
        print("YOLO avg_loss: ", avg_loss)
        print "Time Spent on Tracking: " + str(total_time)
        print "fps: " + str(id/total_time)
        return

    def loc_to_coordinates(self, loc):
        """Convert a [0,1]-normalised centre box to 32x32-grid corner coords."""
        loc = [i * 32 for i in loc]
        x1= int(loc[0]- loc[2]/2)
        y1= int(loc[1]- loc[3]/2)
        x2= int(loc[0]+ loc[2]/2)
        y2= int(loc[1]+ loc[3]/2)
        return [x1, y1, x2, y2]

    def coordinates_to_heatmap_vec(self, coord):
        """Rasterise one detection into a flattened 32x32 binary heatmap."""
        heatmap_vec = np.zeros(1024)
        print(coord)
        [classnum, x1, y1, x2, y2, prob] = coord
        [x1, y1, x2, y2]= self.loc_to_coordinates([x1, y1, x2, y2])
        for y in range(y1, y2):
            for x in range(x1, x2):
                index = y*32 + x
                heatmap_vec[index] = 1.0
        return heatmap_vec

    def prepare_training_data_heatmap(self, img_fold, gt_file, out_fold):  #[or]prepare_training_data(self, list_file, gt_file, out_fold):
        ''' Pass the data through YOLO, and get the fc_17 layer as features, and get the fc_19 layer as locations
        Save the features and locations into file for training LSTM'''
        # Same as prepare_training_data, but stores a 1024-d heatmap target
        # instead of the 6-d location vector.
        # Reshape the input image
        paths= self.load_folder(img_fold)
        gt_locations= self.load_dataset_gt(gt_file)
        avg_loss = 0
        total= 0
        for id, path in enumerate(paths):
            filename= os.path.basename(path)
            print("processing: ", id, ": ", filename)
            img = self.file_to_img(path)
            # Pass through YOLO layers
            self.h_img,self.w_img,_ = img.shape
            img_resized = cv2.resize(img, (448, 448))
            img_RGB = cv2.cvtColor(img_resized,cv2.COLOR_BGR2RGB)
            img_resized_np = np.asarray( img_RGB )
            inputs = np.zeros((1,448,448,3),dtype='float32')
            inputs[0] = (img_resized_np/255.0)*2.0-1.0
            in_dict = {self.x : inputs}
            feature= self.sess.run(self.fc_30,feed_dict=in_dict)
            output = self.sess.run(self.fc_32,feed_dict=in_dict)  # make sure it does not run conv layers twice
            locations = self.interpret_output(output[0])
            gt_location = self.find_gt_location(gt_locations, id)
            location = self.find_best_location(locations, gt_location)  # find the ROI that has the maximum IOU with the ground truth
            self.debug_location(img, location)
            self.debug_gt_location(img, gt_location)
            # change location into [0, 1]
            loss= self.cal_yolo_IOU(location[1:5], gt_location)
            location = self.location_from_0_to_1(self.w_img, self.h_img, location)
            heatmap_vec= self.coordinates_to_heatmap_vec(location)
            avg_loss += loss
            total += 1
            print("loss: ", loss)
            yolo_output= np.concatenate(
                   ( np.reshape(feature, [-1, self.num_feat]),
                     np.reshape(heatmap_vec, [-1, self.num_heatmap]) ),
                   axis = 1)
            self.save_yolo_output(out_fold, yolo_output, filename)
        avg_loss = avg_loss/total
        print("YOLO avg_loss: ", avg_loss)
        return

    def prepare_training_data_multiTarget(self, img_fold, out_fold):
        ''' Pass the data through YOLO, and get the fc_17 layer as features, and get the fc_19 layer as locations
        Save the features and locations into file for training LSTM'''
        # Multi-target variant (no ground truth): stores every detection of
        # each frame, padded with a zero row when only one box was found.
        # Reshape the input image
        print(img_fold)
        paths= self.load_folder(img_fold)
        avg_loss = 0
        total= 0
        for id, path in enumerate(paths):
            filename= os.path.basename(path)
            print("processing: ", id, ": ", filename)
            img = self.file_to_img(path)
            # Pass through YOLO layers
            self.h_img,self.w_img,_ = img.shape
            img_resized = cv2.resize(img, (448, 448))
            img_RGB = cv2.cvtColor(img_resized,cv2.COLOR_BGR2RGB)
            img_resized_np = np.asarray( img_RGB )
            inputs = np.zeros((1,448,448,3),dtype='float32')
            inputs[0] = (img_resized_np/255.0)*2.0-1.0
            in_dict = {self.x : inputs}
            feature= self.sess.run(self.fc_30,feed_dict=in_dict)
            output = self.sess.run(self.fc_32,feed_dict=in_dict)  # make sure it does not run conv layers twice
            locations = self.interpret_output(output[0])
            self.debug_locations(img, locations)
            # change location into [0, 1]
            for i in range(0, len(locations)):
                class_index = self.classes.index(locations[i][0])
                locations[i][0] = class_index
                locations[i] = self.location_from_0_to_1(self.w_img, self.h_img, locations[i])
            if len(locations)== 1:
                print('len(locations)= 1\n')
                yolo_output = [[np.reshape(feature, [-1, self.num_feat])], [np.reshape(locations, [-1, self.num_predict]), [0,0,0,0,0,0]]]
            else:
                yolo_output = [[np.reshape(feature, [-1, self.num_feat])], [np.reshape(locations, [-1, self.num_predict])]]
            self.save_yolo_output(out_fold, yolo_output, filename)
        return
'''----------------------------------------main-----------------------------------------------------'''
def main(argvs):
    """Build YOLO and export per-frame features/locations for one benchmark
    sequence selected by the hard-coded ``test`` index below."""
    yolo = YOLO_TF(argvs)
    test = 4  # index into util.choose_video_sequence (see table below)
    heatmap= False#True
    '''
    VOT30
    0:'Human2'
    1:'Human9'
    2:'Gym'
    3:'Human8'
    4:'Skater'
    5:'Suv'
    6:'BlurBody'
    7:'CarScale'
    8:'Dancer2'
    9:'BlurCar1'
    10:'Dog'
    11:'Jump'
    12:'Singer2'
    13:'Woman'
    14:'David3'
    15:'Dancer'
    16:'Human7'
    17:'Bird1'
    18:'Car4'
    19:'CarDark'
    20:'Couple'
    21:'Diving'
    22:'Human3'
    23:'Skating1'
    24:'Human6'
    25:'Singer1'
    26:'Skater2'
    27:'Walking2'
    28:'BlurCar3'
    29:'Girl2'
    MOT2016
    30:'MOT16-02'
    31:'MOT16-04'
    32:'MOT16-05'
    33:'MOT16-09'
    34:'MOT16-10'
    35:'MOT16-11'
    36:'MOT16-13'
    37:'MOT16-01'
    38:'MOT16-03'
    39:'MOT16-06'
    40:'MOT16-07'
    41:'MOT16-08'
    42:'MOT16-12'
    43:'MOT16-14'
    '''
    [yolo.w_img, yolo.h_img, sequence_name, dummy_1, dummy_2]= util.choose_video_sequence(test)
    # Resolve the dataset layout for the selected benchmark.
    # NOTE(review): for 43 < test < 90 no branch matches and root_folder is
    # unbound, raising NameError below — confirm valid index ranges.
    if (test >= 0 and test <= 29) or (test >= 90):
        root_folder = 'benchmark/DATA'
        img_fold = os.path.join(root_folder, sequence_name, 'img/')
    elif test<= 36:
        root_folder = 'benchmark/MOT/MOT2016/train'
        img_fold = os.path.join(root_folder, sequence_name, 'img1/')
    elif test<= 43:
        root_folder = 'benchmark/MOT/MOT2016/test'
        img_fold = os.path.join(root_folder, sequence_name, 'img1/')
    gt_file = os.path.join(root_folder, sequence_name, 'groundtruth_rect.txt')
    out_fold = os.path.join(root_folder, sequence_name, 'yolo_out/')
    heat_fold = os.path.join(root_folder, sequence_name, 'yolo_heat/')
    yolo.createFolder(out_fold)
    yolo.createFolder(heat_fold)
    if heatmap is True:
        yolo.prepare_training_data_heatmap(img_fold, gt_file, heat_fold)
    else:
        # Single-target sequences have ground truth; MOT sequences are
        # processed multi-target without it.
        if (test >= 0 and test <= 29) or (test >= 90):
            yolo.prepare_training_data(img_fold,gt_file,out_fold)
        else:
            yolo.prepare_training_data_multiTarget(img_fold,out_fold)

if __name__=='__main__':
    main(sys.argv)
| 2.328125 | 2 |
pipenv/cmdparse.py | sthagen/pipenv | 23 | 17247 | <reponame>sthagen/pipenv<filename>pipenv/cmdparse.py
import itertools
import re
import shlex
class ScriptEmptyError(ValueError):
    """Raised when a script line parses to an empty command (no tokens)."""

    pass
def _quote_if_contains(value, pattern):
    """Return *value* double-quoted (with cmd-style escaping of embedded
    quotes and the backslashes preceding them) if it matches *pattern*;
    otherwise return it unchanged.

    Uses ``re.search`` directly instead of the original's roundabout
    ``next(iter(re.finditer(...)))`` — same truthiness, clearer intent.
    """
    if re.search(pattern, value):
        return '"{0}"'.format(re.sub(r'(\\*)"', r'\1\1\\"', value))
    return value
class Script(object):
    """A single script line from Pipfile's [scripts] section.

    Parsing always uses POSIX shell rules, even on Windows.
    """

    def __init__(self, command, args=None):
        self._parts = [command] + list(args or [])

    @classmethod
    def parse(cls, value):
        """Build a Script from a string (shlex-split) or a token list."""
        tokens = shlex.split(value) if isinstance(value, str) else value
        if not tokens:
            raise ScriptEmptyError(value)
        command, args = tokens[0], tokens[1:]
        return cls(command, args)

    def __repr__(self):
        return "Script({0!r})".format(self._parts)

    @property
    def command(self):
        """The executable (first token) of the script line."""
        return self._parts[0]

    @property
    def args(self):
        """The argument tokens following the command."""
        return self._parts[1:]

    @property
    def cmd_args(self):
        """Command and arguments as one list."""
        return self._parts

    def extend(self, extra_args):
        """Append additional arguments to the script line."""
        self._parts.extend(extra_args)

    def cmdify(self):
        """Encode the script into one cmd.exe-executable string.

        Mirrors CreateProcess quoting: each token containing whitespace or
        carets (and, for the command itself, parentheses) is wrapped in
        double quotes, with embedded quotes backslash-escaped and preceding
        backslashes doubled.  Tokens without such characters are left bare
        so DOS-style switches and built-ins like ``echo`` keep working.
        Intended for ``subprocess.Popen(..., shell=True)``.
        """
        quoted_command = _quote_if_contains(self.command, r"[\s^()]")
        quoted_args = (_quote_if_contains(arg, r"[\s^]") for arg in self.args)
        return " ".join(itertools.chain([quoted_command], quoted_args))
| 2.796875 | 3 |
msph/clients/ms_online.py | CultCornholio/solenya | 11 | 17248 | <filename>msph/clients/ms_online.py<gh_stars>10-100
from .framework import Client, Resource
from . import constants as const
# Shared HTTP client for the Microsoft identity platform
# (login.microsoftonline.com).  All endpoints below POST form-encoded
# bodies, hence the Content-Type default; the User-Agent mimics a
# desktop Firefox browser.
client = Client(
    base_url='https://login.microsoftonline.com',
    base_headers={
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0',
        'Content-Type': 'application/x-www-form-urlencoded',
    }
)
@client.endpoint
def get_device_code(client_id:str) -> str:
    """Start the OAuth2 device-code flow for *client_id*.

    POSTs to the /devicecode endpoint with the scope from
    ``const.DEVICE_CODE_SCOPE``.  (The ``-> str`` annotation is presumably
    how the endpoint framework decodes the response — verify against
    ``framework.Client``.)
    """
    return Resource(
        uri='/organizations/oauth2/v2.0/devicecode',
        data={"client_id": client_id, "scope": const.DEVICE_CODE_SCOPE},
    )
@client.endpoint
def get_access_token(client_id:str, device_code:str) -> dict:
    """Exchange a *device_code* (from get_device_code) for an access token.

    Uses the grant type from ``const.ACCESS_TOKEN_GRANT``; returns the
    token response decoded as a dict by the endpoint framework.
    """
    return Resource(
        uri='/organizations/oauth2/v2.0/token',
        data={"grant_type": const.ACCESS_TOKEN_GRANT, "client_id": client_id, "code": device_code},
    )
@client.endpoint
def refresh_access_token(refresh_token:str, target_id:str) -> dict:
    """Exchange *refresh_token* for a fresh access token.

    NOTE(review): ``target_id`` is accepted but never used in the request,
    and the grant type is an inline string literal here while the sibling
    endpoints use constants from ``const`` — confirm whether both are
    intentional.
    """
    return Resource(
        uri='/common/oauth2/v2.0/token',
        data={'grant_type': 'refresh_token', 'refresh_token': refresh_token, 'scope': const.DEVICE_CODE_SCOPE}
    )
| 2.109375 | 2 |
warn/platforms/job_center/cache.py | anikasikka/warn-scraper | 12 | 17249 | <reponame>anikasikka/warn-scraper
import logging
import re
from warn.cache import Cache as BaseCache
from .urls import urls
logger = logging.getLogger(__name__)
class Cache(BaseCache):
    """A custom cache for Job Center sites."""

    def save(self, url, params, html):
        """Save file to the cache."""
        key = self.key_from_url(url, params)
        self.write(key, html)
        logger.debug(f"Saved to cache: {key}")

    def fetch(self, url, params):
        """Fetch file from the cache."""
        key = self.key_from_url(url, params)
        html = self.read(key)
        logger.debug(f"Fetched from cache: {key}")
        return html

    def key_from_url(self, url, params=None):
        """Convert a URL to a cache key."""
        if re.search(r"warn_lookups/\d+$", url):
            # Detail page for a single record: key on the trailing record id.
            record_number = url.rsplit("/")[-1]
            return f"records/{record_number}.html"
        # Otherwise this is an initial search with params or a downstream page URL
        start_key = "q[notice_on_gteq]"
        end_key = "q[notice_on_lteq]"
        if "page" in url:
            # Downstream pages carry the dates and page number in the query string.
            parsed_params = urls.parse_url_query(url)
            page_num = urls.page_num_from_url(url)
            start = parsed_params[start_key][0]
            end = parsed_params[end_key][0]
        else:
            # Initial search page: dates come from the params dict.
            params = params or {}
            start = params[start_key]
            end = params[end_key]
            page_num = 1
        return f"search_results/{start}_{end}_page{page_num}.html"
| 2.8125 | 3 |
ebcli/core/abstractcontroller.py | senstb/aws-elastic-beanstalk-cli | 110 | 17250 | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import textwrap
import json
import sys
import os
from cement.core import controller
from ebcli import __version__
from ebcli.core.ebglobals import Constants
from ebcli.lib import elasticbeanstalk, utils
from ebcli.core import io, fileoperations
from ebcli.objects.exceptions import (
NoEnvironmentForBranchError,
PlatformWorkspaceNotSupportedError,
ApplicationWorkspaceNotSupportedError,
EBCLIException,
NotInitializedError
)
from ebcli.resources.strings import strings, flag_text
from ebcli.objects import region
from ebcli.operations import commonops
class AbstractBaseController(controller.CementBaseController):
    """
    This is an abstract base class that is useless on its own, but used
    by other classes to sub-class from and to share common commands and
    arguments.
    """
    class Meta:
        # Cement wiring: subclasses are nested under the top-level `eb`
        # command and override these attributes per sub-command.
        label = 'abstract'
        stacked_on = 'base'
        stacked_type = 'nested'
        arguments = [
            (['environment_name'], dict(action='store', nargs='?',
                                        default=[],
                                        help=flag_text['general.env'])),
        ]
        epilog = ''
        usage = 'eb {cmd} <environment_name> [options ...]'

    def do_command(self):
        # Hook overridden by each concrete sub-command controller.
        pass

    @classmethod
    def validate_workspace(cls):
        # Reject commands that are not valid for the current workspace
        # (application vs platform) before running them.
        workspace_type = fileoperations.get_workspace_type(None)
        is_platform_workspace_only_command = cls.Meta.__dict__.get(
            'is_platform_workspace_only_command'
        )
        requires_directory_initialization = cls.Meta.__dict__.get(
            'requires_directory_initialization'
        )
        # --modules / --help and the two list controllers are exempt
        # from workspace validation.
        if '--modules' in sys.argv:
            pass
        elif '--help' in sys.argv:
            pass
        elif cls.__name__ == 'PlatformListController' or cls.__name__ == 'EBPListController':
            pass
        elif requires_directory_initialization and not workspace_type:
            raise NotInitializedError(strings['exit.notsetup'])
        elif is_platform_workspace_only_command:
            if Constants.WorkSpaceTypes.APPLICATION == workspace_type:
                raise ApplicationWorkspaceNotSupportedError(
                    strings['exit.applicationworkspacenotsupported']
                )

    @controller.expose(hide=True)
    def default(self):
        """
        This command will be shared within all controllers that sub-class
        from here. It can also be overridden in the sub-class
        """
        self.validate_workspace()
        self.do_command()
        self.check_for_cli_update(__version__)

    def check_workspace_type(self, expected_type):
        # Raise the appropriate "not supported" error when the current
        # workspace does not match the type this command expects.
        workspace_type = fileoperations.get_workspace_type()
        if workspace_type != expected_type:
            if Constants.WorkSpaceTypes.PLATFORM == workspace_type:
                raise PlatformWorkspaceNotSupportedError(
                    strings['exit.platformworkspacenotsupported']
                )
            if Constants.WorkSpaceTypes.APPLICATION == workspace_type:
                raise ApplicationWorkspaceNotSupportedError(
                    strings['exit.applicationworkspacenotsupported']
                )

    def check_for_cli_update(self, version):
        # Only a handful of frequently-used commands trigger the
        # "new version available" alert.
        label = self.Meta.label
        if label in ('create', 'deploy', 'status', 'clone', 'config'):
            if cli_update_exists(version):
                if self.check_install_script_used():
                    io.log_alert(strings['base.update_available_script_install'])
                else:
                    io.log_alert(strings['base.update_available'])

    def get_app_name(self):
        # Application name comes from the .elasticbeanstalk config.
        app_name = fileoperations.get_application_name()
        return app_name

    def get_env_name(self, cmd_example=None, noerror=False, varname='environment_name'):
        # Resolve the target environment: explicit CLI argument first,
        # then the environment associated with the current branch.
        env_name = getattr(self.app.pargs, varname, None)
        if not env_name:
            env_name = commonops. \
                get_current_branch_environment()
        workspace_type = fileoperations.get_workspace_type(Constants.WorkSpaceTypes.APPLICATION)
        if not env_name:
            if Constants.WorkSpaceTypes.PLATFORM == workspace_type:
                raise EBCLIException(strings['platform.nobuilderenv'])
            if noerror:
                return None
            if not cmd_example:
                message = strings['branch.noenv'].replace('{cmd}',
                                                          self.Meta.label)
            else:
                message = strings['branch.noenv'].replace('eb {cmd}',
                                                          cmd_example)
            io.log_error(message)
            raise NoEnvironmentForBranchError()
        return env_name

    def check_install_script_used(self):
        # True when the CLI was installed via the ebcli install script
        # (which places it inside a '.ebcli-virtual-env' directory).
        return '.ebcli-virtual-env' in os.path.abspath(__file__)

    @classmethod
    def _add_to_handler(cls, handler):
        handler.register(cls)

    @property
    def _help_text(self):
        """
        Returns the help text displayed when for the commands of the type `eb <command> <subcommand>`
        except where <command> is "platform".
        """
        longest = 0

        def pad(label):
            # Pad so every help string starts in the same column,
            # two spaces past the longest command name.
            padlength = longest - len(label) + 2
            padding = ' '
            if padlength < 0:
                for x in range(0, longest):
                    padding += ' '
            else:
                for x in range(0, padlength):
                    padding += ' '
            return padding

        help_txt = ''
        # First pass: find the longest command name for column alignment.
        for label in self._visible_commands:
            if len(label) > longest:
                longest = len(label)
        # Second pass: render "  name    help" lines plus alias notes.
        for label in self._visible_commands:
            cmd = self._dispatch_map[label]
            cmd_txt = '  '
            cmd_name = label
            cmd_aliases = cmd['aliases']
            if len(cmd_aliases) > 0 and cmd['aliases_only']:
                cmd_name = cmd_aliases.pop(0)
            cmd_txt += '{}'.format(cmd_name)
            if cmd['help']:
                cmd_txt += '{}{}'.format(pad(cmd_txt), cmd['help'])
            if len(cmd_aliases) > 0:
                cmd_txt += '\n{}(alias: {})'.format(pad(''), ', '.join(cmd_aliases))
            cmd_txt += '\n'
            help_txt += cmd_txt
        if len(help_txt) > 0:
            txt = '''{}
commands:
{}
'''.format(self._meta.description, help_txt)
        else:
            txt = self._meta.description
        return textwrap.dedent(txt)
def cli_update_exists(current_version):
    """Return True when a newer awsebcli release is published on PyPI.

    Best-effort check: any failure (no network, malformed payload,
    missing keys) is treated as "no update available" so CLI commands
    never fail because of it.
    """
    try:
        data = utils.get_data_from_url(
            'https://pypi.python.org/pypi/awsebcli/json', timeout=5)
        data = json.loads(data)
        latest = data['info']['version']
        return latest != current_version
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; everything else stays best-effort.
        return False
| 1.78125 | 2 |
python/speaktest.py | kyle-cook/templates | 0 | 17251 | import unittest
import speak
class SpeakTests(unittest.TestCase):
    """
    Unit test for the speak library
    """
    def testHello(self):
        # helloworld() must return the canonical greeting.
        self.assertEqual("Hello World!", speak.helloworld())
    def testGoodbye(self):
        # goodbyeworld() must return the matching farewell.
        self.assertEqual("Goodbye World!", speak.goodbyeworld())
# Allow running this file directly: `python speaktest.py`.
if __name__ == "__main__":
    unittest.main()
| 3.046875 | 3 |
c2f_loop.py | devopsprosiva/python | 0 | 17252 | <filename>c2f_loop.py<gh_stars>0
#!/usr/local/bin/python
import sys
temperatures=[10,-20,-289,100]  # sample Celsius readings; note -289 is below absolute zero
def c2f(cel_temp):
    """Convert a Celsius temperature to Fahrenheit.

    Returns an explanatory string instead of a number when the input is
    below absolute zero (-273.15 C).
    """
    if cel_temp >= -273.15:
        return cel_temp * 1.8 + 32
    return "The lowest possible temperature that physical matter can reach is -273.15C"
# Convert every physically possible reading and append the results,
# one per line, to temperatures.txt.
# Fix: the original re-opened the file on every iteration without a
# context manager, leaking the handle if an exception occurred. Opening
# once with `with` appends the exact same content and guarantees closing.
with open('temperatures.txt', 'a+') as outfile:
    for temp in temperatures:
        # Skip readings at or below absolute zero (-273.15 C), matching
        # the guard inside c2f() itself.
        if temp > -273.15:
            outfile.write(str(c2f(temp)))
            outfile.write("\n")
| 3.34375 | 3 |
problems/eggs/services/confirm_min_throws_server.py | giuliagalvan/TAlight | 0 | 17253 | #!/usr/bin/env python3
# METADATA OF THIS TAL_SERVICE:
# TALight service identity: which problem/service this script implements.
problem="eggs"
service="confirm_min_throws"
# Command-line arguments accepted by this service: (name, type) pairs.
args_list = [
    ('min',int),
    ('n_eggs',int),
    ('n_floors',int),
    ('lang',str),
    ('ISATTY',bool),
]
from sys import stderr, exit, argv
from random import randrange
from math import inf as IMPOSSIBLE
from multilanguage import Env, Lang, TALcolors
# Environment, colored-terminal and localization helpers from TALight.
ENV =Env(problem, service, args_list)
TAc =TALcolors(ENV)
LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
TAc.print(LANG.opening_msg, "green")
# START CODING YOUR SERVICE:
# INITIALIZATON: allocation, base cases, sentinels
# table[u][f] = minimum number of launches that suffice, in the worst
# case, to locate the critical floor with u eggs and f floors.
# Row 0 (zero eggs) is IMPOSSIBLE for any f > 0; column 0 is always 0.
table = [ [0] + [IMPOSSIBLE] * ENV['n_floors'] ]
for u in range(ENV['n_eggs']):
    table.append([0] + [None] * ENV['n_floors'])
# INDUCTTVE STEP: the min-max recursion with nature playing against
for u in range(1,1+ENV['n_eggs']):
    for f in range(1,1+ENV['n_floors']):
        table[u][f] = IMPOSSIBLE
        # Try every first launch floor; nature picks the worse outcome:
        # egg survives -> u eggs, f - first_launch_floor floors above;
        # egg breaks   -> u-1 eggs, first_launch_floor - 1 floors below.
        for first_launch_floor in range(1,1+f):
            table[u][f] = min(table[u][f],1+max(table[u][f-first_launch_floor],table[u-1][first_launch_floor-1]))
if table[ENV['n_eggs']][ENV['n_floors']] < ENV['min']:
    print(f"No! When you are given {ENV['n_eggs']} eggs and the floors are {ENV['n_floors']} then there exists a policy that guarantees you to find out the truth in strictly less than {ENV['min']} launches, whatever will happen (worst case).")
    #English: print("No! When you are given {ENV['n_eggs']} eggs and the floors are {ENV['n_floors']} then there exists a policy that guarantees you to find out the truth in strictly less than {ENV['min']} launches, whatever will happen (worst case).")
if table[ENV['n_eggs']][ENV['n_floors']] > ENV['min']:
    print(f"No! When you are given {ENV['n_eggs']} eggs and the floors are {ENV['n_floors']} then no policy guarantees you to find out the truth within {ENV['min']} launches in every possible scenario (aka, whathever the truth is).")
    #English:
if table[ENV['n_eggs']][ENV['n_floors']] == ENV['min']:
    print(f"Yes! Indeed, {ENV['min']} is the smallest possible natural B such that, when you are given {ENV['n_eggs']} eggs and the floors are {ENV['n_floors']}, still there exists a policy that guarantees you to find out the truth within B launches in every possible scenario.")
    #English:
exit(0)
| 2.859375 | 3 |
pylox/error_reporting.py | hculpan/pylox | 1 | 17254 | errorFound = False
def hasError():
global errorFound
return errorFound
def clearError():
global errorFound
errorFound = False
def error(message, lineNo = 0):
report(lineNo, "", message)
def report(lineNo, where, message):
global errorFound
errorFound = True
if lineNo == 0:
print("Error {1}: {2}".format(lineNo, where, message))
else:
print("[Line {0}] Error {1}: {2}".format(lineNo, where, message)) | 3.09375 | 3 |
tests/syntax/scripts/annotated_comments.py | toddrme2178/pyccel | 0 | 17255 | <reponame>toddrme2178/pyccel
#$ header variable x :: int
#$ acc parallel private(idx)
#$ omp parallel private(idx)
| 0.910156 | 1 |
server/admin.py | allisto/allistic-server | 5 | 17256 | from django.contrib import admin
from .models import Doctor, ConsultationTime, Medicine, Allergy, Child, Parent
# Branding shown at the top of every Django admin page.
admin.site.site_header = "Allisto - We Do Good"
@admin.register(Doctor)
class DoctorAdmin(admin.ModelAdmin):
    # Columns, sidebar filters and searchable fields for the Doctor list view.
    list_display = ('name', 'aadhar_number', 'specialization', 'email', 'phone_number')
    list_filter = ('specialization', 'consultation_fee', 'working_hours')
    search_fields = ('name', 'specialization', 'consultation_fee')
@admin.register(Parent)
class ParentAdmin(admin.ModelAdmin):
    # Admin list configuration for parents/guardians.
    list_display = ('name', 'aadhar_number', 'email', 'phone_number', 'address')
    list_filter = ('name', 'email', 'phone_number')
    search_fields = ('name', 'aadhar_number', 'email', 'phone_number', 'address')
@admin.register(Child)
class ChildAdmin(admin.ModelAdmin):
    # Admin list configuration for children.
    list_display = ('name', 'autistic', 'birthday', 'gender')
    list_filter = ('name', 'autistic', 'birthday')
    search_fields = ('name', 'autistic', 'birthday')
@admin.register(Allergy)
class AllergyAdmin(admin.ModelAdmin):
    # Admin list configuration for allergies.
    list_display = ('name', 'description')
    list_filter = ('name', 'description')
    search_fields = ('name',)
@admin.register(Medicine)
class MedicineAdmin(admin.ModelAdmin):
    # Admin list configuration for medicines.
    list_display = ('name', 'description')
    list_filter = ('name', 'description')
    search_fields = ('name',)
# ConsultationTime is registered with the default ModelAdmin options.
admin.site.register(ConsultationTime)
| 1.765625 | 2 |
backend/ir/ir.py | zengljnwpu/yaspc | 0 | 17257 |
from backend.entity.entity import DefinedFuntion
from backend.ir.dumper import Dumper
from backend.ir.stmt import Assign
from backend.ir.stmt import Return
from backend.ir.expr import Bin
from backend.ir.expr import Call
from backend.entity.scope import *
def import_ir(data, asm_file):
    """Build an IR object from a parsed IR-JSON dict.

    ``data`` carries 'variablelist' and 'functionlist' entries; the
    resulting IR is tagged with ``asm_file`` as its source name.
    """
    # NOTE(review): DefinedVariable presumably comes from the
    # `from backend.entity.scope import *` star import — confirm.
    variables = [
        DefinedVariable(name=v["name"], type=v["type"],
                        priv=v["is_private"], init=v["value"])
        for v in data["variablelist"]
    ]
    functions = [
        DefinedFuntion(priv=False, body=f["body"], name=f["name"],
                       params=f["parameterlist"], type=f["type"],
                       scope=LocalScope(f["variablelist"]))
        for f in data["functionlist"]
    ]
    return IR(source=asm_file, defuns=functions, defvars=variables,
              constant_table=None, funcdecls=None, scope=None)
def inst_factory(insn):
    """Translate one JSON instruction dict into the matching IR node.

    Raises a plain Exception for instruction kinds that are not
    implemented yet.
    """
    kind = insn["name"]
    if kind == "store":
        return Assign(loc=insn["line_number"], lhs=insn["address"], rhs=insn["value"])
    if kind == "return":
        return Return(loc=insn["line_number"], expr=insn["expr"])
    if kind == "bin":
        return Bin(left=insn["left"], right=insn["right"], op=insn["op"],
                   type=insn["type"], value=insn["value"])
    if kind == "call":
        return Call(args=insn["args"], expr=insn["expr"], type=insn["type"])
    raise Exception("Feature not implemented")
# This class were used to import IR from json text
class IR:
    """In-memory intermediate representation imported from IR JSON text.

    Holds defined variables/functions, external function declarations,
    the constant table and the top-level scope of one compilation unit.
    """

    def __init__(self,
                 source,
                 defvars,
                 defuns,
                 funcdecls,
                 constant_table,
                 scope):
        self.source = source                  # originating file name
        self.defvars = defvars                # defined global variables
        self.defuns = defuns                  # defined functions
        self.funcdecls = funcdecls            # declared-only functions
        self.scope = scope                    # top-level scope object
        self.constant_table = constant_table  # constant/string literals
        # Lazily populated by init_variables():
        self.gvars = []   # globals that carry an initializer
        self.comms = []   # globals without one (common symbols)

    def file_name(self):
        """Return the source file name."""
        return self.source

    def location(self):
        """Alias of file_name()."""
        return self.source

    def defined_variables(self):
        """Return the list of defined global variables."""
        return self.defvars

    def is_function_defined(self):
        """True when at least one function is defined."""
        return bool(self.defuns)

    def defined_funcitons(self):
        # NOTE: (typo'd) name kept for backward compatibility with callers.
        return self.defuns

    def scope(self):
        # NOTE(review): effectively dead code — the instance attribute
        # ``self.scope`` assigned in __init__ shadows this method.
        return self.scope

    def all_functions(self):
        """Return defined plus declared functions (either list may be None)."""
        result = []
        if self.defuns:
            result.extend(self.defuns)
        if self.funcdecls:
            result.extend(self.funcdecls)
        return result

    def init_variables(self):
        """Split global-scope variables into gvars (initialized) and
        comms (uninitialized common symbols)."""
        # BUGFIX: the original reset ``self.comms`` twice and never reset
        # ``self.gvars``, so repeated calls accumulated duplicates.
        self.gvars = []
        self.comms = []
        for var in self.scope.defined_glabal_scope_variables():
            if var.has_initializer == True:
                self.gvars.append(var)
            else:
                self.comms.append(var)

    # a list of all defined/declared global-scope variables
    def all_global_variables(self):
        # return self.scope.all_global_variables()
        return self.defvars

    def is_global_variable_defined(self):
        # BUGFIX: the original tested the *bound method object*
        # ``self.defined_global_variables`` (always truthy) instead of
        # calling it, so this method always returned True.
        return bool(self.defined_global_variables())

    # Returns the list of global variables.
    def defined_global_variables(self):
        return self.defvars

    def is_common_symbol_defined(self):
        """True when at least one uninitialized global exists."""
        return bool(self.defined_common_symbols())

    def defined_common_symbols(self):
        # BUGFIX: the original fell through and returned None right after
        # populating the lists via init_variables().
        if not self.comms:
            self.init_variables()
        return self.comms

    def is_string_literal_defined(self):
        """True when the constant table is non-empty."""
        return bool(self.constant_table)

    def const_table(self):
        """Return the constant table."""
        return self.constant_table

    def dump(self):
        """Pretty-print this IR via Dumper (debugging aid)."""
        d = Dumper()
        d.print_class(self, self.source)
        d.print_vars("variables", self.defvars)
        d.print_funs("function", self.defuns)
| 2.09375 | 2 |
forest/benchmarking/tests/test_superoperator_transformations.py | stjordanis/forest-benchmarking | 40 | 17258 | import numpy as np
from pyquil.gate_matrices import X, Y, Z, H
from forest.benchmarking.operator_tools.superoperator_transformations import *
# Test philosophy:
# Using the by hand calculations found in the docs we check conversion
# between one qubit channels with one Kraus operator (Hadamard) and two
# Kraus operators (the amplitude damping channel). Additionally we check
# a few two qubit channel conversions to get additional confidence.
def amplitude_damping_kraus(p):
    """Return the two Kraus operators of the amplitude damping channel
    with decay probability ``p``."""
    keep = np.sqrt(1 - p)   # amplitude retained on |1>
    decay = np.sqrt(p)      # amplitude transferred |1> -> |0>
    return [np.asarray([[1, 0], [0, keep]]),
            np.asarray([[0, decay], [0, 0]])]
def amplitude_damping_chi(p):
    """Chi (process) matrix of the amplitude damping channel with decay
    probability ``p``, in the Pauli basis {I, X, Y, Z}."""
    s = np.sqrt(1 - p)
    chi = np.zeros((4, 4), dtype=complex)
    chi[0, 0] = (1 + s) ** 2
    chi[3, 3] = (-1 + s) ** 2
    chi[0, 3] = chi[3, 0] = p
    chi[1, 1] = chi[2, 2] = p
    chi[1, 2] = -1j * p
    chi[2, 1] = 1j * p
    return 0.25 * chi
def amplitude_damping_pauli(p):
    """Pauli-Liouville (Pauli transfer) matrix of the amplitude damping
    channel with decay probability ``p``."""
    s = np.sqrt(1 - p)
    m = np.diag([1.0, s, s, 1.0 - p])
    m[3, 0] = p
    return m
def amplitude_damping_super(p):
    """Superoperator (Liouville, column-stacking) matrix of the amplitude
    damping channel with decay probability ``p``."""
    s = np.sqrt(1 - p)
    m = np.diag([1.0, s, s, 1.0 - p])
    m[0, 3] = p
    return m
def amplitude_damping_choi(p):
    """Choi matrix of the amplitude damping channel with decay
    probability ``p``."""
    s = np.sqrt(1 - p)
    m = np.diag([1.0, 0.0, p, 1.0 - p])
    m[0, 3] = m[3, 0] = s
    return m
# Hand-computed representations of the single-qubit Hadamard gate in the
# four channel formalisms used throughout these tests:
# chi (process), Pauli-Liouville, superoperator and Choi.
HADChi = 0.5 * np.asarray([[0, 0, 0, 0],
                           [0, 1, 0, 1],
                           [0, 0, 0, 0],
                           [0, 1, 0, 1]])
HADPauli = 1.0 * np.asarray([[1, 0, 0, 0],
                             [0, 0, 0, 1],
                             [0, 0, -1, 0],
                             [0, 1, 0, 0]])
HADSuper = 0.5 * np.asarray([[1, 1, 1, 1],
                             [1, -1, 1, -1],
                             [1, 1, -1, -1],
                             [1, -1, -1, 1]])
HADChoi = 0.5 * np.asarray([[1, 1, 1, -1],
                            [1, 1, 1, -1],
                            [1, 1, 1, -1],
                            [-1, -1, -1, 1]])
# Single Qubit Pauli Channel
def one_q_pauli_channel_chi(px, py, pz):
    """Chi matrix of the single-qubit Pauli channel applying X, Y, Z
    with probabilities px, py, pz (identity with the remainder)."""
    total = px + py + pz
    return np.diag([1 - total, px, py, pz])
# Pauli twirled Amplitude damping channel
def analytical_pauli_twirl_of_AD_chi(p):
    """Chi matrix of the Pauli-twirled amplitude damping channel.

    See equation 7 of https://arxiv.org/pdf/1701.03708.pdf
    """
    s = np.sqrt(1 - p)
    c_identity = (2 + 2 * s - p) / 4
    c_xy = p / 4
    c_z = (2 - 2 * s - p) / 4
    return np.diag([c_identity, c_xy, c_xy, c_z])
# I \otimes Z channel or gate (two qubits)
two_qubit_paulis = n_qubit_pauli_basis(2)
IZKraus = two_qubit_paulis.ops_by_label['IZ']
# Superoperator of I (x) Z, diagonal in the computational (vec) basis.
IZSuper = np.diag([1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1])
# one and zero state as a density matrix
ONE_STATE = np.asarray([[0, 0], [0, 1]])
ZERO_STATE = np.asarray([[1, 0], [0, 0]])
# Amplitude damping Kraus operators with p = 0.1
AdKrausOps = amplitude_damping_kraus(.1)
# Use Kraus operators to find output of channel i.e.
# rho_out = A_0 rho A_0^\dag + A_1 rho A_1^\dag.
rho_out = np.matmul(np.matmul(AdKrausOps[0], ONE_STATE), AdKrausOps[0].transpose().conj()) + \
          np.matmul(np.matmul(AdKrausOps[1], ONE_STATE), AdKrausOps[1].transpose().conj())
def test_vec():
    # vec() stacks matrix columns into a single column vector
    # (column-major / Fortran order), for square and non-square inputs.
    A = np.asarray([[1, 2], [3, 4]])
    B = np.asarray([[1, 2, 5], [3, 4, 6]])
    np.testing.assert_array_equal(np.array([[1], [3], [2], [4]]), vec(A))
    np.testing.assert_array_equal(np.array([[1], [3], [2], [4], [5], [6]]), vec(B))
def test_unvec():
    # unvec() must invert vec() (round-trip identity).
    A = np.asarray([[1, 2], [3, 4]])
    C = np.asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    np.testing.assert_array_equal(A, unvec(vec(A)))
    np.testing.assert_array_equal(C, unvec(vec(C)))
def test_kraus_ops_sum_to_identity():
    # Check kraus ops sum to identity
    p = np.random.rand()
    Ad0, Ad1 = amplitude_damping_kraus(p)
    np.testing.assert_array_almost_equal_nulp(np.matmul(Ad0.transpose().conj(), Ad0)
                                              + np.matmul(Ad1.transpose().conj(), Ad1), np.eye(2))
def test_kraus2chi():
    # Kraus -> chi conversion against the hand-computed references.
    assert np.allclose(HADChi, kraus2chi(H))
    p = np.random.rand()
    AdKraus = amplitude_damping_kraus(p)
    AdChi = amplitude_damping_chi(p)
    assert np.allclose(AdChi, kraus2chi(AdKraus))
    assert np.allclose(superop2chi(IZSuper), kraus2chi(IZKraus))
def test_kraus2pauli_liouville():
    # Kraus -> Pauli-Liouville conversion against the references.
    p = np.random.rand()
    AdKraus = amplitude_damping_kraus(p)
    AdPauli = amplitude_damping_pauli(p)
    assert np.allclose(kraus2pauli_liouville(AdKraus), AdPauli)
    assert np.allclose(kraus2pauli_liouville(H), HADPauli)
def test_kraus2superop():
    # Kraus -> superoperator conversion, including non-square Kraus ops.
    p = np.random.rand()
    AdKraus = amplitude_damping_kraus(p)
    AdSuper = amplitude_damping_super(p)
    np.testing.assert_array_almost_equal_nulp(kraus2superop(AdKraus), AdSuper)
    # test application of super operator is the same as application of Kraus ops
    ONE_STATE_VEC = vec(ONE_STATE)
    np.testing.assert_array_almost_equal_nulp(unvec(np.matmul(kraus2superop(AdKrausOps),
                                                              ONE_STATE_VEC)), rho_out)
    assert np.allclose(kraus2superop(H), HADSuper)
    assert np.allclose(kraus2superop(IZKraus), IZSuper)
    # Below here tests non square Kraus operators
    # In this example The Kraus operator is M_0 = I \otimes <0| where <0| = (1,0)
    Idd = np.asarray([[1, 0], [0, 1]])
    M0 = np.kron(Idd, np.asarray([[1, 0]]))
    attempt = kraus2superop(M0)
    answer = np.kron(M0.conj(), M0)
    assert np.allclose(answer, attempt)
def test_kraus2choi():
    # Kraus -> Choi conversion against the references.
    p = np.random.rand()
    AdKraus = amplitude_damping_kraus(p)
    AdChoi = amplitude_damping_choi(p)
    assert np.allclose(kraus2choi(AdKraus), AdChoi)
    assert np.allclose(kraus2choi(H), HADChoi)
def test_chi2pauli_liouville():
    # chi -> Pauli-Liouville conversion against the references.
    p = np.random.rand()
    AdChi = amplitude_damping_chi(p)
    AdPauli = amplitude_damping_pauli(p)
    assert np.allclose(AdPauli, chi2pauli_liouville(AdChi))
    assert np.allclose(HADPauli, chi2pauli_liouville(HADChi))
def test_basis_transform_p_to_c():
    # Pauli -> computational basis change: basis element 7 is X (x) Z.
    xz_pauli_basis = np.zeros((16, 1))
    xz_pauli_basis[7] = [1.]
    assert np.allclose(unvec(pauli2computational_basis_matrix(4) @ xz_pauli_basis), np.kron(X, Z))
def test_basis_transform_c_to_p():
    # Computational -> Pauli basis change (inverse of the test above).
    xz_pauli_basis = np.zeros((16, 1))
    xz_pauli_basis[7] = [1.]
    assert np.allclose(computational2pauli_basis_matrix(4) @ vec(np.kron(X, Z)), xz_pauli_basis)
def test_pl_to_choi():
    # Pauli-Liouville -> Choi agrees with direct Kraus -> Choi for all
    # two-qubit Paulis and for Hadamard.
    for i, pauli in enumerate(n_qubit_pauli_basis(2)):
        pl = kraus2pauli_liouville(pauli[1])
        choi = kraus2choi(pauli[1])
        assert np.allclose(choi, pauli_liouville2choi(pl))
    pl = kraus2pauli_liouville(H)
    choi = kraus2choi(H)
    assert np.allclose(choi, pauli_liouville2choi(pl))
def test_superop_to_kraus():
    # Superoperator -> Kraus recovers the original operators (up to sign
    # and ordering, see TODO below).
    assert np.allclose(superop2kraus(IZSuper), IZKraus)
    p = np.random.rand()
    AdSuper = amplitude_damping_super(p)
    AdKraus = amplitude_damping_kraus(p)
    kraus_ops = superop2kraus(AdSuper)
    # the order of the Kraus ops matters
    # TODO: fix the sign problem in Kraus operators
    assert np.allclose([np.abs(kraus_ops[1]), np.abs(kraus_ops[0])], AdKraus)
def test_superop_to_choi():
    # Superoperator -> Choi agrees with direct Kraus -> Choi.
    for i, pauli in enumerate(n_qubit_pauli_basis(2)):
        superop = kraus2superop(pauli[1])
        choi = kraus2choi(pauli[1])
        assert np.allclose(choi, superop2choi(superop))
    p = np.random.rand()
    AdSuper = amplitude_damping_super(p)
    AdChoi = amplitude_damping_choi(p)
    assert np.allclose(AdChoi, superop2choi(AdSuper))
    superop = kraus2superop(H)
    choi = kraus2choi(H)
    assert np.allclose(choi, superop2choi(superop))
def test_superop_to_pl():
    # Superoperator -> Pauli-Liouville, both from the reference matrices
    # and via a Kraus round-trip.
    p = np.random.rand()
    AdSuper = amplitude_damping_super(p)
    AdPauli = amplitude_damping_pauli(p)
    assert np.allclose(AdPauli, superop2pauli_liouville(AdSuper))
    AdKraus = amplitude_damping_kraus(p)
    superop = kraus2superop(AdKraus)
    pauli = kraus2pauli_liouville(AdKraus)
    assert np.allclose(pauli, superop2pauli_liouville(superop))
def test_pauli_liouville_to_superop():
    # Pauli-Liouville -> superoperator (inverse direction of the above).
    p = np.random.rand()
    AdSuper = amplitude_damping_super(p)
    AdPauli = amplitude_damping_pauli(p)
    assert np.allclose(AdSuper, pauli_liouville2superop(AdPauli))
    AdKraus = amplitude_damping_kraus(p)
    superop = kraus2superop(AdKraus)
    pauli = kraus2pauli_liouville(AdKraus)
    assert np.allclose(superop, pauli_liouville2superop(pauli))
def test_choi_to_kraus():
    # Choi -> Kraus round-trips through kraus2choi; for the identity
    # channel the recovered Kraus op is I up to a phase.
    for i, pauli in enumerate(n_qubit_pauli_basis(2)):
        choi = kraus2choi(pauli[1])
        kraus = choi2kraus(choi)
        assert np.allclose(choi, kraus2choi(kraus))
    id_choi = np.array([[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]])
    assert np.allclose(kraus2choi(choi2kraus(id_choi)), id_choi)
    for kraus in choi2kraus(id_choi):
        assert np.allclose(abs(kraus), np.eye(2)) or np.allclose(kraus, np.zeros((2, 2)))
def test_choi_to_super():
    # Choi -> superoperator against the reference matrices.
    p = np.random.rand()
    AdSuper = amplitude_damping_super(p)
    AdChoi = amplitude_damping_choi(p)
    assert np.allclose(AdSuper, choi2superop(AdChoi))
def test_choi_pl_bijectivity():
    # choi<->superop conversions are involutions on these inputs.
    assert np.allclose(choi2superop(choi2superop(np.eye(4))), np.eye(4))
    assert np.allclose(superop2choi(superop2choi(np.eye(4))), np.eye(4))
    h_choi = kraus2choi(H)
    h_superop = kraus2superop(H)
    assert np.allclose(choi2superop(choi2superop(h_choi)), h_choi)
    assert np.allclose(superop2choi(superop2choi(h_superop)), h_superop)
| 2.15625 | 2 |
0702 In-Place Move Zeros to End of List.py | ansabgillani/binarysearchcomproblems | 1 | 17259 | <gh_stars>1-10
class Solution:
    def solve(self, nums):
        """Return nums with every zero moved to the end, keeping the
        relative order of the non-zero elements."""
        moved = [value for value in nums if value != 0]
        moved.extend([0] * (len(nums) - len(moved)))
        return moved
| 2.90625 | 3 |
OIL/__init__.py | vjdad4m/OIL | 1 | 17260 | import OIL.color
import OIL.label
import OIL.parser
import OIL.tools
import OIL.errors | 1 | 1 |
lib/spack/spack/cmd/load.py | padamson/spack | 2 | 17261 | <reponame>padamson/spack<gh_stars>1-10
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.environment as ev
import spack.store
import spack.user_environment as uenv
import spack.util.environment
# Metadata consumed by spack's command framework for help/organization.
description = "add package to the user environment"
section = "user environment"
level = "short"
def setup_parser(subparser):
    """Parser is only constructed so that this prints a nice help
    message with -h. """
    arguments.add_common_arguments(
        subparser, ['recurse_dependencies', 'installed_specs'])
    # --sh / --csh / --fish select which shell dialect to emit; they are
    # mutually exclusive.
    shells = subparser.add_mutually_exclusive_group()
    shells.add_argument(
        '--sh', action='store_const', dest='shell', const='sh',
        help="print sh commands to load the package")
    shells.add_argument(
        '--csh', action='store_const', dest='shell', const='csh',
        help="print csh commands to load the package")
    shells.add_argument(
        '--fish', action='store_const', dest='shell', const='fish',
        help="print fish commands to load the package")
    subparser.add_argument(
        '--first',
        action='store_true',
        default=False,
        dest='load_first',
        help="load the first match if multiple packages match the spec"
    )
    subparser.add_argument(
        '--only',
        default='package,dependencies',
        dest='things_to_load',
        choices=['package', 'dependencies'],
        help="""select whether to load the package and its dependencies
the default is to load the package and all dependencies
alternatively one can decide to load only the package or only
the dependencies"""
    )
def load(parser, args):
    # Entry point for `spack load`: prints shell commands that add the
    # requested specs (and, by default, their dependencies) to the user
    # environment.
    env = ev.get_env(args, 'load')
    specs = [spack.cmd.disambiguate_spec(spec, env, first=args.load_first)
             for spec in spack.cmd.parse_specs(args.specs)]
    # Without --sh/--csh/--fish we cannot emit shell code; print the
    # shell-init instructions and signal failure instead.
    if not args.shell:
        specs_str = ' '.join(args.specs) or "SPECS"
        spack.cmd.common.shell_init_instructions(
            "spack load",
            " eval `spack load {sh_arg} %s`" % specs_str,
        )
        return 1
    with spack.store.db.read_transaction():
        if 'dependencies' in args.things_to_load:
            # Post-order traversal so dependencies are loaded before the
            # packages that need them; roots included unless --only
            # dependencies was given.
            include_roots = 'package' in args.things_to_load
            specs = [dep for spec in specs
                     for dep in
                     spec.traverse(root=include_roots, order='post')]
        env_mod = spack.util.environment.EnvironmentModifications()
        for spec in specs:
            env_mod.extend(uenv.environment_modifications_for_spec(spec))
            # Record the loaded hash so `spack unload` can find it later.
            env_mod.prepend_path(uenv.spack_loaded_hashes_var, spec.dag_hash())
        cmds = env_mod.shell_modifications(args.shell)
        sys.stdout.write(cmds)
| 2.3125 | 2 |
poisson_image_editing.py | zishun/Poisson-EVA2019 | 0 | 17262 | import numpy as np
import imageio
from PoissonTemperature import FiniteDifferenceMatrixConstruction
def ind_sub_conversion(img, ind2sub_fn, sub2ind_fn):
    """Precompute and save flat-index <-> (row, col) lookup tables.

    For an image of shape (rows, cols), writes:
      * ind2sub_fn: (rows*cols, 2) int32 array mapping flat index -> (row, col)
      * sub2ind_fn: (rows, cols) int32 array mapping (row, col) -> flat index

    Indices are row-major (C order), matching ``img.ravel()``.
    """
    rows, cols = img.shape[:2]
    num = rows * cols
    arange = np.arange(num, dtype=np.int32)
    ind2sub = np.empty((num, 2), dtype=np.int32)
    # Exact integer arithmetic instead of the original float
    # np.floor(arange/cols) / np.remainder round-trip.
    ind2sub[:, 0] = arange // cols  # row index
    ind2sub[:, 1] = arange % cols   # column index
    sub2ind = arange.reshape((rows, cols))
    np.save(ind2sub_fn, ind2sub)
    np.save(sub2ind_fn, sub2ind)
def pie(FDMC, background, foreground):
    """Poisson image editing: blend *foreground* into *background* over
    the masked region by solving a Poisson equation.

    NOTE(review): reads the module-level ``mask`` (NaN marks the
    editable region, set up in __main__) rather than a parameter.
    Returns the composite as a uint8 image.
    """
    Lap, Lap_Solver_Array, Rhs, is_unknown, _, _ = \
        FDMC.laplacian_matrix_construction(mask.ravel())
    # Flatten both images to (pixels, 3) for the sparse linear algebra.
    bg = background.reshape((-1, 3))
    fg = foreground.reshape((-1, 3))
    result = bg.copy()
    # Laplacian of the foreground over the unknown pixels, with the
    # boundary contribution separated out (Rhs).
    lap = Lap.dot(fg[is_unknown, :])
    lap_rhs = Rhs.dot(fg)
    lap_unknown = lap - lap_rhs
    # Solve with Dirichlet boundary values taken from the background.
    poisson_sol = Lap_Solver_Array[0](lap_unknown+Rhs.dot(bg))
    result[is_unknown, :] = poisson_sol
    result = result.reshape(background.shape)
    # Clamp to [0, 1] before converting to 8-bit.
    result[result < 0] = 0.0
    result[result > 1] = 1.0
    return (result*255).astype(np.uint8)
if __name__ == '__main__':
    # Demo: seamlessly clone 'gine.png' into 'mona.png' inside the mask.
    folder = './data/pie/'
    mask = imageio.imread(folder+'mask.png')[:, :, 0].astype(np.float32)
    # Normalize 8-bit RGB images into [0, 1] floats.
    background = imageio.imread(folder+'mona.png')[:, :, :3]/255
    foreground = imageio.imread(folder+'gine.png')[:, :, :3]/255
    # NaN marks the unknown (editable) region for the solver.
    mask[mask > 0] = np.nan
    ind2sub_fn = folder+'ind2sub.npy'
    sub2ind_fn = folder+'sub2ind.npy'
    ind_sub_conversion(mask, ind2sub_fn, sub2ind_fn)
    FDMC = FiniteDifferenceMatrixConstruction(ind2sub_fn, sub2ind_fn)
    result = pie(FDMC, background, foreground)
    imageio.imwrite(folder+'result.png', result)
| 2.21875 | 2 |
mindsdb/api/http/initialize.py | mindsdb/main | 261 | 17263 | from distutils.version import LooseVersion
import requests
import os
import shutil
import threading
import webbrowser
from zipfile import ZipFile
from pathlib import Path
import traceback
import tempfile
# import concurrent.futures
from flask import Flask, url_for, make_response
from flask.json import dumps
from flask_restx import Api
from mindsdb.__about__ import __version__ as mindsdb_version
from mindsdb.interfaces.datastore.datastore import DataStore
from mindsdb.interfaces.model.model_interface import ModelInterface
from mindsdb.interfaces.database.integrations import IntegrationController
from mindsdb.utilities.ps import is_pid_listen_port, wait_func_is_true
from mindsdb.utilities.telemetry import inject_telemetry_to_static
from mindsdb.utilities.config import Config
from mindsdb.utilities.log import get_log
from mindsdb.interfaces.storage.db import session
from mindsdb.utilities.json_encoder import CustomJSONEncoder
class Swagger_Api(Api):
    """
    This is a modification of the base Flask Restplus Api class due to the issue described here
    https://github.com/noirbizarre/flask-restplus/issues/223
    """
    @property
    def specs_url(self):
        # Build swagger.json as a relative URL (_external=False) so the
        # docs keep working behind reverse proxies / non-root mounts.
        return url_for(self.endpoint("specs"), _external=False)
def custom_output_json(data, code, headers=None):
    """Serialize *data* to JSON (via flask's dumps, so the app's custom
    JSON encoder applies) and attach any extra response headers."""
    response = make_response(dumps(data), code)
    if headers:
        response.headers.extend(headers)
    return response
def get_last_compatible_gui_version() -> LooseVersion:
    # Fetch compatible-config.json from S3 and pick the GUI version that
    # matches this MindsDB release. Returns False on any failure.
    log = get_log('http')
    try:
        res = requests.get('https://mindsdb-web-builds.s3.amazonaws.com/compatible-config.json', timeout=5)
    except (ConnectionError, requests.exceptions.ConnectionError) as e:
        print(f'Is no connection. {e}')
        return False
    except Exception as e:
        print(f'Is something wrong with getting compatible-config.json: {e}')
        return False
    if res.status_code != 200:
        print(f'Cant get compatible-config.json: returned status code = {res.status_code}')
        return False
    try:
        versions = res.json()
    except Exception as e:
        print(f'Cant decode compatible-config.json: {e}')
        return False
    current_mindsdb_lv = LooseVersion(mindsdb_version)
    try:
        # Build mindsdb_version -> newest compatible gui_version, while
        # tracking the overall newest of each.
        gui_versions = {}
        max_mindsdb_lv = None
        max_gui_lv = None
        for el in versions['mindsdb']:
            if el['mindsdb_version'] is None:
                # Entry not tied to a specific MindsDB version.
                gui_lv = LooseVersion(el['gui_version'])
            else:
                mindsdb_lv = LooseVersion(el['mindsdb_version'])
                gui_lv = LooseVersion(el['gui_version'])
                if mindsdb_lv.vstring not in gui_versions or gui_lv > gui_versions[mindsdb_lv.vstring]:
                    gui_versions[mindsdb_lv.vstring] = gui_lv
                if max_mindsdb_lv is None or max_mindsdb_lv < mindsdb_lv:
                    max_mindsdb_lv = mindsdb_lv
            if max_gui_lv is None or max_gui_lv < gui_lv:
                max_gui_lv = gui_lv
        all_mindsdb_lv = [LooseVersion(x) for x in gui_versions.keys()]
        all_mindsdb_lv.sort()
        if current_mindsdb_lv.vstring in gui_versions:
            # Exact match for this MindsDB release.
            gui_version_lv = gui_versions[current_mindsdb_lv.vstring]
        elif current_mindsdb_lv > all_mindsdb_lv[-1]:
            # Newer than anything listed: use the newest GUI seen.
            gui_version_lv = max_gui_lv
        else:
            # Fall back to the GUI of the closest older listed release
            # (or the oldest listed one if none is older).
            lower_versions = {key: value for key, value in gui_versions.items() if LooseVersion(key) < current_mindsdb_lv}
            if len(lower_versions) == 0:
                gui_version_lv = gui_versions[all_mindsdb_lv[0].vstring]
            else:
                all_lower_versions = [LooseVersion(x) for x in lower_versions.keys()]
                gui_version_lv = gui_versions[all_lower_versions[-1].vstring]
    except Exception as e:
        log.error(f'Error in compatible-config.json structure: {e}')
        return False
    return gui_version_lv
def get_current_gui_version() -> LooseVersion:
    """Read static/version.txt and return the installed GUI version,
    or None when no version file exists."""
    config = Config()
    version_file = Path(config['paths']['static']).joinpath('version.txt')
    current_gui_version = None
    if version_file.is_file():
        with open(version_file, 'rt') as f:
            current_gui_version = f.readline()
    if current_gui_version is None:
        return None
    return LooseVersion(current_gui_version)
def download_gui(destignation, version):
    """Download and unpack the GUI static files for the given version.

    :param destignation: target directory (str or Path) for the GUI files.
    :param version: GUI version string used to pick the S3 archive name.
    :return: True on success, False if any download failed.
    """
    if isinstance(destignation, str):
        destignation = Path(destignation)
    log = get_log('http')
    dist_zip_path = str(destignation.joinpath('dist.zip'))
    bucket = "https://mindsdb-web-builds.s3.amazonaws.com/"
    resources = [{
        'url': bucket + 'dist-V' + version + '.zip',
        'path': dist_zip_path
    }]
    def get_resources(resource):
        response = requests.get(resource['url'])
        if response.status_code != requests.status_codes.codes.ok:
            raise Exception(f"Error {response.status_code} GET {resource['url']}")
        # Context manager guarantees the handle is closed even on write errors.
        with open(resource['path'], 'wb') as f:
            f.write(response.content)
    try:
        for r in resources:
            get_resources(r)
    except Exception as e:
        log.error(f'Error during downloading files from s3: {e}')
        return False
    static_folder = destignation
    static_folder.mkdir(mode=0o777, exist_ok=True, parents=True)
    # Close the archive explicitly instead of leaking the file handle.
    with ZipFile(dist_zip_path) as archive:
        archive.extractall(static_folder)
    if static_folder.joinpath('dist').is_dir():
        # Some archives nest everything under dist/; flatten that layout.
        shutil.move(str(destignation.joinpath('dist').joinpath('index.html')), static_folder)
        shutil.move(str(destignation.joinpath('dist').joinpath('assets')), static_folder)
        shutil.rmtree(destignation.joinpath('dist'))
    os.remove(dist_zip_path)
    # Record the installed version for get_current_gui_version().
    version_txt_path = destignation.joinpath('version.txt')
    with open(version_txt_path, 'wt') as f:
        f.write(version)
    return True
'''
# to make downloading faster download each resource in a separate thread
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
future_to_url = {executor.submit(get_resources, r): r for r in resources}
for future in concurrent.futures.as_completed(future_to_url):
res = future.result()
if res is not None:
raise res
'''
def initialize_static():
    """Run the one-off GUI static-files update at startup.

    Returns True when the static files are up to date (or were updated).
    """
    success = update_static()
    # Release the module-level session owned by this worker thread once done.
    session.close()
    return success
def update_static():
    ''' Update GUI static files based on compatible-config.json content.
    Files will be downloaded and updated if new version of GUI > current.
    Current GUI version stored in static/version.txt.
    '''
    config = Config()
    log = get_log('http')
    static_path = Path(config['paths']['static'])
    last_gui_version_lv = get_last_compatible_gui_version()
    current_gui_version_lv = get_current_gui_version()
    if last_gui_version_lv is False:
        # Could not determine a compatible GUI version (network/parse error).
        return False
    if current_gui_version_lv is not None:
        if current_gui_version_lv >= last_gui_version_lv:
            # Already up to date; nothing to download.
            return True
    log.info(f'New version of GUI available ({last_gui_version_lv.vstring}). Downloading...')
    # Download into a temp dir first so a failed download leaves the
    # currently installed GUI untouched.
    temp_dir = tempfile.mkdtemp(prefix='mindsdb_gui_files_')
    success = download_gui(temp_dir, last_gui_version_lv.vstring)
    if success is False:
        shutil.rmtree(temp_dir)
        return False
    # Swap in the new files: copy the old static dir aside (copytree needs a
    # non-existent destination, hence the mkdtemp+rmtree), replace it with
    # the freshly downloaded tree, then drop the backup.
    temp_dir_for_rm = tempfile.mkdtemp(prefix='mindsdb_gui_files_')
    shutil.rmtree(temp_dir_for_rm)
    shutil.copytree(str(static_path), temp_dir_for_rm)
    shutil.rmtree(str(static_path))
    shutil.copytree(temp_dir, str(static_path))
    shutil.rmtree(temp_dir_for_rm)
    log.info(f'GUI version updated to {last_gui_version_lv.vstring}')
    return True
def initialize_flask(config, init_static_thread, no_studio):
    """Create the Flask app and its Swagger-wrapped API.

    :param config: application config mapping (paths, api host/port).
    :param init_static_thread: thread downloading GUI static files; the
        browser opener waits for it before opening the GUI.
    :param no_studio: when True, serve only the API (no GUI static files).
    :return: tuple of (flask app, swagger api object).
    """
    # Apparently there's a bug that causes the static path not to work if it's '/' -- https://github.com/pallets/flask/issues/3134, I think '' should achieve the same thing (???)
    if no_studio:
        app = Flask(
            __name__
        )
    else:
        static_path = os.path.join(config['paths']['static'], 'static/')
        if os.path.isabs(static_path) is False:
            # Flask requires an absolute folder for serving static files.
            static_path = os.path.join(os.getcwd(), static_path)
        app = Flask(
            __name__,
            static_url_path='/static',
            static_folder=static_path
        )
    # Short static cache (60s) so freshly updated GUI files show up quickly.
    app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 60
    app.config['SWAGGER_HOST'] = 'http://localhost:8000/mindsdb'
    app.json_encoder = CustomJSONEncoder
    authorizations = {
        'apikey': {
            'type': 'session',
            'in': 'query',
            'name': 'session'
        }
    }
    api = Swagger_Api(
        app,
        authorizations=authorizations,
        security=['apikey'],
        url_prefix=':8000',
        prefix='/api',
        doc='/doc/'
    )
    api.representations['application/json'] = custom_output_json
    port = config['api']['http']['port']
    host = config['api']['http']['host']
    # NOTE rewrite it, that hotfix to see GUI link
    if not no_studio:
        log = get_log('http')
        if host in ('', '0.0.0.0'):
            url = f'http://127.0.0.1:{port}/'
        else:
            url = f'http://{host}:{port}/'
        log.info(f' - GUI available at {url}')
        pid = os.getpid()
        # Open the browser from a daemon thread once the HTTP server is up.
        x = threading.Thread(target=_open_webbrowser, args=(url, pid, port, init_static_thread, config['paths']['static']), daemon=True)
        x.start()
    return app, api
def initialize_interfaces(app):
    """Attach the shared data/model/integration interfaces to the Flask app."""
    app.original_data_store = DataStore()
    app.original_model_interface = ModelInterface()
    app.original_integration_controller = IntegrationController()
    config = Config()
    app.config_obj = config
def _open_webbrowser(url: str, pid: int, port: int, init_static_thread, static_folder):
    """Open webbrowser with url when http service is started.
    If some error then do nothing.
    """
    # Wait for the static files download to finish before opening the GUI.
    init_static_thread.join()
    inject_telemetry_to_static(static_folder)
    logger = get_log('http')
    try:
        # Poll (up to 10s) until the server process actually listens on port.
        is_http_active = wait_func_is_true(func=is_pid_listen_port, timeout=10,
                                           pid=pid, port=port)
        if is_http_active:
            webbrowser.open(url)
    except Exception as e:
        logger.error(f'Failed to open {url} in webbrowser with exception {e}')
        logger.error(traceback.format_exc())
    # Release this thread's session regardless of the outcome.
    session.close()
| 1.820313 | 2 |
pyrocov/io.py | corneliusroemer/pyro-cov | 22 | 17264 | # Copyright Contributors to the Pyro-Cov project.
# SPDX-License-Identifier: Apache-2.0
import functools
import io
import logging
import math
import re
import sys
import torch
import torch.multiprocessing as mp
from Bio import AlignIO
from Bio.Phylo.NewickIO import Parser
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from .phylo import Phylogeny
logger = logging.getLogger(__name__)
# Maps file suffixes to the parser format names used by read_alignment().
FILE_FORMATS = {
    "nex": "nexus",
    "nexus": "nexus",
    "fasta": "fasta",
    "xml": "beast",
}
def _print_dot():
sys.stdout.write(".")
sys.stdout.flush()
def _handle_translate(lines, context):
map_lines = [line.rstrip(",").split() for line in lines[1:-1]]
context["translate"] = {key: value for key, value in map_lines}
def _handle_tree_count(lines, context):
    # Handler used by format="count": every tree statement contributes 1,
    # so summing the generator output yields the tree count.
    return 1
def _handle_tree_newick(lines, context):
    """Parse a single nexus ``tree <name> = <newick>`` statement into a Bio.Phylo tree."""
    assert len(lines) == 1
    tree, name, equal, newick = lines[0].split()
    assert tree == "tree"
    assert equal == "="
    tree = next(Parser.from_string(newick).parse())
    tree.name = name
    # Add translations as .comment attributes
    if "translate" in context:
        translate = context["translate"]
        for leaf in tree.get_terminals():
            leaf.comment = translate[leaf.name]
    return tree
def _handle_tree_torch(lines, context):
    """Parse a nexus ``tree`` statement and convert it to a ``Phylogeny``."""
    assert len(lines) == 1
    tree, name, equal, newick = lines[0].split()
    assert tree == "tree"
    assert equal == "="
    tree = next(Parser.from_string(newick).parse())
    tree = Phylogeny.from_bio_phylo(tree)
    # Progress indicator: one dot per converted tree.
    _print_dot()
    return tree
def _handle_raw(lines, context):
    # Defer parsing: return the raw statement lines plus shared context so a
    # multiprocessing worker can do the expensive parse (see read_nexus_trees).
    return lines, context
def _apply(fn, args):
    # Multiprocessing helper: unpack an argument tuple into fn (picklable
    # alternative to a lambda for pool.imap).
    return fn(*args)
def read_nexus_trees(filename, *, format="newick", max_num_trees=math.inf, processes=0):
    """
    Parse and iterate over newick trees stored in a nexus file.
    This streams the file and thus can handle larger files than
    ``Bio.Phylo.read(..., format="nexus")``.
    Returns an iterator of ``Bio.Phylo`` tree objects.

    :param filename: path of the nexus file to stream.
    :param format: "newick" (Bio.Phylo trees), "torch" (Phylogeny objects),
        "count" (yield 1 per tree), or the internal "_raw_*" variants that
        defer parsing to worker processes.
    :param max_num_trees: yield at most this many trees.
    :param processes: if nonzero, parse trees in a multiprocessing pool.
    """
    # Pick a parsing context and per-statement handlers for the format.
    if format == "count":
        context = {}
        handlers = {"tree": _handle_tree_count}
    elif format == "newick":
        context = {"translate": {}}
        handlers = {"translate": _handle_translate, "tree": _handle_tree_newick}
    elif format == "_raw_newick":
        context = {"translate": {}}
        handlers = {"translate": _handle_translate, "tree": _handle_raw}
    elif format == "torch":
        context = None
        handlers = {"tree": _handle_tree_torch}
    elif format == "_raw_torch":
        context = None
        handlers = {"tree": _handle_raw}
    else:
        raise ValueError(f"unknown format: {format}")
    if processes != 0:
        # Stream raw statements from this process, parse them in workers.
        trees = read_nexus_trees(
            filename, format="_raw_" + format, max_num_trees=max_num_trees
        )
        with mp.Pool(processes) as pool:
            handler = functools.partial(_apply, handlers["tree"])
            yield from pool.imap(handler, trees)
        return
    with open(filename) as f:
        lines = iter(f)
        # Skip the header up to the trees section.
        for line in lines:
            if line.startswith("Begin trees;"):
                break
        part = []
        for line in lines:
            line = line.strip()
            part.append(line)
            if not line.endswith(";"):
                continue
            # A complete ";"-terminated statement has accumulated in `part`.
            type_ = part[0].split()[0].lower()
            handle = handlers.get(type_)
            if handle is not None:
                tree = handle(part, context)
                if tree is not None:
                    yield tree
                    max_num_trees -= 1
                    if max_num_trees <= 0:
                        break
            part = []
def count_nexus_trees(filename):
    """Return how many trees are stored in a nexus file."""
    total = 0
    for one in read_nexus_trees(filename, format="count"):
        total += one
    return total
def stack_nexus_trees(filename, *, max_num_trees=math.inf, processes=0):
    """
    Loads a batch of trees from a nexus file.

    Parses up to ``max_num_trees`` trees (optionally in parallel) and
    combines them via ``Phylogeny.stack``.
    """
    trees = read_nexus_trees(
        filename, format="torch", max_num_trees=max_num_trees, processes=processes
    )
    return Phylogeny.stack(trees)
def read_newick_tree(filename):
    """
    Parse a single newick tree and convert to a ``Phylogeny``.
    """
    with open(filename) as f:
        # The entire file is expected to hold one newick string.
        line = f.read().strip()
    tree = next(Parser.from_string(line).parse())
    return Phylogeny.from_bio_phylo(tree)
def read_alignment(
    filename, format=None, *, max_taxa=math.inf, max_characters=math.inf
):
    """
    Reads a single alignment file to a torch tensor of probabilities.

    :param str filename: Name of input file.
    :param str format: Optional input format, e.g. "nexus" or "fasta".
    :param int max_taxa: Optional number of taxa for truncation.
    :param int max_characters: Optional number of characters for truncation.
    :rtype: torch.Tensor
    :returns: A float tensor of shape ``(num_sequences, num_characters,
        num_bases)`` that is normalized along its rightmost dimension. Note
        that ``num_bases`` is 5 = 4 + 1, where the final base denotes a gap or
        indel.
    """
    # Load a Bio.Align.MultipleSeqAlignment object.
    # Fixed: log the actual filename (the f-string had lost its placeholder).
    logger.info(f"Loading data from {filename}")
    if format is None:
        # Infer the format from the file suffix.
        suffix = filename.split(".")[-1].lower()
        format = FILE_FORMATS.get(suffix)
    if format is None:
        raise ValueError("Please specify a file format, e.g. 'nexus' or 'fasta'")
    elif format == "nexus":
        alignment = _read_alignment_nexus(filename)
    elif format == "beast":
        alignment = _read_alignment_beast(filename)
    else:
        alignment = AlignIO.read(filename, format)
    # Convert to a single torch.Tensor, truncating if requested.
    num_taxa = min(len(alignment), max_taxa)
    if num_taxa < len(alignment):
        alignment = alignment[:num_taxa]
    num_characters = min(len(alignment[0]), max_characters)
    if num_characters < len(alignment[0]):
        alignment = alignment[:, :num_characters]
    logger.info(f"parsing {num_taxa} taxa x {num_characters} characters")
    codebook = _get_codebook()
    # Default every position to the uniform "missing" distribution.
    probs = torch.full((num_taxa, num_characters, 5), 1 / 5)
    for i in range(num_taxa):
        seq = alignment[i].seq
        if not VALID_CODES.issuperset(seq):
            raise ValueError(f"Invalid characters: {set(seq) - VALID_CODES}")
        # Replace gaps at ends with missing.
        beg, end = 0, probs.size(1)
        if seq[0] in "-.N":
            seq, old = seq.lstrip(seq[0]), seq
            beg += len(old) - len(seq)
        if seq[-1] in "-.N":
            seq, old = seq.rstrip(seq[-1]), seq
            end -= len(old) - len(seq)
        probs[i, beg:end] = codebook[list(map(ord, seq))]
    assert torch.isfinite(probs).all()
    return probs
def _read_alignment_nexus(filename):
    """Read a nexus alignment via Bio.AlignIO, pre-filtering problem lines."""
    # Work around bugs in Bio.Nexus reader.
    lines = []
    section = None
    done = set()
    with open(filename) as f:
        for line in f:
            if line.startswith("BEGIN"):
                # Track which nexus section we are inside, e.g. TAXA.
                section = line.split()[-1].strip()[:-1]
            elif line.startswith("END;"):
                done.add(section)
                section = None
                # Stop once both sections needed by AlignIO are collected.
                if "TAXA" in done and "CHARACTERS" in done:
                    lines.append(line)
                    break
            elif section == "CHARACTERS":
                # Replace {ACGT}-style ambiguity sets with IUPAC letters.
                if "{" in line:
                    line = re.sub("{([ATCG]+)}", _encode_ambiguity, line)
            lines.append(line)
    f = io.StringIO("".join(lines))
    alignment = AlignIO.read(f, "nexus")
    return alignment
def _read_alignment_beast(filename):
    """Extract ``<sequence id=... value=...>`` records from a BEAST XML file."""
    records = []
    with open(filename) as handle:
        for raw_line in handle:
            raw_line = raw_line.strip()
            if raw_line.startswith("<sequence "):
                seq_id = re.search(r'\bid="([^"]*)"', raw_line).group(1)
                seq_value = re.search(r'\bvalue="([^"]*)"', raw_line).group(1)
                records.append(SeqRecord(Seq(seq_value), id=seq_id))
    return records
# See https://www.bioinformatics.org/sms/iupac.html
# Each IUPAC nucleotide code maps to a probability row over [A, C, G, T, gap].
NUCLEOTIDE_CODES = {
    # [ A, C, G, T, gap]
    "?": [1 / 5, 1 / 5, 1 / 5, 1 / 5, 1 / 5],  # missing
    "n": [1 / 5, 1 / 5, 1 / 5, 1 / 5, 1 / 5],  # missing
    "A": [1 / 1, 0.0, 0.0, 0.0, 0.0],  # adenine
    "C": [0.0, 1 / 1, 0.0, 0.0, 0.0],  # cytosine
    "G": [0.0, 0.0, 1 / 1, 0.0, 0.0],  # guanine
    "T": [0.0, 0.0, 0.0, 1 / 1, 0.0],  # thymine
    "U": [0.0, 0.0, 0.0, 1 / 1, 0.0],  # uracil
    "R": [1 / 2, 0.0, 1 / 2, 0.0, 0.0],
    "Y": [0.0, 1 / 2, 0.0, 1 / 2, 0.0],
    "S": [0.0, 1 / 2, 1 / 2, 0.0, 0.0],
    "W": [1 / 2, 0.0, 0.0, 1 / 2, 0.0],
    "K": [0.0, 0.0, 1 / 2, 1 / 2, 0.0],
    "M": [1 / 2, 1 / 2, 0.0, 0.0, 0.0],
    "B": [0.0, 1 / 3, 1 / 3, 1 / 3, 0.0],
    "D": [1 / 3, 0.0, 1 / 3, 1 / 3, 0.0],
    "H": [1 / 3, 1 / 3, 0.0, 1 / 3, 0.0],
    "V": [1 / 3, 1 / 3, 1 / 3, 0.0, 0.0],
    "N": [1 / 4, 1 / 4, 1 / 4, 1 / 4, 0.0],
    "-": [0.0, 0.0, 0.0, 0.0, 1 / 1],  # gap
    ".": [0.0, 0.0, 0.0, 0.0, 1 / 1],  # gap
}
# Characters accepted in input sequences (see read_alignment's validation).
VALID_CODES = set(NUCLEOTIDE_CODES)
# Inverse mapping: a set of possible bases -> its IUPAC ambiguity letter.
AMBIGUOUS_CODES = {
    frozenset("AG"): "R",
    frozenset("CT"): "Y",
    frozenset("CG"): "S",
    frozenset("AT"): "W",
    frozenset("GT"): "K",
    frozenset("AC"): "M",
    frozenset("CGT"): "B",
    frozenset("AGT"): "D",
    frozenset("ACT"): "H",
    frozenset("ACG"): "V",
    frozenset("ACGT"): "N",
}
# 6 two-base + 4 three-base + 1 four-base ambiguity codes.
assert len(AMBIGUOUS_CODES) == 6 + 4 + 1
def _encode_ambiguity(chars):
    """re.sub callback: map a matched {ACGT}-style base set to its IUPAC letter."""
    base_set = frozenset(chars.group(1))
    return AMBIGUOUS_CODES[base_set]
def _get_codebook():
    """Build a (256, 5) table mapping ASCII code points to base probability rows.

    Entries for characters without a nucleotide code stay NaN.
    """
    table = torch.full((256, 5), math.nan)
    ascii_keys = [ord(symbol) for symbol in NUCLEOTIDE_CODES.keys()]
    rows = torch.tensor(list(NUCLEOTIDE_CODES.values()))
    # Sanity check: every row is (approximately) a normalized distribution.
    assert rows.sum(-1).sub(1).abs().le(1e-6).all()
    table[ascii_keys] = rows
    return table
| 2.15625 | 2 |
core.py | mistifiedwarrior/house_price_prediction | 0 | 17265 | import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
def convert_to_sqft(str):
    """Convert a raw ``total_sqft`` cell to a float.

    Ranges like "1000 - 1200" become their midpoint; plain numbers are
    parsed directly; unparseable values (e.g. "34.46Sq. Meter") become NaN.
    NOTE: the parameter name shadows the builtin ``str``; kept for
    backward compatibility with existing callers.
    """
    tokens = str.split(' - ')
    if len(tokens) == 2:
        return (float(tokens[0]) + float(tokens[1])) / 2
    try:
        return float(tokens[0])
    except ValueError:
        # np.nan replaces the np.NAN alias, which was removed in NumPy 2.0;
        # ValueError is the only error float() raises for bad strings.
        return np.nan
def convert_to_num(num):
    """Return the leading numeric token of values like '2 BHK' as a float."""
    first_token = str(num).split(' ')[0]
    return float(first_token)
def train_model(X, Y):
    """Fit and return a linear regression model on features X and target Y."""
    model = LinearRegression()
    model.fit(X, Y)
    return model
def get_training_data():
    """Load the Bengaluru housing CSV and return (features, target) frames."""
    raw = pd.read_csv("./Bengaluru_House_Data.csv")
    # Drop columns that this simple model does not use.
    cleaned = raw.drop(columns=["area_type", "balcony", "society", "availability"], axis='columns')
    cleaned['total_sqft'] = cleaned['total_sqft'].apply(convert_to_sqft)
    cleaned['size'] = cleaned['size'].apply(convert_to_num)
    # One-hot encode the location column.
    location_dummies = pd.get_dummies(cleaned["location"])
    merged = pd.concat([cleaned.drop(columns=["location"]), location_dummies], axis='columns')
    merged = merged.drop(columns=["Unnamed: 9"], axis='columns')
    merged = merged.dropna()
    features = merged.drop(['price'], axis='columns')
    target = merged['price']
    return features, target
def predict_price(regression, X, location, bhk, total_sqft, bath):
    """Predict a house price for one property.

    :param regression: fitted model exposing ``predict``.
    :param X: feature DataFrame whose columns define the input layout.
    :param location: location name; one-hot encoded when it is a known column.
    :param bhk: number of bedrooms (the 'size' feature).
    :param total_sqft: area in square feet.
    :param bath: number of bathrooms.
    :return: predicted price (scalar).
    """
    # NOTE(review): assumes the first three columns of X are size,
    # total_sqft, bath — matches get_training_data's output; confirm if
    # the schema ever changes.
    x = np.zeros(len(X.columns))
    x[0] = bhk
    x[1] = total_sqft
    x[2] = bath
    # Previously `np.where(...)[0][0]` raised IndexError for unknown
    # locations before the (always-true) `>= 0` guard could run; now an
    # unknown location simply gets no one-hot flag.
    matches = np.where(X.columns == location)[0]
    if len(matches) > 0:
        x[matches[0]] = 1
    return regression.predict([x])[0]
| 3.078125 | 3 |
Python/bank-robbers.py | JaredLGillespie/CodinGame | 1 | 17266 | # https://www.codingame.com/training/easy/bank-robbers
from heapq import *
def calc_vault_time(c, n):
    """Seconds to crack a vault code of c characters, n of which are digits
    (10 possibilities each); the remaining characters have 5 possibilities each.
    """
    digit_combinations = 10 ** n
    letter_combinations = 5 ** (c - n)
    return digit_combinations * letter_combinations
def solution():
    """Read the puzzle input from stdin and print the total robbery time."""
    # Number of robbers working in parallel, then the number of vaults.
    robbers = int(input())
    vault = int(input())
    vault_times = []
    for i in range(vault):
        # c = characters in the vault code, n = how many of them are digits.
        c, n = map(int, input().split())
        vault_times.append(calc_vault_time(c, n))
    # Min-heap of each active robber's accumulated busy time; each vault is
    # assigned to the robber who frees up first.
    active_robbers = []
    for vt in vault_times:
        if len(active_robbers) < robbers:
            heappush(active_robbers, vt)
        else:
            heappush(active_robbers, vt + heappop(active_robbers))
    # The heist ends when the busiest robber finishes.
    print(max(active_robbers))
solution()
| 3.453125 | 3 |
14Django/day04/BookManager/introduction1.py | HaoZhang95/PythonAndMachineLearning | 937 | 17267 | <gh_stars>100-1000
"""
模板语言:
{{ 变量 }}
{% 代码段 %}
{% 一个参数时:变量|过滤器, Book.id | add: 1 <= 2 当前id+1来和2比较
两个参数时:变量|过滤器:参数 %}, 过滤器最多只能传2个参数,过滤器用来对传入的变量进行修改
{% if book.name|length > 4 %} 管道|符号的左右不能有多余的空格,否则报错,其次并不是name.length而是通过管道来过滤
{{ book.pub_date|date:'Y年m月j日' }} 日期的转换管道
"""
"""
CSRF 跨站请求伪造, 盗用别人的信息,以你的名义进行恶意请求
比如:服务器返回一个表单进行转账操作,再把转账信息返回给服务器。
需要判断发送转账信息请求的客户端是不是刚才获取表单界面的客户端,防止回送请求的修改,和返回页面的修改(表单地址被修改为黑客地址,信息丢失)
防止CSRF需要服务器做安全验证
"""
"""
验证码主要用来防止暴力请求,原理就是请求页面之前生成一个动态不同的验证码写入到session中
用户登录的时候,会拿着填写的验证码和session中的验证码比较进行验证
""" | 2.546875 | 3 |
gammapy/maps/__init__.py | watsonjj/gammapy | 0 | 17268 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Sky maps."""
from .base import *
from .geom import *
from .hpx import *
from .hpxnd import *
from .hpxsparse import *
from .hpxmap import *
from .wcs import *
from .wcsnd import *
from .wcsmap import *
from .sparse import *
| 0.867188 | 1 |
lhotse/dataset/sampling/utils.py | stachu86/lhotse | 353 | 17269 | import warnings
from typing import Dict, Tuple
from lhotse import CutSet
from lhotse.dataset.sampling.base import CutSampler
def find_pessimistic_batches(
sampler: CutSampler, batch_tuple_index: int = 0
) -> Tuple[Dict[str, CutSet], Dict[str, float]]:
"""
Function for finding 'pessimistic' batches, i.e. batches that have the highest potential
to blow up the GPU memory during training. We will fully iterate the sampler and record
the most risky batches under several criteria:
- single longest cut
- single longest supervision
- largest batch cuts duration
- largest batch supervisions duration
- max num cuts
- max num supervisions
.. note: It is up to the users to convert the sampled CutSets into actual batches and test them
by running forward and backward passes with their model.
Example of how this function can be used with a PyTorch model
and a :class:`~lhotse.dataset.K2SpeechRecognitionDataset`::
sampler = SingleCutSampler(cuts, max_duration=300)
dataset = K2SpeechRecognitionDataset()
batches, scores = find_pessimistic_batches(sampler)
for reason, cuts in batches.items():
try:
batch = dset[cuts]
outputs = model(batch)
loss = loss_fn(outputs)
loss.backward()
except:
print(f"Exception caught when evaluating pessimistic batch for: {reason}={scores[reason]}")
raise
:param sampler: An instance of a Lhotse :class:`.CutSampler`.
:param batch_tuple_index: Applicable to samplers that return tuples of :class:`~lhotse.cut.CutSet`.
Indicates which position in the tuple we should look up for the CutSet.
:return: A tuple of dicts: the first with batches (as CutSets) and the other with criteria values, i.e.:
``({"<criterion>": <CutSet>, ...}, {"<criterion>": <value>, ...})``
"""
criteria = {
"single_longest_cut": lambda cuts: max(c.duration for c in cuts),
"single_longest_supervision": lambda cuts: max(
sum(s.duration for s in c.supervisions) for c in cuts
),
"largest_batch_cuts_duration": lambda cuts: sum(c.duration for c in cuts),
"largest_batch_supervisions_duration": lambda cuts: sum(
s.duration for c in cuts for s in c.supervisions
),
"max_num_cuts": len,
"max_num_supervisions": lambda cuts: sum(
1 for c in cuts for _ in c.supervisions
),
}
try:
sampler = iter(sampler)
first_batch = next(sampler)
if isinstance(first_batch, tuple):
first_batch = first_batch[batch_tuple_index]
except StopIteration:
warnings.warn("Empty sampler encountered in find_pessimistic_batches()")
return {}, {}
top_batches = {k: first_batch for k in criteria}
top_values = {k: fn(first_batch) for k, fn in criteria.items()}
for batch in sampler:
if isinstance(batch, tuple):
batch = batch[batch_tuple_index]
for crit, fn in criteria.items():
val = fn(batch)
if val > top_values[crit]:
top_values[crit] = val
top_batches[crit] = batch
return top_batches, top_values
| 2.765625 | 3 |
book/migrations/0010_auto_20170603_1441.py | pyprism/Hiren-Mail-Notify | 0 | 17270 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-03 08:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; avoid editing by hand.
    dependencies = [
        ('book', '0009_book_folder'),
    ]
    operations = [
        # Track the last modification time on every save.
        migrations.AddField(
            model_name='book',
            name='updated_at',
            field=models.DateTimeField(auto_now=True),
        ),
        # Book names must now be unique.
        migrations.AlterField(
            model_name='book',
            name='name',
            field=models.CharField(max_length=400, unique=True),
        ),
    ]
| 1.796875 | 2 |
contrib/ComparisonStatistics/Test/test_1.py | xylar/cdat | 62 | 17271 | <reponame>xylar/cdat
#!/usr/bin/env python
import ComparisonStatistics
import cdutil
import os,sys
# Reference
ref = os.path.join(cdutil.__path__[0],'..','..','..','..','sample_data','tas_dnm-95a.xml')
Ref=cdutil.VariableConditioner(ref)
Ref.var='tas'
Ref.id='reference'
# Test
tst = os.path.join(cdutil.__path__[0],'..','..','..','..','sample_data','tas_ccsr-95a.xml')
Tst=cdutil.VariableConditioner(tst)
Tst.var='tas'
Tst.id='test'
# Final Grid
FG=cdutil.WeightedGridMaker()
FG.longitude.n=36
FG.longitude.first=0.
FG.longitude.delta=10.
FG.latitude.n=18
FG.latitude.first=-85.
FG.latitude.delta=10.
# Now the compall thing
c=ComparisonStatistics.ComparisonStatistics(Tst,Ref,weightedGridMaker=FG)
c.fracmin=.5
c.minyr=3
icall=19
# Let's force the indices to be the same
c.variableConditioner1.cdmsKeywords['time']=('1979','1982','co')
c.variableConditioner2.cdmsKeywords['time']=slice(0,36)
print "Before computing:"
print c.variableConditioner1
#print 'C printing:\n',c
## (test,tfr),(ref,reffrc)=c()
(test,tfr),(ref,reffrc) = c.compute()
print "Test:",test
# Retrieve the rank for th etime_domain 19 (monthly space time)
rank=c.rank(time_domain=19)
print 'Result for Rank:',rank
c.write('tmp.nc',comments='A simple example')
| 1.90625 | 2 |
mathipy/functions/linearithmic.py | BatiDyDx/maths-tools-python | 1 | 17272 | <reponame>BatiDyDx/maths-tools-python<filename>mathipy/functions/linearithmic.py<gh_stars>1-10
import math
import numpy as np
from mathipy.math import calculus
class Linearithmic(calculus.Function):
    """
    f(x) = (mx + h)log_b(kx + a)
    """
    function_type = 'Linearithmic'
    def __init__(self, m = 1, h = 0, b = 10, a = 0, k = 1):
        """m, h: linear factor coefficients; b: log base; k, a: log argument."""
        self.m = m
        self.h = h
        self.b = b
        self.a = a
        self.k = k
    def find_roots(self) -> tuple:
        """Return the two root candidates, replacing non-roots with NaN.

        Candidates: the linear factor's zero (x = -h/m) and the log factor's
        zero (argument equal to 1, i.e. x = (1 - a)/k). Note this divides by
        m and k, so m == 0 or k == 0 raises ZeroDivisionError.
        """
        x1 = - self.h / self.m
        x2 = (1 - self.a) / self.k
        # Keep a candidate only if the full function actually evaluates to 0.
        x1 = x1 if self(x1) == 0 else np.nan
        x2 = x2 if self(x2) == 0 else np.nan
        return (x1, x2)
    def plot_func(self, ax):
        """Mark the roots and y-intercept on the given matplotlib axes."""
        ax.scatter(self.find_roots(), (0,0), color=calculus.Function.function_part['roots'])
        ax.scatter(0, self.get_yint(), color=calculus.Function.function_part['y-intercept'])
    def calculate_values(self, x):
        """Evaluate (mx + h) * log_b(kx + a) at x."""
        return (self.m * x + self.h) * math.log(self.k * x + self.a, self.b)
    def __str__(self):
        """Human-readable formula, e.g. '(1x + 0)log_10(1x + 0)'."""
        representation = ''
        representation += f'({self.m}x + {self.h})'
        representation += f'log_{self.b}({self.k}x + {self.a})'
        return representation
pivot_based_eccv2018/misc/expander/disambiguate.py | gujiuxiang/unpaired_im2text_iccv19 | 18 | 17273 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module contains the necessary functions to load a text-corpus from
NLTK, contract all possible sentences, applying POS-tags to the
contracted sentences and compare that with the original text.
The information about which contraction+pos-tag pair gets expanded to
which full form will be saved in a dictionary for use in expander.py
"""
__author__ = "<NAME>"
# standard library imports
import pprint
import yaml
# third-party library imports
import nltk
# local library imports
import utils
# increase the allowed ram size that the models can use
# nltk.internals.config_java(options='-xmx2G')
def _find_sub_list(sublist, full_list):
"""
Args:
- sublist is a list of words that are supposed to be found in
the full list.
- full list is a list of words that is supposed to be searched
in.
Returns:
- List of tuples with the form
(first_index_of_occurence, last_index_of_occurence)
This function finds all occurences of sublist in the full_list.
"""
# this is the output list
results = []
sublist_len = len(sublist)
# loop over all ind if the word in full_list[ind] matches the first
# word of the sublist
for ind in (i for i, word in enumerate(full_list)
if word == sublist[0]):
# check that the complete sublist is matched
if full_list[ind:ind+sublist_len] == sublist:
# then append this to the results
results.append((ind, ind+sublist_len-1))
return results
def _contract_sentences(expansions,
                        sent_lst,
                        use_ner,
                        ner_args):
    """
    Args:
        - expansions is a dictionary containing the corresponding
          contractions to the expanded words
        - sent_lst is a list of sentences, which is itself a list of
          words, i.e. [["I", "am", "blue"], [...]].
        - use_ner is boolean to decide whether to use
          named-entity-recognition for a potential increase in
          accuracy but with the obvious costs of performance.
        - ner_args is a list with an object of StanfordNERTagger and
          the tag to be used. This only needs to be
          supplied if use_ner is true.
    Returns:
        - yields tuples of the form
          (index of first word that was replaced,
           list of words that were replaced,
           contracted sentence).
          The above example would then give
          (0, ["I", "am"], ["I", "'m", "blue"])
          Note that uncontractible sentences are not added to the
          output.
          Since yield is used, iterate over the results. Otherwise it
          takes too much time.
    This function checks a list of sentences for whether they can be
    contracted. It starts with the first two words, then the first three
    and then goes on to the second+third, then the second+third+fourth
    and so on.
    """
    # first find the indices of the sentences that contain contractions
    for sent in sent_lst:
        if use_ner:
            # replace all named entities with the tag in ner_args[1]
            # throw away replacement info
            sent = utils.sent_to_ner(ner_args[0], sent,
                                     tag=ner_args[1])[0]
        # check whether any expansion is present then add the index
        # it has a True for every expansion that is present
        expansion_bool = [expansion in ' '.join(sent) for expansion
                          in list(expansions.keys())]
        if not any(expansion_bool):
            # if no expansions present just continue
            continue
        # convert the boolean list to a list of indices
        expansion_idx = [i for i, boolean in enumerate(expansion_bool)
                         if boolean]
        # the list of relevant expansions for the sentence
        relevant_exp = [list(expansions.keys())[i] for i in expansion_idx]
        for expansion in relevant_exp:
            # first split the contraction up into a list of the same
            # length as the expanded string
            if len(expansion.split()) in [2, 3, 4]:
                # if you contract three or two words,
                # just split at apostrophes
                contraction = expansions[expansion].split("'")
                assert len(contraction) == len(expansion.split())
                # add the apostrophes again
                contraction[1] = "'" + contraction[1]
                if len(contraction) == 3:
                    contraction[2] = "'" + contraction[2]
                if len(contraction) == 4:
                    contraction[3] = "'" + contraction[3]
            else:
                # this case is only entered when there is only one word
                # input. So assert that this is the case.
                assert len(expansion) == 1
                # this is a completely pathological case, since
                # ambiguous 1-word replacements are not in the common
                # list of replacements from wikipedia. But since one can
                # openly expand contractions.yaml it is checked.
                contraction = expansions[expansion]
            # find where the sublist occurs
            occurences = _find_sub_list(expansion.split(), sent)
            # loop over all first indices of occurences
            # and insert the contracted part
            # (contraction and expansion have equal token counts, per the
            # assert above, so the tail slice below lines up correctly)
            for occurence in occurences:
                contr_sent = sent[:occurence[0]] + contraction
                contr_sent += sent[occurence[0]+len(contraction):]
                yield (occurence[0],
                       sent[occurence[0]:occurence[0]+len(contraction)],
                       contr_sent)
def _invert_contractions_dict():
    """
    Return the inverse of contractions.yaml: a map from each expanded form
    to its contraction, restricted to ambiguous contractions (those that
    have more than one possible expansion).
    """
    with open("contractions.yaml", "r") as stream:
        # load the dictionary containing all the contractions;
        # safe_load avoids arbitrary object construction and keeps working
        # on PyYAML >= 6, where yaml.load() requires an explicit Loader.
        contractions = yaml.safe_load(stream)
    # invert the dictionary for quicker finding of contractions
    expansions = dict()
    for key, value in contractions.items():
        if len(value) == 1:
            # unambiguous contractions need no disambiguation entry
            continue
        for expansion in value:
            if expansion in expansions:
                print("WARNING: As an contraction to {}, {} is replaced with"
                      " {}.".format(expansion,
                                    expansions[expansion],
                                    key))
            expansions[expansion] = key
    return expansions
def write_dictionary(pos_model,
                     sent_lst,
                     add_tags=0,
                     use_ner=False,
                     ner_args=None):
    """
    Args:
        - pos_model is an instance of StanfordPOSTagger
        - sent-lst a list of sentences which themselves are lists of the
          single words.
        - add_tags is the amount of pos tags used after the
          relevant contraction, this can be used to further
          disambiguate but (of course) spreads out the data.
        - use_ner is boolean to decide whether to use
          named-entity-recognition for a potential increase in
          accuracy but with the obvious costs of performance.
        - ner_args is a list with an object of StanfordNERTagger and
          the tag to be used. This only needs to be
          supplied if use_ner is true.
    Returns:
        - None, but writes a disambiguations.yaml file with disambiguations
          for the ambiguous contractions in contractions.yaml.
    Raises:
        ValueError if use_ner is True but no ner_model is supplied.
    Using the provided list of sentences, contract them and pos-tag them.
    Using the pos-tags it is then possible to classify which
    (contraction, pos-tag) combinations get expanded to which ambiguous
    long form.
    """
    # pylint: disable=too-many-locals
    if use_ner and (ner_args is None):
        raise ValueError("The use_ner flag is True but no NER"
                         " model has been supplied!")
    expansions = _invert_contractions_dict()
    output_dict = dict()
    ambiguity_counter = 0
    for tuple_rslt in _contract_sentences(expansions,
                                          sent_lst,
                                          use_ner=use_ner,
                                          ner_args=ner_args):
        # pos tag the sentence
        if use_ner:
            # first replace the NER tag with "it"
            pos_sent = [word.replace(ner_args[1], "it") for word
                        in tuple_rslt[2]]
            # tag the sentence
            pos_sent = pos_model.tag(pos_sent)
            # and replace it with the tag again
            pos_sent = [(tuple_rslt[2][i], word_pos[1]) for i, word_pos
                        in enumerate(pos_sent)]
        else:
            pos_sent = pos_model.tag(tuple_rslt[2])
        # extract the pos tags on the contracted part
        contr_word_pos = pos_sent[tuple_rslt[0]:(tuple_rslt[0] +
                                                 len(tuple_rslt[1]))]
        if add_tags == 0:
            contr_pos = tuple(contr_word_pos)
        else:
            # NOTE(review): this slice starts at len(tuple_rslt[1]) rather
            # than at the contraction's position plus its length — it looks
            # suspicious; confirm the intended window of following tags.
            add_pos_list = pos_sent[len(tuple_rslt[1]):(len(tuple_rslt[1]) +
                                                        add_tags)]
            add_pos = [pos_word[1] for pos_word in add_pos_list]
            contr_pos = tuple(contr_word_pos + add_pos)
        # write a dictionary entry connecting the (words, pos) of the
        # contraction to the expanded part
        word = ' '.join(tuple_rslt[1])
        if contr_pos not in output_dict:
            output_dict[contr_pos] = dict()
            output_dict[contr_pos][word] = 1
            # keep track of the progress
            print("\n\n ---- \n\n")
            pprint.pprint(output_dict)
            print("Ambiguity counter is {}.".format(ambiguity_counter))
            print("\n\n ---- \n\n")
        elif word in output_dict[contr_pos].keys():
            # check whether the entry is already there
            output_dict[contr_pos][word] += 1
            continue
        else:
            # if the combination of pos tags with words already occured
            # once then a list has to be made. Ideally this case doesn't
            # occur
            ambiguity_counter += 1
            output_dict[contr_pos][word] = 1
            print("\n\n ---- \n\n")
            print("AMBIGUITY ADDED!")
            pprint.pprint(output_dict)
            print("Ambiguity counter is {}.".format(ambiguity_counter))
            print("\n\n ---- \n\n")
    with open("disambiguations.yaml", "w") as stream:
        yaml.dump(output_dict, stream)
if __name__ == '__main__':
    # if you call this function directly just build the disambiguation
    # dictionary.
    # load a corpus that has the form of list of sentences which is
    # split up into a list of words
    SENT_LST = nltk.corpus.brown.sents()
    SENT_LST += nltk.corpus.gutenberg.sents()
    SENT_LST += nltk.corpus.reuters.sents()
    SENT_LST += nltk.corpus.inaugural.sents()
    POS_MODEL = utils.load_stanford('pos')
    NER_MODEL = utils.load_stanford('ner')
    # NOTE: ner_args is still passed even though use_ner=False; it is
    # ignored in that case (see write_dictionary).
    write_dictionary(POS_MODEL,
                     SENT_LST,
                     add_tags=1,
                     use_ner=False,
                     ner_args=[NER_MODEL, "<NE>"])
| 3.4375 | 3 |
setup.py | baye0630/paperai | 0 | 17274 | # pylint: disable = C0111
from setuptools import find_packages, setup
# Package metadata for paperai.
# NOTE(review): most upstream metadata (version, author, description, urls)
# is commented out, and license/packages use absolute Windows paths
# (C:\Users\sxm\Desktop\paperai) — this setup.py is not portable and will
# only build on that one machine; restore relative paths before publishing.
setup(name="paperai",
      # version="1.5.0",
      # author="NeuML",
      # description="AI-powered literature discovery and review engine for medical/scientific papers",
      # long_description=DESCRIPTION,
      # long_description_content_type="text/markdown",
      # url="https://github.com/neuml/paperai",
      # project_urls={
      # "Documentation": "https://github.com/neuml/paperai",
      # "Issue Tracker": "https://github.com/neuml/paperai/issues",
      # "Source Code": "https://github.com/neuml/paperai",
      # },
      # C:\Users\sxm\Desktop\paperai
      # project_urls={
      # "Documentation": "C:\\Users\\sxm\\Desktop\\paperai",
      # "Source Code": "C:\\Users\\sxm\\Desktop\\paperai",
      #},
      # NOTE(review): "license" should be the license name only, not a path
      license="Apache 2.0: C:\\Users\\sxm\\Desktop\\paperai\\LICENSE",
      # NOTE(review): absolute "where" path; package_dir below uses the
      # relative "src\\python" — these two should agree
      packages=find_packages(where="C:\\Users\\sxm\\Desktop\\paperai\\src\\python"),
      package_dir={"": "src\\python"},
      keywords="search embedding machine-learning nlp covid-19 medical scientific papers",
      python_requires=">=3.6",
      entry_points={
          "console_scripts": [
              "paperai = paperai.shell:main",
          ],
      },
      install_requires=[
          "html2text>=2020.1.16",
          # "mdv>=1.7.4",
          "networkx>=2.4",
          "PyYAML>=5.3",
          "regex>=2020.5.14",
          "txtai>=1.4.0",
          "txtmarker>=1.0.0"
      ],
      classifiers=[
          "License :: OSI Approved :: Apache Software License",
          "Operating System :: OS Independent",
          "Programming Language :: Python :: 3",
          "Topic :: Scientific/Engineering :: Artificial Intelligence",
          "Topic :: Software Development",
          "Topic :: Text Processing :: Indexing",
          "Topic :: Utilities"
]) | 1.445313 | 1 |
hit_analysis/image/cut_reconstruction.py | credo-science/credo-classify | 0 | 17275 | from io import BytesIO
from typing import List, Dict
from PIL import Image
from hit_analysis.commons.config import Config
from hit_analysis.commons.consts import IMAGE, CROP_X, CROP_Y, CROP_SIZE, FRAME_DECODED, CLASSIFIED, CLASS_ARTIFACT, ORIG_IMAGE
def append_to_frame(image: Image, detection: dict):
    """Paste a detection's hit crop back into the full frame at its crop position.

    Then replicates the crop's rightmost column, bottom row and bottom-right
    corner pixel one pixel outward, compensating for a bug in the early CREDO
    Detector app where the black-filled boundary was 1 px too large.
    """
    hit_img = detection.get(IMAGE)
    cx = detection[CROP_X]
    cy = detection[CROP_Y]
    w, h = detection[CROP_SIZE]
    image.paste(hit_img, (cx, cy, cx + w, cy + h))
    # fix bug in early CREDO Detector App: black filled boundary 1px too large
    # NOTE: the crops below read back from `image`, so they must run AFTER the
    # paste above — do not reorder these statements
    image.paste(image.crop((cx + w - 1, cy, cx + w, cy + h)), (cx + w, cy, cx + w + 1, cy + h))
    image.paste(image.crop((cx, cy + h - 1, cx + w, cy + h)), (cx, cy + h, cx + w, cy + h + 1))
    image.paste(image.crop((cx + w - 1, cy + h - 1, cx + w, cy + h)), (cx + w, cy + h, cx + w + 1, cy + h + 1))
def replace_from_frame(image: Image, detection: dict):
    """Cut the detection's region out of the reconstructed frame and store it.

    Keeps the previous hit image under ORIG_IMAGE, replaces IMAGE with the
    freshly cut crop and stores its PNG-encoded bytes under FRAME_DECODED.
    """
    left = detection.get(CROP_X)
    top = detection.get(CROP_Y)
    width, height = detection.get(CROP_SIZE)
    cropped = image.crop((left, top, left + width, top + height))
    # preserve the original hit image before overwriting it
    detection[ORIG_IMAGE] = detection[IMAGE]
    detection[IMAGE] = cropped
    with BytesIO() as buffer:
        cropped.save(buffer, format="png")
        detection[FRAME_DECODED] = buffer.getvalue()
def do_reconstruct(detections: List[dict], config: Config) -> None:
    """
    Reconstruction the fill by black cropped frame in CREDO Detector app v2.
    The detection[x]['frame_decoded'] will be replaced by new value, old value will be stored in detection[x]['frame_decoded_orig'].
    No any changes when count of detections is less or equal 1
    :param detections: should be sorted by detection_id
    :param config: config object
    """
    if len(detections) <= 1:
        return
    # sub-path components: device_id / timestamp of the first detection
    sp = [str(detections[0].get('device_id')), str(detections[0].get('timestamp'))]
    # start from an all-black frame of the detection's sensor size
    image = Image.new('RGBA', (detections[0].get('width'), detections[0].get('height')), (0, 0, 0))
    edge = 'no_edge'
    for d in detections:
        if d.get('edge'):
            edge = 'edge'
    # paste crops oldest-last (reversed) so earlier detections win overlaps,
    # saving each original crop as PNG along the way
    for d in reversed(detections):
        append_to_frame(image, d)
        config.store_png(['recostruct', edge, *sp, 'orig'], d.get('id'), d.get(IMAGE))
    # re-cut every detection from the reconstructed frame and save the result
    for d in detections:
        replace_from_frame(image, d)
        config.store_png(['recostruct', edge, *sp], d.get('id'), d.get(IMAGE))
    if config.out_dir:
        image.save('%s/recostruct/%s/%s/frame.png' % (config.out_dir, edge, "/".join(sp)))
def check_all_artifacts(detections: List[dict]) -> bool:
    """
    Check whether every detection in the list is classified as an artifact.
    :param detections: list of detections to check
    :return: True when all detections are artifacts (vacuously True for an empty list)
    """
    return all(detection.get(CLASSIFIED) == CLASS_ARTIFACT for detection in detections)
def filter_unclassified(by_timestamp: Dict[int, List[dict]]) -> List[int]:
    """
    Select timestamps whose detection group contains at least one detection
    that is not classified as an artifact.
    :param by_timestamp: detections grouped by timestamp
    :return: list of filtered timestamp keys
    """
    return [
        timestamp
        for timestamp, detections in by_timestamp.items()
        if not check_all_artifacts(detections)
    ]
| 2.21875 | 2 |
tests/st/fallback/control_flow/test_fallback_010_if_in_if.py | httpsgithu/mindspore | 1 | 17276 | <filename>tests/st/fallback/control_flow/test_fallback_010_if_in_if.py
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test graph fallback control flow if in if scenario"""
import pytest
import numpy as np
from mindspore import Tensor, ms_function, context
# All tests in this module run under graph mode so the JIT fallback path is exercised.
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_in_if_1():
    """
    Feature: JIT Fallback
    Description: Test fallback with control flow.
    Expectation: No exception.
    """
    @ms_function
    def control_flow_if_in_if():
        x = Tensor(1)
        y = Tensor(2)
        if x > Tensor(0):
            if y > Tensor(1):
                return y + 1
            return x + 1
        return x + y
    # x=1 > 0 and y=2 > 1: the inner branch returns y + 1 == 3
    res = control_flow_if_in_if()
    assert res == 3
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_in_if_2():
    """
    Feature: JIT Fallback
    Description: Test fallback with control flow.
    Expectation: No exception.
    """
    @ms_function
    def control_flow_if_in_if():
        x = Tensor(1)
        y = Tensor(0)
        if x > Tensor(0):
            if y > Tensor(1):
                return y + 1
            return x + 1
        return x + y
    # x=1 > 0 but y=0 <= 1: the outer branch falls through to x + 1 == 2
    res = control_flow_if_in_if()
    assert res == 2
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_in_if_3():
    """
    Feature: JIT Fallback
    Description: Test fallback with control flow.
    Expectation: No exception.
    """
    @ms_function
    def control_flow_if_in_if():
        x = Tensor(-2)
        y = Tensor(-3)
        if x > Tensor(0):
            if y > Tensor(1):
                return y + 1
            return x + 1
        return x + y
    # x=-2 <= 0: neither nested branch is taken, returns x + y == -5
    res = control_flow_if_in_if()
    assert res == -5
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_in_if_4():
    """
    Feature: JIT Fallback
    Description: Test fallback with control flow.
    Expectation: No exception.
    """
    @ms_function
    def control_flow_if_in_if():
        x = np.array([1, 2, 3, 4, 5])
        y = x % 2
        z = Tensor(y)
        if (x >= y).all():
            if sum(z) > Tensor(2):
                z = Tensor(x) + 1
        return z
    # (x >= y).all() holds and sum(z)=3 > 2, so z becomes Tensor(x) + 1
    res = control_flow_if_in_if()
    assert np.all(res.asnumpy() == np.array([2, 3, 4, 5, 6]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_in_if_5():
    """
    Feature: JIT Fallback
    Description: Test fallback with control flow.
    Expectation: No exception.
    """
    @ms_function
    def control_flow_if_in_if():
        x = list([1, 2, 3, 4])
        if max(x) >= 4:
            y = Tensor(sum(x) + max(x))
            if y < Tensor(10):
                return y
            return y - 10
        return x
    # max(x)=4 >= 4, y = Tensor(14); y >= 10, so y - 10 == 4 is returned
    res = control_flow_if_in_if()
    assert res == 4
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_else_in_if_else_1():
    """
    Feature: JIT Fallback
    Description: Test fallback with control flow.
    Expectation: No exception.
    """
    @ms_function
    def control_flow_if_in_if():
        x = Tensor(10)
        y = Tensor(7)
        if x - y > Tensor(np.array([0])):
            x = x - Tensor(3)
            if x - y > Tensor(0):
                x = x - Tensor(4)
            else:
                x = x + Tensor(4)
            x = x * 2
        return x - 1
    # 10-7=3 > 0 -> x=7; 7-7=0 not > 0 -> x=11; x=22; returns 21
    res = control_flow_if_in_if()
    assert res == 21
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_else_in_if_else_2():
    """
    Feature: JIT Fallback
    Description: Test fallback with control flow.
    Expectation: No exception.
    """
    @ms_function
    def control_flow_if_in_if():
        x = Tensor(10)
        y = Tensor(7)
        if x - y > Tensor(np.array([10])):
            x = x - Tensor(3)
            if x - y > Tensor(0):
                x = x - Tensor(4)
            else:
                x = x + Tensor(4)
            x = x * 2
        else:
            if x > Tensor(15):
                m = np.array([1, 2, 3, 4, 5])
            elif x < Tensor(-10):
                return Tensor(sum(np.array([5, 4, 3, 2, 1])))
            else:
                m = np.array([-1, -2, -3, -4, -5])
            x = Tensor(sum(m))
        return x - 1
    # 3 not > 10 -> outer else; 10 not > 15, not < -10 -> m sums to -15; returns -16
    res = control_flow_if_in_if()
    assert res == -16
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_in_if_multi_conds():
    """
    Feature: JIT Fallback
    Description: Test fallback with control flow.
    Expectation: No exception.
    """
    @ms_function
    def control_flow_if_in_if():
        x = np.array([1, 2, 3, 4])
        y = np.array([4, 5, 6])
        if max(x) <= min(y) and sum(x) == 10:
            x += 3
            if max(x) <= max(y):
                m = Tensor(10)
            elif min(x) != max(y) or x.size > y.size:
                m = Tensor(20)
            else:
                m = Tensor(0)
        else:
            m = Tensor(1)
        return m
    # 4<=4 and sum==10 -> x=[4,5,6,7]; max(x)=7 > 6, min(x)=4 != 6 -> m = 20
    res = control_flow_if_in_if()
    assert res == 20
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_in_if_multi_conds_2():
    """
    Feature: JIT Fallback
    Description: Test fallback with control flow.
    Expectation: No exception.
    """
    @ms_function
    def control_flow_if_in_if():
        x = Tensor(10)
        y = Tensor(2)
        if x > y and x % y == Tensor(0):
            x -= Tensor(3)
            if x < y:
                m = Tensor(10)
            elif x > y or x % y == Tensor(0):
                m = Tensor(20)
            else:
                m = x + y
        else:
            m = Tensor(0)
        return m
    # 10>2 and 10%2==0 -> x=7; 7 not < 2; 7 > 2 -> m = 20
    res = control_flow_if_in_if()
    assert res == 20
| 2.484375 | 2 |
EPOpt/SpectrumAnalysis.py | ruixueqingyang/GPOEO | 5 | 17277 | <filename>EPOpt/SpectrumAnalysis.py
# -*- coding: utf-8 -*-
from scipy.fftpack import fft, fftshift, ifft
from scipy.fftpack import fftfreq
from scipy.signal import find_peaks
from scipy import signal
import numpy as np
import matplotlib.pyplot as plt
import pickle
import warnings
import sys
from scipy.signal.filter_design import maxflat
warnings.filterwarnings("ignore")
# Module-level state shared by the period-detection routines below.
MAPEMax = 1e4  # sentinel "infinite" MAPE returned for rejected candidate periods
MAPEStdMax = 1e4  # sentinel "infinite" MAPE standard deviation
FigCount = 0  # running index used to name figures saved by TCompute
TLowBoundBase = 0.4  # base lower bound (seconds) for an acceptable period
TLowBound = TLowBoundBase  # current lower bound; TCompute may scale short periods up to it
TRound = 6  # NOTE(review): not referenced in this chunk — confirm it is used elsewhere
SetTLowBound = False  # whether TLowBound has already been adapted at runtime
GrpFigCount = 0  # NOTE(review): not referenced in this chunk — confirm it is used elsewhere
def lcm(x, y):
    """Return the least common multiple of two integers.

    Computes gcd(x, y) with the Euclidean algorithm, then lcm = |x*y| / gcd.
    Returns 0 when either argument is 0 (the original implementation raised
    ZeroDivisionError in that case); negative inputs yield a non-negative lcm.
    """
    if x == 0 or y == 0:
        # lcm(n, 0) is defined as 0
        return 0
    a, b = abs(x), abs(y)
    while b:
        a, b = b, a % b
    return abs(x * y) // a
# wfr 20210107 find approximate common-multiple periods of a set of short periods
def ApproximateLCM(arrayFactor, minALCM):
    """Return candidate periods >= minALCM that are near-integer multiples
    of every short period in arrayFactor (max per-period error < 10%)."""
    arrayALCM = np.array([])
    # start from a multiple of the largest factor when rounding down, else from minALCM
    if np.round(minALCM/np.max(arrayFactor)) - minALCM/np.max(arrayFactor) < 0:
        ALCM = np.max(arrayFactor) * np.round(minALCM / np.max(arrayFactor))
    else:
        ALCM = minALCM
    # tmpALCM = minALCM
    # maxErr = 1.0 # maximum percentage error
    while True:
        arrayTmp = ALCM / arrayFactor
        arrayInteger = np.round(arrayTmp) # nearest integer multiple of each period
        arrayDecimal = np.abs(arrayTmp - arrayInteger) # fractional distance to that multiple
        arrayErr = arrayDecimal / arrayInteger
        maxErr = np.max(arrayErr)
        if maxErr < 0.1:
            if len(arrayALCM) > 0:
                # collect a few alternatives, but not too many: a 1x and a 2x period
                # cannot be distinguished, so candidates must stay below ~2x the first
                if (ALCM / arrayALCM[0] > 1.3) \
                    or (len(arrayALCM) >= len(arrayFactor)) \
                    or (arrayALCM[-1]-arrayALCM[0] > 0.6 * np.max(arrayFactor)):
                    break
            arrayALCM = np.append(arrayALCM, ALCM)
        ALCM += 0.2 * np.max(arrayFactor)
    return arrayALCM
# wfr 20210130 first compute the N-quantile points, then pick the quantile whose
# neighbourhood holds the densest set of samples.
# PctRange expresses the density requirement: the fraction of all samples that
# must fall inside the symmetric neighbourhood of the chosen quantile point.
def FindClusterCenter(arrayP, N, PctRange):
    """Return (densest quantile value, smallest half-range containing PctRange
    of the samples around it)."""
    arrayTmpIndex = np.argsort(arrayP) # indices that sort arrayP ascending
    PointCount = N - 1 # wfr 20210127 compute the N-quantile points
    arrayPoint = np.linspace(0, 1, PointCount + 2)[1:-1]
    arrayPoint = np.floor(arrayPoint * len(arrayP))
    arrayPoint = arrayP[arrayTmpIndex[arrayPoint.astype(int)]]
    arrayHalfRange = np.ones(PointCount) * (arrayP[arrayTmpIndex[-1]] - arrayP[arrayTmpIndex[0]])
    # wfr 20210130 neighbourhood size: e.g. 0.3 means the neighbourhood must
    # contain 30% of all samples; we search for the symmetric interval achieving it
    # PctRange = 0.3
    SampleCount = len(arrayP)
    for i in range(PointCount):
        # initialise the half-width search bounds
        HalfRangeMin = 0
        HalfRangeMax = np.max([ arrayP[arrayTmpIndex[-1]] - arrayPoint[i], arrayPoint[i] - arrayP[arrayTmpIndex[0]] ])
        # stop once the half-width bracket is below a small threshold
        while HalfRangeMax - HalfRangeMin > 0.03 * (arrayP[arrayTmpIndex[-1]] - arrayP[arrayTmpIndex[0]]):
            # binary search: try the midpoint of the bracket
            HalfRange = (HalfRangeMin + HalfRangeMax) / 2
            # count samples inside the candidate interval
            SampleCountIn = np.sum( ((arrayPoint[i]-HalfRange) < arrayP) & (arrayP < (arrayPoint[i]+HalfRange)) )
            # tighten the bracket
            if SampleCountIn / SampleCount < PctRange:
                HalfRangeMin = HalfRange
            elif SampleCountIn / SampleCount >= PctRange:
                HalfRangeMax = HalfRange
        arrayHalfRange[i] = HalfRange
    arrayRangeIndex = np.argsort(arrayHalfRange)
    # wfr 20210130 return the quantile with the densest neighbourhood and the
    # minimal half-range reaching PctRange
    return arrayPoint[arrayRangeIndex[0]], arrayHalfRange[arrayRangeIndex[0]]
# wfr 20210126 clustering: merge intervals so we do not end up with too many intervals
def GrpClustering(arrayP, GrpFactor):
    """Partition one period region of samples into contiguous index groups.

    Returns an ascending array of group-boundary indices into arrayP, always
    starting at 0 and ending at len(arrayP).
    NOTE(review): GrpFactor is not used inside this function — confirm intent.
    """
    Mean = np.mean(arrayP)  # NOTE(review): unused local
    # wfr 20210130 choose the initial cluster centre among the 5-quantile points:
    # the one whose neighbourhood holds the densest 33% of samples
    Point, Diff = FindClusterCenter(arrayP, 5, 0.33)
    tmpLow = Point - Diff
    tmpUp = Point + Diff
    arrayIndexIn = np.argwhere((tmpLow <= arrayP) & (arrayP <= tmpUp)) # indices of samples inside the centre neighbourhood
    arrayIndexIn = arrayIndexIn[:, 0]
    if len(arrayIndexIn) < 1:
        print("DistributionMAPE: ERROR: 在中位数邻域没找到采样点")
    elif len(arrayIndexIn) > 1:
        arrayTmp = (arrayIndexIn[1:] - arrayIndexIn[:(-1)]) > 1 # index gap > 1 marks an interval boundary
        arrayBeginFlag = np.insert(arrayTmp, 0, True) # interval start points flagged True
        arrayEndFlag = np.append(arrayTmp, True) # interval end points (inclusive) flagged True
        arrayIndexBegin = arrayIndexIn[arrayBeginFlag] # start index of each interval
        arrayIndexEnd = arrayIndexIn[arrayEndFlag] + 1 # end index (exclusive) of each interval
    else:
        arrayIndexBegin = arrayIndexIn
        arrayIndexEnd = arrayIndexIn + 1
    arrayIndexBeginOrigin = arrayIndexBegin
    arrayIndexEndOrigin = arrayIndexEnd
    # there are len(arrayIndexBegin)+1 gaps to try to merge (including the two outer ones)
    arrayIsolateFlag = np.zeros(len(arrayIndexBegin)).astype(bool) # whether merging was already attempted per interval
    stdP = np.std(arrayP) * 0.999 # avoid the ambiguous exactly-equal comparison case
    # wfr 20210202 tolerance constants
    # MeanErrorShort = 0.04 # mean-error tolerance of the interval about to be merged
    MeanErrorGap = 0.04 # mean-error tolerance of the gap about to be merged
    MeanErrorGrp = 0.03 # mean-error tolerance of the merged interval
    ShortGrpPct = 0.04 # very-short threshold; shorter intervals/gaps use a larger tolerance
    ScaleFactor = 2 # tolerance scaling factor
    arrayIsolateIndex = np.argsort(-1 * (arrayIndexEnd - arrayIndexBegin))
    i = arrayIsolateIndex[0]
    while 0 <= i and i <= len(arrayIndexBegin) - 1:
        meanGrp = np.mean(arrayP[arrayIndexBegin[i]: arrayIndexEnd[i]])
        isMerged = True
        while isMerged == True:
            isMerged = False
            # wfr 20210127 try merging towards the left
            if i == 0:
                if arrayIndexBegin[0] != 0: # try to absorb the leftmost gap
                    tmpStd = np.std(arrayP[0: arrayIndexEnd[0]])
                    tmpMean = np.mean(arrayP[0: arrayIndexEnd[0]])
                    tmpMeanGap = np.mean(arrayP[0: arrayIndexBegin[0]])
                    # wfr 20210202 very short gap: enlarge the tolerance
                    if arrayIndexBegin[0] <= ShortGrpPct * len(arrayP):
                        tmpMeanErrorGap = ScaleFactor * MeanErrorGap
                    else:
                        tmpMeanErrorGap = MeanErrorGap
                    # wfr 20210202 very short interval: enlarge the tolerance
                    if arrayIndexEnd[0] <= ShortGrpPct * len(arrayP):
                        tmpMeanErrorGrp = ScaleFactor * MeanErrorGrp
                        tmpStdP = ScaleFactor * stdP
                    else:
                        tmpMeanErrorGrp = MeanErrorGrp
                        tmpStdP = stdP
                    if tmpStd < tmpStdP and np.abs((meanGrp - tmpMean) / np.mean([meanGrp, tmpMean])) < tmpMeanErrorGrp \
                        and np.abs((meanGrp - tmpMeanGap) / np.mean([meanGrp, tmpMeanGap])) < tmpMeanErrorGap:
                        # merge succeeded
                        arrayIndexBegin[0] = 0
                        meanGrp = tmpMean
                        isMerged = True
            elif True or arrayIsolateFlag[i-1] == False: # try merging the left neighbour interval and the gap
                tmpStd = np.std(arrayP[arrayIndexBegin[i - 1]: arrayIndexEnd[i]])
                tmpMean = np.mean(arrayP[arrayIndexBegin[i - 1]: arrayIndexEnd[i]])
                # tmpMeanPrev = np.mean(arrayP[arrayIndexBegin[i - 1]: arrayIndexBegin[i]])
                tmpMeanGap = np.mean(arrayP[arrayIndexEnd[i - 1]: arrayIndexBegin[i]])
                # wfr 20210202 very short gap: enlarge the tolerance
                if arrayIndexBegin[i] - arrayIndexEnd[i-1] <= ShortGrpPct * len(arrayP):
                    tmpMeanErrorGap = ScaleFactor * MeanErrorGap
                else:
                    tmpMeanErrorGap = MeanErrorGap
                # wfr 20210202 very short interval: enlarge the tolerance
                if arrayIndexEnd[i] - arrayIndexBegin[i-1] <= ShortGrpPct * len(arrayP):
                    tmpMeanErrorGrp = ScaleFactor * MeanErrorGrp
                    tmpStdP = ScaleFactor * stdP
                else:
                    tmpMeanErrorGrp = MeanErrorGrp
                    tmpStdP = stdP
                if tmpStd < tmpStdP and np.abs((meanGrp - tmpMean) / np.mean([meanGrp, tmpMean])) < tmpMeanErrorGrp \
                    and np.abs((meanGrp - tmpMeanGap) / np.mean([meanGrp, tmpMeanGap])) < tmpMeanErrorGap:
                    # and np.abs((meanGrp - tmpMeanPrev) / np.mean([meanGrp, tmpMeanPrev])) < tmpMeanError1:
                    # merge succeeded
                    arrayIndexBegin = np.delete(arrayIndexBegin, i)
                    arrayIndexEnd = np.delete(arrayIndexEnd, i - 1)
                    arrayIsolateFlag = np.delete(arrayIsolateFlag, i - 1)
                    meanGrp = tmpMean
                    isMerged = True
                    i -= 1
            # wfr 20210127 try merging towards the right
            if i == len(arrayIndexBegin) - 1:
                if arrayIndexEnd[-1] != len(arrayP): # try to absorb the rightmost gap
                    tmpStd = np.std(arrayP[arrayIndexBegin[i]:])
                    tmpMean = np.mean(arrayP[arrayIndexBegin[i]:])
                    tmpMeanGap = np.mean(arrayP[arrayIndexEnd[i]:])
                    # wfr 20210202 very short gap: enlarge the tolerance
                    if len(arrayP) - arrayIndexEnd[i] <= ShortGrpPct * len(arrayP):
                        tmpMeanErrorGap = ScaleFactor * MeanErrorGap
                    else:
                        tmpMeanErrorGap = MeanErrorGap
                    # wfr 20210202 very short interval: enlarge the tolerance
                    if len(arrayP) - arrayIndexBegin[i] <= ShortGrpPct * len(arrayP):
                        tmpMeanErrorGrp = ScaleFactor * MeanErrorGrp
                        tmpStdP = ScaleFactor * stdP
                    else:
                        tmpMeanErrorGrp = MeanErrorGrp
                        tmpStdP = stdP
                    if tmpStd < tmpStdP and np.abs((meanGrp - tmpMean) / np.mean([meanGrp, tmpMean])) < tmpMeanErrorGrp \
                        and np.abs((meanGrp - tmpMeanGap) / np.mean([meanGrp, tmpMeanGap])) < tmpMeanErrorGap:
                        # merge succeeded
                        arrayIndexEnd[-1] = len(arrayP)
                        meanGrp = tmpMean
                        isMerged = True
            elif True or arrayIsolateFlag[i+1] == False: # try merging the right neighbour interval and the gap
                tmpStd = np.std(arrayP[arrayIndexBegin[i]: arrayIndexEnd[i + 1]])
                tmpMean = np.mean(arrayP[arrayIndexBegin[i]: arrayIndexEnd[i + 1]])
                tmpMeanGap = np.mean(arrayP[arrayIndexEnd[i]: arrayIndexBegin[i + 1]])
                # tmpMeanBack = np.mean(arrayP[arrayIndexEnd[i]: arrayIndexEnd[i + 1]])
                # wfr 20210202 very short gap: enlarge the tolerance
                if arrayIndexBegin[i+1] - arrayIndexEnd[i] <= ShortGrpPct * len(arrayP):
                    tmpMeanErrorGap = ScaleFactor * MeanErrorGap
                else:
                    tmpMeanErrorGap = MeanErrorGap
                # wfr 20210202 very short interval: enlarge the tolerance
                if arrayIndexEnd[i+1] - arrayIndexBegin[i] <= ShortGrpPct * len(arrayP):
                    tmpMeanErrorGrp = ScaleFactor * MeanErrorGrp
                    tmpStdP = ScaleFactor * stdP
                else:
                    tmpMeanErrorGrp = MeanErrorGrp
                    tmpStdP = stdP
                if tmpStd < tmpStdP and np.abs((meanGrp - tmpMean) / np.mean([meanGrp, tmpMean])) < tmpMeanErrorGrp \
                    and np.abs((meanGrp - tmpMeanGap) / np.mean([meanGrp, tmpMeanGap])) < tmpMeanErrorGap:
                    # and np.abs((meanGrp - tmpMeanBack) / np.mean([meanGrp, tmpMeanBack])) < tmpMeanErrorGap \
                    # merge succeeded
                    arrayIndexBegin = np.delete(arrayIndexBegin, i + 1)
                    arrayIndexEnd = np.delete(arrayIndexEnd, i)
                    arrayIsolateFlag = np.delete(arrayIsolateFlag, i + 1)
                    meanGrp = tmpMean
                    isMerged = True
        # wfr 20210127 mark interval i as tried, then pick the longest untried interval
        arrayIsolateFlag[i] = True
        i = -1
        arrayIsolateIndex = np.argsort(-1 * (arrayIndexEnd - arrayIndexBegin))
        if len(arrayIsolateIndex) > 0:
            tmp = np.argwhere(arrayIsolateFlag[arrayIsolateIndex] == False)
            tmp = tmp[:, 0]
            if len(tmp) > 0:
                i = arrayIsolateIndex[tmp[0]]
    # wfr 20210202 sanity-check the clustering: an interval spanning nearly the whole
    # period means clustering should not have happened (it would shrink the error
    # abnormally), so restore the original sub-intervals inside that span
    arrayInterval = arrayIndexEnd - arrayIndexBegin
    for i in range(len(arrayInterval)):
        if arrayInterval[i] / len(arrayP) > 0.85:
            tmp0 = arrayIndexBegin[i] <= arrayIndexBeginOrigin
            tmp1 = arrayIndexEndOrigin <= arrayIndexEnd[i]
            tmp = np.argwhere(tmp0 & tmp1)[:, 0]
            arrayIndexBegin = np.delete(arrayIndexBegin, i)
            arrayIndexBegin = np.insert(arrayIndexBegin, i, arrayIndexBeginOrigin[tmp])
            arrayIndexEnd = np.delete(arrayIndexEnd, i)
            arrayIndexEnd = np.insert(arrayIndexEnd, i, arrayIndexEndOrigin[tmp])
            break
    # interleave begin/end indices into one boundary-index sequence
    arrayGroupIndex = np.zeros(2 * len(arrayIndexBegin)).astype(int)
    arrayGroupIndex[2 * np.arange(0, len(arrayIndexBegin))] = arrayIndexBegin
    arrayGroupIndex[1 + 2 * np.arange(0, len(arrayIndexBegin))] = arrayIndexEnd
    # prepend 0 and append len(arrayP) when missing
    if arrayGroupIndex[0] != 0:
        arrayGroupIndex = np.insert(arrayGroupIndex, 0, 0)
    if arrayGroupIndex[-1] != len(arrayP):
        arrayGroupIndex = np.append(arrayGroupIndex, len(arrayP))
    # fig = plt.figure(figsize=(8, 4))  # create a figure window
    # ax = fig.add_subplot(111)
    # ax.plot(arrayP)
    # for v in arrayGroupIndex:
    #     ax.axvline(x=v, color="black", linestyle="--", linewidth=0.5)  # vertical line at v
    # plt.show()
    # plt.close(fig)
    return arrayGroupIndex
# Compute the MAPE (Mean Absolute Percentage Error) of the power distribution
# for a candidate period T.
#   arraySample    - sampled power waveform
#   arrayTimeStamp - sample timestamps (starting at 0, uniform spacing)
#   T              - candidate period in seconds
# This function is the execution hot spot and still has vectorization headroom.
# wfr 20210108 TInit/TRef relate to the approximate-common-multiple period handling.
def DistributionMAPE(T, arraySample, arrayTimeStamp, algorithm = "mean", TInit = -1, TRef = -1):
    global MAPEMax, MAPEStdMax
    TMAPE = MAPEMax
    TMAPEStd = MAPEStdMax
    if T > 0.5 * arrayTimeStamp[-1]: # or T < 2:
        return TMAPE, TMAPEStd # reject periods longer than half the measurement window
    NumTRegion = int(arrayTimeStamp[-1] / T) # number of full periods in the window
    SampleInterval = arrayTimeStamp[1]; # sampling interval (timestamps start at 0)
    NumSample = int(T / (SampleInterval)) # samples per period
    arrayTRegionMAPE = np.array([])
    arrayIndex = np.arange(0, (NumTRegion + 0.1), 1) # last region may be incomplete, so it is dropped below
    arrayIndex = arrayIndex * NumSample
    arrayIndex = arrayIndex.astype(np.int) # start/end sample index of every full period region
    # NOTE(review): np.int is removed in NumPy >= 1.24; may need int — confirm pinned version
    # tmpOffset = len(arraySample) - arrayIndex[-1]
    # arrayIndex += int(0.5 * tmpOffset)
    # wfr 20211015 single vs. multiple periods should use different clustering group
    # counts; with TInit as reference compute a group multiplier: for T < TInit the
    # multiplier becomes 2, 3, 4, ... (each group is further split accordingly)
    GrpFactor = int(1)
    if TInit > T:
        tmp = TInit / T
        GrpFactor = int(tmp)
        if tmp - GrpFactor > 0.75:
            GrpFactor += 1
    for indexTRgn in range( NumTRegion - 1 ): # compare the power distribution of each pair of adjacent period regions
        # wfr 20210106
        arrayP0 = arraySample[ arrayIndex[indexTRgn] : arrayIndex[indexTRgn+1] ]
        arrayP1 = arraySample[ arrayIndex[indexTRgn+1] : arrayIndex[indexTRgn+2] ]
        tmpMean0 = np.mean(arrayP0)
        tmpMean1 = np.mean(arrayP1)
        tmpStd0 = np.std(arrayP0)
        tmpStd1 = np.std(arrayP1)
        # cluster the noisier of the two regions to get the group boundaries
        if tmpStd0 < tmpStd1:
            arrayGroupIndex = GrpClustering(arrayP1, GrpFactor)
        else:
            arrayGroupIndex = GrpClustering(arrayP0, GrpFactor)
        # extract the grouped arrays
        NumGroup = len(arrayGroupIndex)-1
        arrayGrouped0 = np.zeros( NumGroup )
        arrayGrouped1 = np.zeros( NumGroup )
        # split both adjacent regions into NumGroup groups and compute each group's
        # mean power deviation from its region mean
        for indexGrp in range(NumGroup):
            # wfr 20210106
            tmpArray0 = arraySample[ arrayIndex[indexTRgn]+arrayGroupIndex[indexGrp]: arrayIndex[indexTRgn]+arrayGroupIndex[indexGrp+1] ]
            tmpArray1 = arraySample[ arrayIndex[indexTRgn+1]+arrayGroupIndex[indexGrp]: arrayIndex[indexTRgn+1]+arrayGroupIndex[indexGrp+1] ]
            arrayGrouped0[indexGrp] = np.mean(tmpArray0) - tmpMean0
            arrayGrouped1[indexGrp] = np.mean(tmpArray1) - tmpMean1
        # pairwise SMAPE between the corresponding groups of the two regions
        arrayGroupMAPE = np.abs((arrayGrouped1 - arrayGrouped0) / np.mean([tmpMean0, tmpMean1]) * 100)
        if algorithm != "mean":
            # during local search use the max, not the mean — it better exposes aperiodicity
            TRegionMAPE = np.max( arrayGroupMAPE )
        else:
            # weighted average: weight each group by its number of samples
            arrayWeight = arrayGroupIndex[1:] - arrayGroupIndex[:-1]
            TRegionMAPE = np.average(arrayGroupMAPE, weights=arrayWeight)
            # TRegionMAPE = np.mean( arrayGroupMAPE )
        arrayTRegionMAPE = np.append(arrayTRegionMAPE, TRegionMAPE)
    # end
    # wfr 20210828 for small candidate periods there are many regions, some with very
    # small error (they may sit on a plateau of a larger period); group the region
    # errors into TRef/T groups and keep only each group's maximum error
    if TRef < 0 or TRef / T < 1.7:
        TMAPE = np.mean(arrayTRegionMAPE)
        TMAPEStd = np.std(arrayTRegionMAPE)
    else:
        tmpEachCount = int(np.ceil(TRef / T)) # regions per group
        tmpGrpCount = int(np.ceil(len(arrayTRegionMAPE) / tmpEachCount)) # number of groups
        tmpAppendCount = int(tmpGrpCount * tmpEachCount - len(arrayTRegionMAPE)) # elements to pad
        tmpArray = np.append(arrayTRegionMAPE, np.zeros(tmpAppendCount)) # pad with 0.0 so reshape works
        tmpArray = tmpArray.reshape(tmpGrpCount, -1) # reshape into groups [0,1,2], [3,4,5], ...
        tmpArray = np.max(tmpArray, axis=1).flatten() # max error per group
        TMAPE = np.mean(tmpArray)
        TMAPEStd = np.std(tmpArray)
    return TMAPE, TMAPEStd
# wfr 20210116 a new method is used here to decide whether the signal is aperiodic
def TCompute(arraySample, SampleInterval, TUpBound, isPlot = False, lastT=0.0, preference=0, TCandidateExtra=np.array([])):
    """Estimate the dominant period of the sampled power waveform.

    FFT-based candidate extraction, MAPE scoring via DistributionMAPE, then
    a local refinement search around the best candidate.
    :param arraySample: power samples; SampleInterval: sampling period (ms)
    :param TUpBound: maximum acceptable period (s)
    :param lastT/preference: restrict candidates near the previous period
        (preference 1 widens upward, -1 downward, 0 = no restriction)
    :return: (best period in seconds, its MAPE; MAPE is -1 when the
        measurement window was too short)
    """
    global MAPEMax, MAPEStdMax, TLowBoundBase, TLowBound, SetTLowBound
    fs = 1/(SampleInterval/1000)
    t = (SampleInterval/1000) * np.arange(0, len(arraySample), 1)
    # wfr 20210109 fft oversampling ratio; raising it increases the largest
    # recognisable period and the frequency/period resolution
    # (frequency resolution is fixed; period resolution shrinks for larger periods)
    FFTSampleRatio = 1
    # wfr 20210109 adapt the fft sample count (FFTSampleRatio) dynamically:
    # if the detected period is close to the largest resolvable one, enlarge and redo
    for i in range(2):
        num_fft = FFTSampleRatio * t.size
        # Fourier transform
        idx = fftfreq(num_fft, 1/fs)
        arrayX = idx[:num_fft//2]
        arrayY = fft(arraySample, num_fft)
        arrayY = np.abs(arrayY)
        arrayY = arrayY[:num_fft//2]
        # arrayLogY = np.log10(arrayY[:num_fft//2])
        listPeakIndex, Properties = find_peaks( arrayY )
        # frequencies and magnitudes at the spectral peaks
        arrayPeakX = arrayX[listPeakIndex]
        arrayPeak = arrayY[listPeakIndex]
        arrayPeakIndex = np.argsort(-1 * arrayPeak) # indices sorting arrayPeak descending
        # print("TCompute: len of arrayPeakX = {0}".format(len(arrayPeakX)))
        # print("arrayPeak = {0}".format(arrayPeak[arrayPeakIndex[:9]]))
        # print("Freq = {0}".format(arrayPeakX[arrayPeakIndex[:9]]))
        # print("T = {0}".format(1/arrayPeakX[arrayPeakIndex[:9]]))
        # keep the periods with the largest magnitudes, capped by the upper bound
        arrayT = 1 / arrayPeakX[arrayPeakIndex] # ordered by descending peak magnitude
        arrayPeakOrder = arrayPeak[arrayPeakIndex]
        # first drop periods above the upper bound
        tmpPeakOrder = arrayPeakOrder[(arrayT <= TUpBound)]
        arrayT = arrayT[(arrayT <= TUpBound)]
        # then drop periods whose peak is not large enough
        # arrayT = arrayT[(tmpPeakOrder > 0.65 * tmpPeakOrder[0])]
        arrayT = arrayT[(tmpPeakOrder > 0.60 * tmpPeakOrder[0])]
        arrayT = arrayT[:6]
        if len(arrayT) == 0:
            print("TCompute ERROR: 没有备选周期!")
            return TUpBound, MAPEMax
        if i == 0:
            # wfr 20210109 second-largest resolvable period; constant 3 means
            # second-largest, constant 2 would mean the largest
            TUpResolution = FFTSampleRatio * len(arraySample) / (0.5*fs) / 3
            if np.max(arrayT) > TUpResolution:
                FFTSampleRatio = np.ceil( TUpBound / len(arraySample) * (0.5*fs) * 3 )
            else:
                break
    print("arrayT = {0}".format(arrayT))
    if isPlot == True:
        # plt.clf()
        # plt.ion()
        plt.figure(figsize=(14, 6))
        ax = plt.subplot(211)
        ax.set_title('original signal')
        plt.plot(t, arraySample)
        ax = plt.subplot(212)
        ax.set_title('fft transform')
        plt.plot(arrayX, 20 * np.log10(arrayY))
        ax.set_xlim(0, 2)
        # ax.set_xlim(0, 5)
        global FigCount
        import os
        WorkDir = "./"
        FigFile = os.path.join(WorkDir, "TCompute" + str(FigCount) + ".png")
        plt.savefig(FigFile)
        FigCount += 1
        # plt.show()
        plt.close()
    if len(listPeakIndex) == 0:
        print("TCompute ERROR: 计算备选频率/周期出错")
        return TUpBound, MAPEMax
    # wfr 20210107 build approximate least-common-multiple periods to replace
    # candidates shorter than the lower bound
    arrayFactor = arrayT[ (arrayT < TLowBound) ]
    if len(arrayFactor) > 0:
        # wfr 20210108 largest factor of the common-multiple period; used later
        # by the MAPE computation and the neighbourhood search
        FactorMax = np.max(arrayFactor) # max of the below-threshold candidates
        # print("FactorMax = {0}".format(FactorMax))
        arrayALCM = ApproximateLCM(arrayFactor, TLowBound)
        # print("ALCM = {0}".format(arrayALCM))
        arrayScaleT = np.delete(arrayT, np.argwhere(arrayT < TLowBound).flatten()) # wfr 20210107 drop below-threshold candidates
        # arrayScaleT = np.append(arrayScaleT, arrayFactor[0])
        # arrayScaleT = np.insert(arrayScaleT, 0, arrayFactor[0])
        # arrayScaleT = np.insert(arrayScaleT, 0, FactorMax)
        arrayScaleT = np.append(arrayScaleT, FactorMax) # wfr 20210107 keep the largest below-threshold candidate
        LenTNormal = len(arrayScaleT) # wfr 20210108 number of normal (non-LCM) periods
        arrayScaleT = np.append(arrayScaleT, arrayALCM)
    else:
        arrayScaleT = arrayT
        LenTNormal = len(arrayScaleT) # wfr 20210108 number of normal periods
        FactorMax = 1e4
    arrayScaleT = np.append(arrayScaleT, TCandidateExtra)
    print("arrayScaleT = {0}".format(arrayScaleT[:9]))
    # preference window: keep candidates near lastT only
    if int(preference) == int(1):
        T_lower = lastT * 0.8
        T_upper = lastT * 1.5
    elif int(preference) == int(-1):
        T_lower = lastT * 0.66
        T_upper = lastT * 1.25
    if int(preference) != int(0):
        print("lastT = {:.2f}, set lowerT = {:.2f}, upperT = {:.2f}".format(lastT, T_lower, T_upper))
        print("arrayScaleT remove: [", end="")
        tmpArr = arrayScaleT.tolist()
        for cand_T in arrayScaleT:
            if cand_T < T_lower or cand_T > T_upper:
                print("{:.2f}, ".format(cand_T), end="")
                tmpArr.remove(cand_T)
        arrayScaleT = np.array(tmpArr)
        print("]")
        # if the window removed everything, seed it with 5 evenly spaced candidates
        if len(arrayScaleT) <= int(0):
            print("arrayScaleT add: [", end="")
            for cand_T in np.arange(T_lower, T_upper, (T_upper-T_lower-1e-15)/4):
                print("{:.2f}, ".format(cand_T), end="")
                arrayScaleT = np.append(arrayScaleT, cand_T)
            print("]")
    else:
        pass
        # print("==>no preference")
    sys.stdout.flush()
    # score every candidate period; lower MAPE means more consistent power behaviour
    arrayTMAPE = MAPEMax * np.ones(len(arrayScaleT))
    arrayTMAPEStd = MAPEStdMax * np.ones(len(arrayScaleT))
    TInit = arrayScaleT[0]
    for i in range( len(arrayScaleT) ):
        arrayTMAPE[i], arrayTMAPEStd[i] = DistributionMAPE(arrayScaleT[i], arraySample, t, "mean", TInit, np.max(arrayScaleT))
    print("arrayTMAPE = {0}".format(arrayTMAPE))
    # if any candidate could not be scored, the measurement window is too short:
    # report instability (MAPE == -1)
    if sum(arrayTMAPE > MAPEMax - 1) > 0:
        # if arrayTMAPE[0] > MAPEMax - 1 or int(2 * sum(arrayTMAPE > MAPEMax - 1)) >= int(len(arrayTMAPE)):
        tmpT = np.max(arrayScaleT)
        # if int(2 * sum(arrayTMAPE > MAPEMax - 1)) >= int(len(arrayTMAPE)):
        #     tmpT = np.max(arrayScaleT)
        # else:
        #     tmpT = arrayScaleT[0]
        print("TCompute: 本次测量时间不够长")
        print("TCompute: TOpt = {0:.2f} s".format(arrayScaleT[0]))
        print("TCompute: MAPEOpt = {0:.2f}".format(arrayTMAPE[0]))
        # return arrayScaleT[0], arrayTMAPE[0]
        return tmpT, -1
    # wfr 20210129 revised rule for choosing the best period from MAPE and MAPEStd:
    # first replace zeros in arrayTMAPEStd with its smallest non-zero value
    if np.sum(arrayTMAPEStd < 1e-6) == len(arrayTMAPEStd):
        arrayTMAPEStd[:] = 1
    elif np.sum(arrayTMAPEStd < 1e-6) > 0:
        arrayTMAPEStd[arrayTMAPEStd < 1e-6] = np.min(arrayTMAPEStd[arrayTMAPEStd > 1e-6])
    # arrayTError = arrayTMAPE * arrayTMAPEStd
    arrayTIndex = np.argsort(arrayTMAPE) # indices sorting MAPE ascending
    IndexOpt = arrayTIndex[0]
    # # print("arrayTMAPE = {0}".format(arrayTMAPE[:9]))
    # arrayTMAPEIndex = np.argsort(arrayTMAPE)
    # arrayTMAPEStdIndex = np.argsort(arrayTMAPEStd)
    #
    # # wfr 20210127 if min MAPE and min MAPEStd point to the same T
    # if arrayTMAPEIndex[0] == arrayTMAPEStdIndex[0]:
    #     IndexOpt = arrayTMAPEIndex[0]
    # else: # wfr 20210127 otherwise
    #     arrayPartIndex = np.argwhere(arrayTMAPE - arrayTMAPE[arrayTMAPEIndex[0]] < 0.08 * np.max(arrayScaleT))
    #     # arrayPartIndex = np.argwhere(arrayTMAPE < 1.5 * arrayTMAPE[arrayTMAPEIndex[0]])
    #     arrayPartIndex = arrayPartIndex[:,0]
    #     tmp = np.argsort(arrayTMAPEStd[arrayPartIndex])
    #     IndexOpt = arrayPartIndex[tmp[0]]
    # wfr 20210108 whether the best period is an LCM or a normal period decides the
    # max LCM factor, used by the subsequent MAPE computations
    TOpt = arrayScaleT[IndexOpt]
    MAPEOpt = arrayTMAPE[IndexOpt]
    MAPEStdOpt = arrayTMAPEStd[IndexOpt]
    np.set_printoptions(precision=2, suppress=True)
    # print("TOpt = {0}".format(TOpt))
    # print("arrayScaleT: {0}".format(arrayScaleT))
    # # print("arrayTError: {0}".format(arrayTError))
    # print("arrayTMAPE: {0}".format(arrayTMAPE))
    # # print("arrayTMAPEStd: {0}".format(arrayTMAPEStd))
    for i in range(3):
        # wfr 20210109 the fft frequency resolution is fixed, so the period
        # resolution degrades for long periods; search locally around the fft
        # period to refine it, with a dynamically sized neighbourhood
        # wfr 20210109 centre frequency and frequency step
        FreqCenter = 1 / TOpt
        FreqResolution = (0.5 * fs) / len(arraySample)
        # wfr 20210109 frequency neighbourhood first
        FreqLow = FreqCenter - 0.7 * FreqResolution
        FreqUp = FreqCenter + 0.7 * FreqResolution
        # wfr 20210109 then the period neighbourhood
        TLow = 1 / FreqUp
        if FreqLow > 0:
            TUp = 1 / FreqLow
        else:
            TUp = 1.5 * TOpt
        # wfr 20210113 ensure the search range is at least +/-15%
        TLow = np.min([0.85 * TOpt, TLow])
        TUp = np.max([1.15 * TOpt, TUp])
        # wfr 20210109 clamp by the LCM factor to avoid over-searching LCM periods
        TLow = np.max([(TOpt-0.5*FactorMax), TLow])
        # TLow = np.max([TLowBound, TLow]) # wfr 20210121 keep lower limit above TUpBound
        TUp = np.min([(TOpt+0.5*FactorMax), TUp])
        # wfr 20210109 split the range into at most ~10 steps
        TStep = (TUp - TLow) / 10
        TStep = np.min([TStep, 1]) # wfr 20210113 step capped at 1 s
        TStep = np.max([TStep, 1 * SampleInterval/1000])
        if TUp - TLow <= TStep:
            return TOpt, MAPEOpt
        # wfr 20210109 build the search sequence, including the endpoints
        arraySearchT = np.arange(TLow, TUp, TStep)
        arraySearchT = np.append(arraySearchT, TUp)
        arraySearchT = np.append(arraySearchT, TOpt)
        arraySearchTMAPE = MAPEMax * np.ones(len(arraySearchT))
        arraySearchTMAPEStd = MAPEStdMax * np.ones(len(arraySearchT))
        # score every search candidate (smaller MAPE = more periodic)
        TInit = TOpt
        # NOTE(review): this inner loop reuses `i` and shadows the outer
        # `for i in range(3)` counter; harmless in Python but confusing
        for i in range( len(arraySearchT) ):
            arraySearchTMAPE[i], arraySearchTMAPEStd[i] = DistributionMAPE(arraySearchT[i], arraySample, t, "mean", TInit, np.max(arrayScaleT))
        # arrayTIndex = np.argsort(arraySearchTMAPE)
        # IndexOpt = arrayTIndex[0]
        IndexOpt = np.argmin(arraySearchTMAPE)
        TOpt = arraySearchT[IndexOpt]
        MAPEOpt = arraySearchTMAPE[IndexOpt]
        # stop when the optimum sits in the interior (or at the appended TOpt slot)
        if IndexOpt == len(arraySearchT) - 1 or (0.2 * (len(arraySearchT)-1) < IndexOpt and IndexOpt < 0.8 * (len(arraySearchT)-1)):
            break
    print("TCompute: arraySearchT: {0}".format(arraySearchT))
    # print("TCompute: arrayTError: {0}".format(arrayTError))
    print("TCompute: arraySearchTMAPE: {0}".format(arraySearchTMAPE))
    # print("TCompute: arraySearchTMAPEStd: {0}".format(arraySearchTMAPEStd))
    # wfr 20210108 scale up periods that are too short
    if TOpt < TLowBound:
        # wfr 20210120 adaptive lower bound: keep the scaled period within one
        # original period, or within 10% if off by one
        # if SetTLowBound == False and TLowBound / TOpt > 6:
        #     TLowBound = np.ceil(10 * TOpt)
        #     SetTLowBound = True
        TOpt = TOpt * np.round(TLowBound / TOpt)
    print("TCompute: TOpt = {0:.2f} s".format(TOpt))
    # print("TCompute: MAPEOpt = {0:.2f}".format(MAPEOpt))
    # print("")
    return TOpt, MAPEOpt
def NotPeriodic(arraySample, SampleInterval, T):
    """Heuristically decide whether the power trace has no periodic structure.

    Splits the tail of ``arraySample`` into ``N`` fixed-length windows and
    declares the trace aperiodic (returns True) only when the windows agree:
    neighbouring window means are close, the relative standard deviation per
    window is small, and no sample strays too far from its window mean.

    Args:
        arraySample: 1-D sequence of power samples.
        SampleInterval: sampling interval in milliseconds.
        T: current period estimate in seconds; used as a fallback window
            length when the trace is too short for the default window.

    Returns:
        True if the trace looks aperiodic, False otherwise (including when
        the trace is too short to decide).
    """
    TFixed = 8          # window length in seconds
    N = 4               # number of windows compared
    SigmaMax = 1        # absolute-sigma bound (used only by the disabled check below)
    SigmaPctMax = 0.04  # max allowed std/mean per window
    MeanErrMax = 0.04   # max allowed relative difference of neighbouring window means
    DiffErrMax = 0.20   # max allowed |sample - mean| / mean within a window
    # Trim a few samples at both ends, which may be transient.
    arraySample = arraySample[5:-5]
    # Trace too short: retry with a window of length T; if still too short,
    # refuse to classify the trace as aperiodic.
    if (len(arraySample)-1) * (SampleInterval/1000) < N * TFixed:
        if T < TFixed:
            TFixed = T
        if (len(arraySample)-1) * (SampleInterval/1000) < N * TFixed:
            return False
    Step = int(TFixed / (SampleInterval / 1000))  # samples per window
    arrayRegion = np.array([])
    arrayMean = np.array([])
    arrayStd = np.array([])
    arrayDiffMax = np.array([])
    # Collect per-window statistics over the last N windows of the trace.
    for i in range(N):
        begin = len(arraySample) - ((i+1) * Step)
        end = len(arraySample) - (i * Step)
        arrayTmp = np.array(arraySample[begin:end])
        arrayRegion = np.append(arrayRegion, arrayTmp)
        arrayMean = np.append(arrayMean, np.mean(arrayTmp))
        arrayStd = np.append(arrayStd, np.std(arrayTmp))
        arrayDiffMax = np.append(arrayDiffMax, np.max(np.abs( arrayTmp - arrayMean[i] )))
    # If mean power of neighbouring windows differs too much, not aperiodic.
    # BUGFIX: normalise by the mean of BOTH neighbouring windows
    # (arrayMean[i:i+2]); the original slice [i:i+1] covered a single element.
    for i in range(N-1):
        if np.abs(arrayMean[i]-arrayMean[i+1]) / (np.mean(arrayMean[i:i+2])) > MeanErrMax:
            return False
    # If the (relative) standard deviation is too large, not aperiodic.
    # if np.max(arrayStd) > SigmaMax:
    #     return False
    if np.max(arrayStd/arrayMean) > SigmaPctMax:
        return False
    # If the max deviation from the window mean is too large, not aperiodic.
    if np.max(arrayDiffMax / arrayMean) > DiffErrMax:
        return False
    print("NotPeriodic: 无周期")
    return True
# wfr 20201230
def T_SpectrumAnalysis(listSample, SampleInterval, TUpBound, MeasureTFactor, TraceFileName, StrictMode="normal", lastT=-1, preference=0, isPlot = False):
    """Estimate the dominant power period of the sampled trace and judge
    whether the estimate is stable.

    Runs TCompute on the full trace and on progressively shorter suffixes,
    collects the candidate periods in arrayT (with their SMAPE scores in
    arraySMAPE), and decides from their spread whether the estimate has
    converged.

    Returns:
        (T, isStable, MeasureDurationNext): period estimate in seconds, a
        stability flag, and the additional measurement time requested in
        seconds (-1 means no further measurement is requested).

    NOTE(review): relies on the module globals MAPEMax, FigCount,
    TLowBoundBase, TLowBound, SetTLowBound and on TCompute/NotPeriodic
    defined elsewhere in this module.
    """
    global MAPEMax, FigCount, TLowBoundBase, TLowBound, SetTLowBound
    FigCount = 0
    TLowBound = TLowBoundBase
    SetTLowBound = False
    # print("SpectrumAnalysis: 采样点数量 = {0}".format(len(listSample)))
    arraySample = np.array(listSample) # original note: drop the initial samples (may be abnormally low); NOTE(review): no trimming actually happens here
    MeasureDuration = (len(arraySample)-1) * (SampleInterval/1000)  # trace length in seconds
    arrayT = np.array([])       # candidate periods from successive suffixes
    arraySMAPE = np.array([])   # SMAPE score of each candidate
    isStable = False
    MeasureDurationNext = -1
    # Save the raw samples to a file.
    if len(TraceFileName) > 0:
        FileDir = "./"+TraceFileName+".pkl"
        pickle.dump(listSample, open(FileDir, "wb"))
    # Low-pass filtering (disabled):
    # sampling frequency 10 Hz; components above Threshold Hz would be removed
    # SampleFreq = 1/(SampleInterval/1000)
    # Threshold = 2
    # Wn = 2 * Threshold / SampleFreq
    # b, a = signal.butter(8, Wn, 'lowpass')
    # SampleFilted = signal.filtfilt(b, a, arraySample)
    SampleFilted = arraySample
    tmpT, tmpSMAPE = TCompute(SampleFilted, SampleInterval, TUpBound, isPlot)
    # If the measurement is not long enough, return "unstable" right away.
    # wfr 20211027 detect aperiodic applications early and return early
    if MeasureDuration >= 3 * TUpBound and tmpT > TUpBound:
        T = TUpBound
        isStable = False
        MeasureDurationNext = -1
        return T, isStable, MeasureDurationNext
    elif MeasureDuration >= 3 * TUpBound:
        tmpIndex = int(1/(SampleInterval/1000) * 0.33 * TUpBound)
        tmpT1, tmpSMAPE1 = TCompute(SampleFilted[tmpIndex:], SampleInterval, TUpBound, isPlot)
        tmpIndex = int(1/(SampleInterval/1000) * 0.66 * TUpBound)
        tmpT2, tmpSMAPE2 = TCompute(SampleFilted[tmpIndex:], SampleInterval, TUpBound, isPlot)
        tmpArrayT = np.array([tmpT, tmpT1, tmpT2])
        SMAPE = abs( (np.max(tmpArrayT)-np.min(tmpArrayT)) / np.mean(tmpArrayT) )
        if SMAPE > 0.3:
            T = min(TUpBound, tmpT)
            isStable = False
            MeasureDurationNext = -1
            return T, isStable, MeasureDurationNext
    elif tmpT > (0.5 * MeasureDuration):
        T = tmpT
        isStable = False
        if MeasureDuration < 3 * TUpBound:
            MeasureDurationNext = max(0.5 * tmpT, (3.1 * TUpBound - MeasureDuration)) # measure at least 3.1 periods
        else:
            MeasureDurationNext = max(0.5 * tmpT, (MeasureTFactor * tmpT - MeasureDuration)) # measure at least MeasureTFactor (5) periods
        print("T_SpectrumAnalysis: 本次测量时间不够长")
        print("T_SpectrumAnalysis: TOpt = {0:.2f} s".format(tmpT))
        print("T_SpectrumAnalysis: isStable = {0}".format(isStable))
        print("T_SpectrumAnalysis: MeasureDurationNext = {0}".format(MeasureDurationNext))
        return T, isStable, MeasureDurationNext
    arrayT = np.append(arrayT, tmpT)
    arraySMAPE = np.append(arraySMAPE, tmpSMAPE)
    StepFactor = 0.5
    MinStep = MeasureDuration / 10
    tmpStep = np.max([tmpT * StepFactor, MinStep])
    MeasureDurationLeft = MeasureDuration - tmpStep
    while MeasureDurationLeft / np.max(arrayT) >= 2.8:
        # starting index of the remaining samples
        tmpIndexBegin = int( (MeasureDuration-MeasureDurationLeft) / (SampleInterval/1000) )
        arrayPart = SampleFilted[tmpIndexBegin:]
        # tmpT, tmpSMAPE = TCompute(arrayPart, SampleInterval, TUpBound, isPlot, lastT, int(preference), arrayT)
        # tmpT, tmpSMAPE = TCompute(arrayPart, SampleInterval, TUpBound, isPlot, lastT, int(preference), arrayT[-1])
        tmpT, tmpSMAPE = TCompute(arrayPart, SampleInterval, TUpBound, isPlot, lastT, int(preference), [])
        arrayT = np.append(arrayT, tmpT)
        arraySMAPE = np.append(arraySMAPE, tmpSMAPE)
        if tmpT > (0.5 * MeasureDurationLeft):
            T = tmpT
            isStable = False
            if MeasureDuration < 3 * TUpBound:
                MeasureDurationNext = max(0.5 * tmpT, (3.1 * TUpBound - MeasureDuration)) # measure at least 3.1 periods
            else:
                MeasureDurationNext = max(0.5 * tmpT, (MeasureTFactor * tmpT - MeasureDuration)) # measure at least MeasureTFactor (5) periods
            print("T_SpectrumAnalysis: 本次测量时间不够长")
            print("T_SpectrumAnalysis: TOpt = {0:.2f} s".format(tmpT))
            print("T_SpectrumAnalysis: isStable = {0}".format(isStable))
            print("T_SpectrumAnalysis: MeasureDurationNext = {0}".format(MeasureDurationNext))
            return T, isStable, MeasureDurationNext
        # Subtract roughly one period per iteration, with a lower bound on
        # the step so it does not become too small.
        tmpStep = np.max([tmpT * StepFactor, MinStep])
        MeasureDurationLeft = MeasureDurationLeft - tmpStep
        # MeasureDurationLeft = MeasureDurationLeft - np.mean(arrayT)
    print("T_SpectrumAnalysis: arrayT: {0}".format(arrayT))
    print("T_SpectrumAnalysis: arraySMAPE: {0}".format(arraySMAPE))
    # wfr 20210828 if measurement length / best period < MeasureTFactor, the measurement is considered too short
    tmpIndex = np.argwhere(arraySMAPE < 0).flatten() # SMAPE < 0 means TCompute judged the measurement too short
    arraySMAPE[tmpIndex] = np.min(arraySMAPE)
    tmpIndex = np.argsort(arraySMAPE).flatten() # indices that sort arraySMAPE ascending
    # tmpT = np.max(arrayT[tmpIndex])
    tmpT = arrayT[0]
    if MeasureDuration / tmpT < MeasureTFactor:
        T = tmpT
        isStable = False
        if MeasureDuration < 3 * TUpBound:
            MeasureDurationNext = max(0.5 * tmpT, (3.1 * TUpBound - MeasureDuration)) # measure at least 3.1 periods
        else:
            MeasureDurationNext = max(0.5 * tmpT, (MeasureTFactor * tmpT - MeasureDuration)) # measure at least MeasureTFactor (5) periods
        print("T_SpectrumAnalysis: 本次测量时间不够长")
        print("T_SpectrumAnalysis: TOpt = {0:.2f} s".format(T))
        print("T_SpectrumAnalysis: isStable = {0}".format(isStable))
        print("T_SpectrumAnalysis: MeasureDurationNext = {0}".format(MeasureDurationNext))
        return T, isStable, MeasureDurationNext
    # SMAPE thresholds controlling how strictly "stable" is judged.
    if StrictMode == "strict":
        tmpThreshold = 0.10
    elif StrictMode == "relaxed":
        tmpThreshold = 0.30
    else:
        tmpThreshold = 0.15
    tmpThreshold1 = tmpThreshold + 0.05
    LenT = len(arrayT)
    LenMin = 3
    LenMax = 8
    tmpIndex = np.argsort(arraySMAPE) # indices that sort arraySMAPE ascending
    T = arrayT[tmpIndex[0]]
    tmpIndex = np.argsort(arrayT) # indices that sort arrayT ascending (original comment said SMAPE)
    IndexMiddle = tmpIndex[round(len(tmpIndex)/2)]
    TMiddle = arrayT[IndexMiddle]
    if LenT < LenMin: # wfr: the measured interval is still fairly short
        T = np.mean(arrayT[(0.65*T<arrayT)&(arrayT<1.35*T)])
        SMAPE = abs( (np.max(arrayT)-np.min(arrayT)) / np.mean(arrayT) )
        print("T_SpectrumAnalysis: SMAPE = {0:.2f}".format(SMAPE))
        isStable = False
        if MeasureDuration > 2.1 * TUpBound and SMAPE > tmpThreshold:
            MeasureDurationNext = -1
        # measure at least MeasureTFactor (5) periods
        elif MeasureDuration < MeasureTFactor * np.max(arrayT):
            # MeasureDurationNext = MeasureTFactor * np.max(arrayT) - MeasureDuration + 5
            # MeasureDurationNext = np.ceil(MeasureDuration / np.max(arrayT)) * np.max(arrayT) - MeasureDuration + 5
            tmpNext = np.max([np.max(arrayT), MinStep])
            if tmpNext + MeasureDuration < MeasureTFactor * np.max(arrayT):
                MeasureDurationNext = tmpNext
            else:
                MeasureDurationNext = MeasureTFactor * np.max(arrayT) - MeasureDuration + 5
        else:
            MeasureDurationNext = np.ceil(MeasureDuration / np.max(arrayT)) * np.max(arrayT) - MeasureDuration + 5
    elif LenMin <= LenT:
        tmp = min(LenT, LenMax)
        if LenT > 8:
            tmpIndexBegin = int(round(0.2 * LenT))
            tmpIndexEnd = int(round(0.8 * LenT)) - 1
            tmpIndex = np.arange(tmpIndexBegin,tmpIndexEnd)
        elif LenT >= 4: # wfr 20211012 exclude some outliers
            # tmpIndex = np.argwhere((0.65*T<arrayT) & (arrayT<1.35*T)).flatten()
            tmpIndex = np.argwhere((0.60*TMiddle<arrayT) & (arrayT<1.35*TMiddle)).flatten()
        else:
            tmpIndexBegin = max(0, LenT-tmp)
            tmpIndexEnd = LenT
            tmpIndex = np.arange(tmpIndexBegin,tmpIndexEnd)
        tmpArrayT = np.array(arrayT[tmpIndex])
        print("T_SpectrumAnalysis: tmpArrayT = {}".format(tmpArrayT))
        tmpArraySMAPE = np.array(arraySMAPE[tmpIndex])
        SMAPE = abs( (np.max(tmpArrayT)-np.min(tmpArrayT)) / np.mean(tmpArrayT) )
        print("T_SpectrumAnalysis: SMAPE = {0:.2f}".format(SMAPE))
        tmpIndex = np.argsort(tmpArraySMAPE) # indices that sort tmpArraySMAPE ascending
        T = tmpArrayT[tmpIndex[0]]
        T = np.mean(tmpArrayT[(0.65*T<tmpArrayT)&(tmpArrayT<1.35*T)])
        if SMAPE < tmpThreshold: # wfr 20201231 small SMAPE: estimate is stable, stop measuring
            isStable = True
            MeasureDurationNext = -1
        elif tmpThreshold <= SMAPE and SMAPE < tmpThreshold1:
            isStable = False
            if MeasureDuration > 2.1 * TUpBound:
                MeasureDurationNext = -1
            # measure at least MeasureTFactor periods
            elif MeasureDuration < MeasureTFactor * np.max(arrayT):
                # MeasureDurationNext = MeasureTFactor * np.max(arrayT) - MeasureDuration + 5
                tmpNext = np.max([np.max(arrayT), MeasureDuration / 10])
                if tmpNext + MeasureDuration < MeasureTFactor * np.max(arrayT):
                    MeasureDurationNext = tmpNext
                else:
                    MeasureDurationNext = MeasureTFactor * np.max(arrayT) - MeasureDuration + 5
            else:
                MeasureDurationNext = np.ceil(MeasureDuration / np.max(arrayT)) * np.max(arrayT) - MeasureDuration + 5
        elif tmpThreshold1 <= SMAPE: # recent periods differ a lot; interval much larger than the recent maximum period
            isStable = False
            if MeasureDuration > 2.1 * TUpBound:
                MeasureDurationNext = -1
            elif MeasureDuration > TUpBound and T < 0.5 * TUpBound:
                MeasureDurationNext = -1
            elif MeasureDuration >= 1.3 * MeasureTFactor * np.max(arrayT):
                MeasureDurationNext = -1
            else:
                tmpTMax = np.max(tmpArrayT)
                MeasureDurationNext = MeasureTFactor * tmpTMax - MeasureDuration
                if MeasureDurationNext < 0:
                    MeasureDurationNext = tmpTMax * ( np.ceil(MeasureDuration/tmpTMax) + 1 - MeasureTFactor )
            if 0 < MeasureDurationNext and MeasureDurationNext < 5: # wfr 20210110 the next wait-and-measure time must be > 5 s
                MeasureDurationNext += 5
    # wfr 20210116 use the newer NotPeriodic check here to decide aperiodicity (5 * T)
    if isStable == False and SMAPE > 0.4 and (LenT >= LenMax or MeasureDuration >= 5 * T or MeasureDuration > TUpBound):
        if True == NotPeriodic(arraySample, SampleInterval, T):
            # if T < TLowBound:
            #     T = np.ceil(TLowBound / T) * T
            MeasureDurationNext = -1
    if MeasureDurationNext > 0:
        MeasureDurationNext = np.max([MeasureDurationNext, 0.5 * tmpT, (MeasureTFactor * tmpT - MeasureDuration)]) # measure at least MeasureTFactor (5) periods
    print("T_SpectrumAnalysis: TOpt = {0:.2f} s".format(T))
    print("T_SpectrumAnalysis: isStable = {0}".format(isStable))
    print("T_SpectrumAnalysis: MeasureDurationNext = {0}".format(MeasureDurationNext))
    return T, isStable, MeasureDurationNext
def TComputeFFT(arraySample, SampleInterval, TUpBound, isPlot = False):
    """Estimate the dominant period of a power trace via FFT.

    Args:
        arraySample: 1-D sequence of power samples.
        SampleInterval: sampling interval in milliseconds.
        TUpBound: upper bound on the period (unused in this routine).
        isPlot: plotting flag (unused in this routine).

    Returns:
        The period in seconds corresponding to the strongest spectral peak.
    """
    fs = 1/(SampleInterval/1000)  # sampling frequency in Hz
    t = (SampleInterval/1000) * np.arange(0, len(arraySample), 1)
    num_fft = t.size
    # Fourier transform: keep the positive-frequency half of the magnitude spectrum.
    idx = fftfreq(num_fft, 1/fs)
    arrayX = idx[:num_fft//2]
    arrayY = fft(arraySample, num_fft)
    arrayY = np.abs(arrayY)
    arrayY = arrayY[:num_fft//2]
    # arrayLogY = np.log10(arrayY[:num_fft//2])
    listPeakIndex, _ = find_peaks( arrayY )
    # Frequencies and amplitudes at the spectral peaks.
    arrayPeakX = arrayX[listPeakIndex]
    arrayPeak = arrayY[listPeakIndex]
    arrayPeakIndex = np.argsort(-1 * arrayPeak)  # indices sorting arrayPeak descending
    # print("TCompute: len of arrayPeakX = {0}".format(len(arrayPeakX)))
    # print("arrayPeak = {0}".format(arrayPeak[arrayPeakIndex[:9]]))
    # print("Freq = {0}".format(arrayPeakX[arrayPeakIndex[:9]]))
    # print("T = {0}".format(1/arrayPeakX[arrayPeakIndex[:9]]))
    # Convert the strongest peaks to candidate periods (descending amplitude)...
    arrayT = 1 / arrayPeakX[arrayPeakIndex]
    arrayPeakOrder = arrayPeak[arrayPeakIndex]
    # ...then discard peaks whose amplitude is not close to the strongest one.
    arrayT = arrayT[(arrayPeakOrder > 0.65 * arrayPeakOrder[0])]
    print("TComputeFFT: T = {0:.2f} s".format(arrayT[0]))
    sys.stdout.flush()  # BUGFIX: the original referenced the method without calling it
    return arrayT[0]
def T_FFT(listSample, SampleInterval, TUpBound, MeasureTFactor, TraceFileName, isPlot = False):
    """FFT-based period-estimation wrapper around TComputeFFT.

    Optionally dumps the raw samples to "./<TraceFileName>.pkl", estimates
    the dominant period, and requests more measurement time when the trace
    is shorter than MeasureTFactor periods.

    Returns:
        (T, MeasureDurationNext): period estimate in seconds and the extra
        measurement time requested (-1 when the trace was long enough).
    """
    arraySample = np.array(listSample)
    MeasureDuration = (len(arraySample)-1) * (SampleInterval/1000)
    MeasureDurationNext = -1
    # Save the raw trace to a file for offline analysis.
    if len(TraceFileName) > 0:
        FileDir = "./"+TraceFileName+".pkl"
        # BUGFIX: use a context manager so the file handle is closed; the
        # original passed open() directly into pickle.dump and leaked it.
        with open(FileDir, "wb") as trace_file:
            pickle.dump(listSample, trace_file)
    T = TComputeFFT(arraySample, SampleInterval, TUpBound, isPlot)
    # If the measurement is not long enough, request more measurement time.
    if MeasureTFactor * T > MeasureDuration:
        MeasureDurationNext = max(0.5 * T, (MeasureTFactor * T - MeasureDuration)) # measure at least MeasureTFactor periods
        print("T_FFT: 本次测量时间不够长")
    print("T_FFT: T = {0:.2f} s".format(T))
    print("T_FFT: MeasureDurationNext = {}".format(MeasureDurationNext))
    return T, MeasureDurationNext
| 2.171875 | 2 |
slippy/core/tests/test_materials.py | KDriesen/slippy | 12 | 17278 | import numpy as np
import numpy.testing as npt
import slippy
import slippy.core as core
"""
If you add a material you need to add the properties that it will be tested with to the material_parameters dict,
the key should be the name of the class (what ever it is declared as after the class key word).
The value should be a tuple of dicts:
The first dict in the tuple will be unpacked to instantiate the class,
The second will be used with the displacement from loads method
The third will be used with the loads from displacement method to ensure that the methods are inverses of each other
If there is a limit the applicability of the displacements from loads method (such as for a perfectly plastic material
the _max_load key word should be set in the second dict.
For more complex behaviour please also implement your own tests
"""
material_parameters = {
'Elastic': ({'name': 'steel_5', 'properties': {'E': 200e9, 'v': 0.3}},
{'grid_spacing': 0.01, 'simple': True},
{'grid_spacing': 0.01, 'simple': True, 'tol': 1e-9}),
'Rigid': ({}, {}, {})
}
exceptions = [core.Rigid]
def test_materials_basic():
    """For every registered _IMMaterial subclass (except the listed
    exceptions), check that displacement_from_surface_loads and
    loads_from_surface_displacement are inverses of each other in each of
    the x, y and z directions."""
    for material in core.materials._IMMaterial._subclass_registry:
        if material in exceptions:
            continue
        try:
            init_kwargs, disp_kwargs, load_kwargs = material_parameters[material.material_type]
        except KeyError:
            raise AssertionError(f"Material test parameters are not specified, for material {material.material_type}")
        mat_instance = material(**init_kwargs)
        max_load = disp_kwargs.pop('_max_load', 1)
        np.random.seed(0)
        loads = np.random.rand(16, 16) * max_load
        # Round-trip the loads through displacement and back for each direction.
        for direction in {'x', 'y', 'z'}:
            displacement = mat_instance.displacement_from_surface_loads({direction: loads}, **disp_kwargs)
            recovered = mat_instance.loads_from_surface_displacement(
                displacements={direction: displacement[direction]}, **load_kwargs)
            npt.assert_allclose(loads, slippy.asnumpy(recovered[direction]), atol=max_load * 0.02)
def test_elastic_coupled():
    """Round-trip coupled (two-direction) loads through an elastic material
    with both the simple and the full influence-matrix formulations."""
    mat = core.Elastic('steel_6', {'E': 200e9, 'v': 0.3})
    np.random.seed(0)
    first_loads = np.random.rand(16, 16)
    second_loads = np.random.rand(16, 16)
    # Consecutive direction pairs: (x, y), (y, z), (z, x).
    for dir_1, dir_2 in (('x', 'y'), ('y', 'z'), ('z', 'x')):
        applied = {dir_1: first_loads, dir_2: second_loads}
        for simple in (True, False):
            displacement = mat.displacement_from_surface_loads(applied, grid_spacing=0.01, simple=simple)
            recovered = mat.loads_from_surface_displacement(displacements=displacement,
                                                           grid_spacing=0.01, simple=simple)
            for direction in (dir_1, dir_2):
                npt.assert_allclose(applied[direction], slippy.asnumpy(recovered[direction]), atol=0.02)
| 2.796875 | 3 |
python/dash_tools/restore_from_bup.py | Dash-Industry-Forum/media-tools | 60 | 17279 | #!/usr/bin/env python
"""Restore files with ending BACKUP_ENDING to original files."""
# The copyright in this software is being made available under the BSD License,
# included below. This software may be subject to other third party and contributor
# rights, including patent rights, and no such rights are granted under this license.
#
# Copyright (c) 2016, Dash Industry Forum.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
# * Neither the name of Dash Industry Forum nor the names of its
# contributors may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS AS IS AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
from backup_handler import BACKUP_ENDING
def main():
    """Command-line entry point: restore backup files to their original names.

    Every argument ending in BACKUP_ENDING is renamed back to its original
    name, overwriting any existing file; other arguments are ignored.
    """
    from optparse import OptionParser
    parser = OptionParser()
    #pylint: disable=unused-variable
    (options, args) = parser.parse_args()
    if len(args) < 1:
        # parser.error() prints the message and exits with status 2; the
        # original sys.exit(1) after it was dead code and has been removed.
        parser.error("Wrong number of arguments")
    for file_name in args:
        # Skip anything that is not a backup file (guard clause replaces the
        # original's redundant trailing `continue`).
        if not file_name.endswith(BACKUP_ENDING):
            continue
        old_name = file_name[:-len(BACKUP_ENDING)]
        print("moving %s to %s" % (file_name, old_name))
        if os.path.exists(old_name):
            os.unlink(old_name)
        os.rename(file_name, old_name)
continue
if __name__ == "__main__":
main()
| 2.078125 | 2 |
src/opendr/simulation/human_model_generation/utilities/joint_extractor.py | makistsantekidis/opendr | 3 | 17280 | # Copyright 2020-2021 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyglet
import numpy as np
import sklearn.preprocessing
class Joint_extractor:
    """Triangulates 3D joint positions from 2D keypoints seen in multiple
    rendered views.

    For each processed view, compute_rays() back-projects every detected 2D
    keypoint into a 3D ray (a start point on the near plane and an end point
    on the far plane of the current OpenGL view).  compute_3D_positions then
    intersects the collected rays per joint in a least-squares sense,
    iteratively discarding the worst-fitting ray until the largest
    point-to-ray distance drops below 5 cm.
    """
    def __init__(self, num_of_joints=18):
        """Allocate empty per-joint ray storage.

        Args:
            num_of_joints: number of skeleton joints to track.
        """
        self.num_of_joints = num_of_joints
        self.start_points = []
        self.end_points = []
        # BUGFIX: allocate one slot per joint instead of a hard-coded 18, so
        # a non-default num_of_joints stays consistent with the loops in
        # compute_3D_positions.
        for j in range(self.num_of_joints):
            self.start_points.append([])
            self.end_points.append([])
    def compute_rays(self, cv_kps, image_width, image_height):
        """Back-project the 2D keypoints of the current GL view into 3D rays.

        Args:
            cv_kps: array of (x, y) keypoints in image coordinates; joints
                that were not detected are marked with -1.
            image_width: unused, kept for interface compatibility.
            image_height: image height, needed to flip y into GL window
                coordinates.
        """
        pmat = (pyglet.gl.GLdouble * 16)()
        mvmat = (pyglet.gl.GLdouble * 16)()
        view = (pyglet.gl.GLint * 4)()
        pyglet.gl.glGetDoublev(pyglet.gl.GL_MODELVIEW_MATRIX, mvmat)
        pyglet.gl.glGetDoublev(pyglet.gl.GL_PROJECTION_MATRIX, pmat)
        pyglet.gl.glGetIntegerv(pyglet.gl.GL_VIEWPORT, view)
        if cv_kps.size != 0:
            for i, cv_kp in enumerate(cv_kps):
                # BUGFIX: the original tested cv_kp[0] twice; a keypoint is
                # valid only when BOTH coordinates differ from -1.
                if cv_kp[0] != -1 and cv_kp[1] != -1:
                    start_x = pyglet.gl.GLdouble()
                    start_y = pyglet.gl.GLdouble()
                    start_z = pyglet.gl.GLdouble()
                    end_x = pyglet.gl.GLdouble()
                    end_y = pyglet.gl.GLdouble()
                    end_z = pyglet.gl.GLdouble()
                    # Unproject at depth 0 (near plane) and depth 1 (far plane).
                    pyglet.gl.gluUnProject(cv_kp[0], image_height - cv_kp[1], 0, mvmat, pmat, view, start_x,
                                           start_y, start_z)
                    pyglet.gl.gluUnProject(cv_kp[0], image_height - cv_kp[1], 1, mvmat, pmat, view, end_x, end_y,
                                           end_z)
                    self.start_points[i].append(np.asarray([start_x.value, start_y.value, start_z.value]))
                    self.end_points[i].append(np.asarray([end_x.value, end_y.value, end_z.value]))
    @property
    def compute_3D_positions(self):
        """Least-squares ray intersection for every joint.

        NOTE(review): despite the verb-like name this is accessed as an
        attribute (it is a property); kept as-is to preserve the interface.

        Returns:
            (points_3D, dists_3D) on success; the list [[], []] when any
            joint has no collected rays.
        """
        for i in range(self.num_of_joints):
            if len(self.start_points[i]) == 0 or len(self.end_points[i]) == 0:
                print("Failed to estimate the position of the joints...")
                return [[], []]
        points_3D = []
        dists_3D = []
        inds_sorted = None
        for i in range(self.num_of_joints):
            d = 100
            first_time = True
            # Repeatedly solve the least-squares intersection, dropping the
            # ray with the largest residual, until the worst residual is
            # below 5 cm.
            while d > 0.05:
                if first_time:
                    s = np.asarray(self.start_points[i])
                    e = np.asarray(self.end_points[i])
                else:
                    s = s[inds_sorted[:-1]]
                    e = e[inds_sorted[:-1]]
                v = e - s
                ni = sklearn.preprocessing.normalize(v, norm="l2")
                nx = ni[:, 0]
                ny = ni[:, 1]
                nz = ni[:, 2]
                sxx = np.sum(nx * nx - 1)
                syy = np.sum(ny * ny - 1)
                szz = np.sum(nz * nz - 1)
                sxy = np.sum(nx * ny)
                sxz = np.sum(nx * nz)
                syz = np.sum(ny * nz)
                S = np.asarray([np.asarray([sxx, sxy, sxz]), np.asarray([sxy, syy, syz]), np.asarray([sxz, syz, szz])])
                cx = np.sum(s[:, 0] * (nx * nx - 1) + s[:, 1] * (nx * ny) + s[:, 2] * (nx * nz))
                cy = np.sum(s[:, 0] * (nx * ny) + s[:, 1] * (ny * ny - 1) + s[:, 2] * (ny * nz))
                cz = np.sum(s[:, 0] * (nx * nz) + s[:, 1] * (ny * nz) + s[:, 2] * (nz * nz - 1))
                C = np.asarray([cx, cy, cz])
                p_intersect = np.linalg.inv(np.asarray(S)).dot(C)
                N = s.shape[0]
                distances = np.zeros(N, dtype=np.float32)
                for j in range(N):
                    ui = ((p_intersect - s[j, :]).dot(np.transpose(v[j, :]))) / (v[j, :].dot(v[j, :]))
                    distances[j] = np.linalg.norm(p_intersect - s[j, :] - ui * v[j, :])
                # for i=1:N %http://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html:
                #     distances(i) = norm(cross(p_intersect-PA(i,:),p_intersect-PB(i,:))) / norm(Si(i,:));
                inds_sorted = np.argsort(distances)
                d = distances[inds_sorted[-1]]
                first_time = False
            points_3D.append(p_intersect)
            dists_3D.append(distances)
        points_3D = np.asarray(points_3D, dtype=np.float32)
        dists_3D = np.asarray(dists_3D, dtype=object)
        return points_3D, dists_3D
| 1.734375 | 2 |
bluebottle/donations/migrations/0009_auto_20190130_1140.py | jayvdb/bluebottle | 0 | 17281 | <reponame>jayvdb/bluebottle<filename>bluebottle/donations/migrations/0009_auto_20190130_1140.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-01-30 10:40
from __future__ import unicode_literals
import bluebottle.utils.fields
from decimal import Decimal
from django.db import migrations, models
import django.db.models.deletion
import djmoney.models.fields
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.8. Adds Donation.payout_amount, a
    # django-money MoneyField stored as a decimal amount plus a companion
    # currency column (payout_amount_currency, default 'EUR').
    dependencies = [
        ('donations', '0008_auto_20170927_1021'),
    ]
    operations = [
        migrations.AddField(
            model_name='donation',
            name='payout_amount',
            field=bluebottle.utils.fields.MoneyField(currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=Decimal('0.0'), max_digits=12, verbose_name='Payout amount'),
        ),
        migrations.AddField(
            model_name='donation',
            name='payout_amount_currency',
            field=djmoney.models.fields.CurrencyField(choices=[(b'EUR', 'Euro')], default='EUR', editable=False, max_length=3),
        ),
    ]
| 1.59375 | 2 |
pickle_storage/tests/__init__.py | PyUnchained/pickle_storage | 0 | 17282 | <gh_stars>0
# import os
# os.environ.setdefault('PICKLE_STORAGE_SETTINGS', 'pickle_storage.tests.settings') | 1.164063 | 1 |
src/config.py | NicolasSommer/valuenet | 0 | 17283 | <reponame>NicolasSommer/valuenet<gh_stars>0
import argparse
import json
import os
class Config:
    """Path-prefix constants used to compose data and experiment directories."""
    DATA_PREFIX = "data"  # root folder that holds the data sets
    EXPERIMENT_PREFIX = "experiments"  # root folder for experiment outputs
def write_config_to_file(args, output_path):
    """Serialize the parsed argparse namespace to <output_path>/args.json."""
    target = os.path.join(output_path, "args.json")
    with open(target, 'w', encoding='utf-8') as config_file:
        config_file.write(json.dumps(vars(args), indent=2))
def _add_model_configuration(parser):
parser.add_argument('--cuda', default=True, action='store_true')
# language model configuration
parser.add_argument('--encoder_pretrained_model', default='facebook/bart-base', type=str)
parser.add_argument('--max_seq_length', default=1024, type=int)
# model configuration
parser.add_argument('--column_pointer', action='store_true', default=True)
parser.add_argument('--embed_size', default=300, type=int, help='size of word embeddings')
parser.add_argument('--hidden_size', default=300, type=int, help='size of LSTM hidden states')
parser.add_argument('--action_embed_size', default=128, type=int, help='size of word embeddings')
parser.add_argument('--att_vec_size', default=300, type=int, help='size of attentional vector')
parser.add_argument('--type_embed_size', default=128, type=int, help='size of word embeddings')
parser.add_argument('--col_embed_size', default=300, type=int, help='size of word embeddings')
parser.add_argument('--readout', default='identity', choices=['identity', 'non_linear'])
parser.add_argument('--column_att', choices=['dot_prod', 'affine'], default='affine')
parser.add_argument('--dropout', default=0.3, type=float, help='dropout rate')
def _add_postgresql_configuration(parser):
parser.add_argument('--database_host', default='localhost', type=str)
parser.add_argument('--database_port', default='18001', type=str)
parser.add_argument('--database_user', default='postgres', type=str)
parser.add_argument('--database_password', default='<PASSWORD>', type=str)
parser.add_argument('--database_schema', default='unics_cordis', type=str)
def read_arguments_train():
    """Build and parse the training command line.

    Registers the shared model flags plus training/optimizer and per-epoch
    prediction options, derives ``data_dir`` and ``model_output_dir`` from
    the Config path prefixes, echoes every argument to stdout and returns
    the parsed namespace.
    """
    arg_parser = argparse.ArgumentParser(description="Run training with following arguments")
    # model configuration
    _add_model_configuration(arg_parser)
    # general configuration
    arg_parser.add_argument('--exp_name', default='exp', type=str)
    arg_parser.add_argument('--seed', default=90, type=int)
    arg_parser.add_argument('--toy', default=False, action='store_true')
    arg_parser.add_argument('--data_set', default='spider', type=str)
    # training & optimizer configuration (registered data-driven)
    for flag, default_value, value_type in (
            ('--batch_size', 1, int),
            ('--num_epochs', 5.0, float),
            ('--lr_base', 1e-3, float),
            ('--lr_connection', 1e-4, float),
            ('--lr_transformer', 2e-5, float),
            # ('--adam_eps', 1e-8, float),
            ('--scheduler_gamma', 0.5, int),
            ('--max_grad_norm', 1.0, float),
            ('--clip_grad', 5., float),
            ('--loss_epoch_threshold', 50, int),
            ('--sketch_loss_weight', 1.0, float),
    ):
        arg_parser.add_argument(flag, default=default_value, type=value_type)
    # prediction configuration (run after each epoch)
    arg_parser.add_argument('--beam_size', default=5, type=int, help='beam size for beam search')
    arg_parser.add_argument('--decode_max_time_step', default=40, type=int,
                            help='maximum number of time steps used in decoding and sampling')
    parsed = arg_parser.parse_args()
    parsed.data_dir = os.path.join(Config.DATA_PREFIX, parsed.data_set)
    parsed.model_output_dir = Config.EXPERIMENT_PREFIX
    print("*** parsed configuration from command line and combine with constants ***")
    for name in vars(parsed):
        print("argument: {}={}".format(name, getattr(parsed, name)))
    return parsed
def read_arguments_evaluation():
    """Build and parse the evaluation command line.

    Registers the shared model flags, the evaluation/prediction options and
    the PostgreSQL connection options, derives ``data_dir`` from the Config
    data prefix, echoes every argument to stdout and returns the namespace.
    """
    arg_parser = argparse.ArgumentParser(description="Run evaluation with following arguments")
    # model configuration
    _add_model_configuration(arg_parser)
    # evaluation
    arg_parser.add_argument('--evaluation_type', default='spider', type=str)
    arg_parser.add_argument('--model_to_load', type=str)
    arg_parser.add_argument('--prediction_dir', type=str)
    arg_parser.add_argument('--batch_size', default=1, type=int)
    # general configuration
    arg_parser.add_argument('--seed', default=90, type=int)
    arg_parser.add_argument('--data_set', default='spider', type=str)
    # prediction configuration
    arg_parser.add_argument('--beam_size', default=1, type=int, help='beam size for beam search')
    arg_parser.add_argument('--decode_max_time_step', default=40, type=int,
                            help='maximum number of time steps used in decoding and sampling')
    # DB config is only needed in case evaluation is executed on PostgreSQL DB
    _add_postgresql_configuration(arg_parser)
    arg_parser.add_argument('--database', default='cordis_temporary', type=str)
    parsed = arg_parser.parse_args()
    parsed.data_dir = os.path.join(Config.DATA_PREFIX, parsed.data_set)
    print("*** parsed configuration from command line and combine with constants ***")
    for name in vars(parsed):
        print("argument: {}={}".format(name, getattr(parsed, name)))
    return parsed
def read_arguments_manual_inference():
    """Build and parse the manual-inference command line (model checkpoint,
    API keys, PostgreSQL connection and decoding options), echo every
    argument to stdout and return the parsed namespace."""
    arg_parser = argparse.ArgumentParser(description="Run manual inference with following arguments")
    # model configuration
    _add_model_configuration(arg_parser)
    # manual_inference
    arg_parser.add_argument('--model_to_load', type=str)
    arg_parser.add_argument('--api_key', default='1234', type=str)
    arg_parser.add_argument('--ner_api_secret', default='PLEASE_ADD_YOUR_OWN_GOOGLE_API_KEY_HERE', type=str)
    # database configuration (in case of PostgreSQL, not needed for sqlite)
    _add_postgresql_configuration(arg_parser)
    # general configuration
    arg_parser.add_argument('--seed', default=90, type=int)
    arg_parser.add_argument('--batch_size', default=1, type=int)
    # prediction configuration
    arg_parser.add_argument('--beam_size', default=1, type=int, help='beam size for beam search')
    arg_parser.add_argument('--decode_max_time_step', default=40, type=int,
                            help='maximum number of time steps used in decoding and sampling')
    parsed = arg_parser.parse_args()
    print("*** parsed configuration from command line and combine with constants ***")
    for name in vars(parsed):
        print("argument: {}={}".format(name, getattr(parsed, name)))
    return parsed
| 2.390625 | 2 |
aqg/utils/summarizer.py | Sicaida/Automatic_Question_Generation | 134 | 17284 | <gh_stars>100-1000
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from sumy.parsers.html import HtmlParser
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
#from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.summarizers.lex_rank import LexRankSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
class TextSummarizer:
    """Extractive text summarizer built on sumy's LexRank algorithm.

    Every summarize_from_* method writes the selected sentences to two files
    in the working directory: "summarizer_output.txt" (one sentence per line)
    and "summarizer_output2.txt" (sentences concatenated without separators).
    """
    def __init__(self, count=10):
        self.LANGUAGE = "czech"        # tokenizer/stemmer language
        self.SENTENCES_COUNT = count   # number of sentences in the summary
    def _summarize(self, parser):
        """Run LexRank on the parsed document and write both output files.

        Extracted from the three public methods, which previously duplicated
        this logic verbatim; the context managers also ensure the output
        file handles are flushed and closed (the original leaked both).
        """
        stemmer = Stemmer(self.LANGUAGE)
        summarizer = Summarizer(stemmer)
        with open("summarizer_output.txt", "w+") as file_1, \
                open("summarizer_output2.txt", "w+") as file_2:
            for sentence in summarizer(parser.document, self.SENTENCES_COUNT):
                file_2.write(str(sentence))
                file_1.write(str(sentence))
                file_1.write("\n")
    def summarize_from_url(self, url):
        """Summarize the main content of the web page at ``url``."""
        self._summarize(HtmlParser.from_url(url, Tokenizer(self.LANGUAGE)))
    def summarize_from_text(self, text):
        """Summarize a plain-text string."""
        self._summarize(PlaintextParser.from_string(text, Tokenizer(self.LANGUAGE)))
    def summarize_from_file(self, file_name):
        """Summarize the contents of a plain-text file."""
        self._summarize(PlaintextParser.from_file(file_name, Tokenizer(self.LANGUAGE)))
# t = TextSummarizer()
# t.summarize_from_file("obama_short.txt")
# pdf = pdfgeneration()
# pdf.generate_pdf_summarizer("summarizer_output2.txt")
| 2.859375 | 3 |
tests/__init__.py | AdamRuddGH/super_json_normalize | 2 | 17285 | <reponame>AdamRuddGH/super_json_normalize
"""Unit test package for super_json_normalize."""
| 1.148438 | 1 |
examples/turbulent_condensate/run.py | chrisjbillington/parpde | 0 | 17286 | <reponame>chrisjbillington/parpde
# An example of a turbulent BEC in a harmonic trap. The groundstate is found
# and then some vortices randomly printed about with a phase printing. Some
# evolution in imaginary time is then performed to smooth things out before
# evolving the BEC in time.
# Run with 'mpirun -n <N CPUs> python run_example.py'
from __future__ import division, print_function
import sys
# sys.path.insert(0, '../..') # The location of the modules we need to import
import numpy as np
from parPDE import Simulator2D, LAPLACIAN
from parPDE.BEC2D import BEC2D
def get_number_and_trap(rhomax, R):
    """Return ``(N, omega)`` for a single-component condensate in the
    Thomas-Fermi approximation: the 2D normalisation constant ``N`` and
    the trap frequency ``omega`` that produce peak density ``rhomax``
    and radius ``R``.

    Relies on the module-level constants ``pi``, ``g`` and ``m``.
    """
    norm_constant = pi * rhomax * R**2 / 2
    trap_frequency = np.sqrt(2 * g * rhomax / (m * R**2))
    return norm_constant, trap_frequency
# Constants:
pi = np.pi
hbar = 1.054571726e-34 # Reduced Planck's constant
a_0 = 5.29177209e-11 # Bohr radius
u = 1.660539e-27 # unified atomic mass unit
m = 86.909180*u # 87Rb atomic mass
a = 98.98*a_0 # 87Rb |2,2> scattering length
g = 4*pi*hbar**2*a/m # 87Rb self interaction constant
# (the 1e6 factor presumably converts cm^-3 to m^-3 -- TODO confirm)
rhomax = 2.5e14 * 1e6 # Desired peak condensate density
R = 7.5e-6 # Desired condensate radius
mu = g * rhomax # Approximate chemical potential for desired max density
# (assuming all population is in mF=+1 or mF=-1)
N_2D, omega = get_number_and_trap(rhomax, R) # 2D normalisation constant and trap frequency
# required for specified radius and peak density
# Space:
# 256 x 256 grid over [-10, 10] microns in each direction, periodic in both
# directions, with 6th-order finite-difference operators.
nx_global = ny_global = 256
x_max_global = y_max_global = 10e-6
simulator = Simulator2D(-x_max_global, x_max_global, -y_max_global, y_max_global, nx_global, ny_global,
periodic_x=True, periodic_y=True, operator_order=6)
bec2d = BEC2D(simulator, natural_units=False, use_ffts=True)
# Coordinate arrays and grid spacings provided by the simulator:
x = simulator.x
y = simulator.y
dx = simulator.dx
dy = simulator.dy
r2 = x**2.0 + y**2.0
r = np.sqrt(r2)
# A Harmonic trap:
V = 0.5 * m * omega**2 * R**2.0 * (r/R)**2
# Characteristic timescales, used below to choose time steps and durations:
dispersion_timescale = dx**2 * m / (pi * hbar)
chemical_potential_timescale = 2*pi*hbar/mu
potential_timescale = 2*pi*hbar/V.max()
# Kinetic term: -hbar^2/(2m) times the Laplacian operator.
K = -hbar**2/(2*m)*LAPLACIAN
def H(t, psi):
    """Hamiltonian pieces of the single-component GPE for wavefunction
    ``psi``.

    Returns a 3-tuple: the kinetic operator ``K`` (an OperatorSum), the
    linear local term (the trap potential ``V``) and the nonlinear local
    term (mean-field interaction ``g|psi|^2``). ``t`` is unused but kept
    for the integrator's expected callback signature.
    """
    linear_local = V
    nonlinear_local = g * abs(psi)**2
    return K, linear_local, nonlinear_local
if __name__ == '__main__':
    # Thomas-Fermi density profile as the initial guess.
    psi = rhomax * (1 - (x**2 + y**2) / R**2)
    psi[psi < 0] = 0
    psi = np.sqrt(psi)

    # Relax to the groundstate of the harmonic trap.
    psi = bec2d.find_groundstate(H, mu, psi, relaxation_parameter=1.7, convergence=1e-13,
                                 output_interval=100, output_directory='groundstate',
                                 convergence_check_interval=10)

    # The groundstate is real; promote to complex before phase printing.
    psi = np.array(psi, dtype=complex)

    # Imprint 30 vortices of random sign at random positions. Seeding the
    # generator makes all MPI processes agree on where the vortices go.
    np.random.seed(42)
    for _ in range(30):
        sign = np.sign(np.random.normal())
        x_vortex = np.random.normal(0, scale=R)
        y_vortex = np.random.normal(0, scale=R)
        psi[:] *= np.exp(sign * 1j*np.arctan2(x - y_vortex, y - x_vortex))

    # Brief imaginary-time evolution to smooth the printed phase profile.
    psi = bec2d.evolve(dt=dispersion_timescale/2, t_final=chemical_potential_timescale,
                       H=H, psi=psi, mu=mu, method='rk4', imaginary_time=True,
                       output_interval=100, output_directory='smoothing')

    # Real-time evolution for 10 ms.
    psi = bec2d.evolve(dt=dispersion_timescale/2, t_final=10e-3,
                       H=H, psi=psi, mu=mu, method='rk4', imaginary_time=False,
                       output_interval=100, output_directory='evolution')
| 2.546875 | 3 |
src/Test_Sfepy_NavierStokes.py | somu15/Small_Pf_code | 0 | 17287 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 28 09:33:53 2020
@author: dhulls
"""
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct, Struct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.postprocess.probes_vtk import ProbeFromFile, Probe
import numpy as np
# ---------------------------------------------------------------------------
# Stationary incompressible Navier-Stokes flow assembled directly with the
# SfePy API: mixed velocity (u) / pressure (p) formulation, Newton iteration
# over a direct linear solver, result saved to VTK and viewed.
# Fix: removed stray table-formatting junk fused onto the final line of the
# original dump, which broke the syntax.
# ---------------------------------------------------------------------------
helps = {
    'show' : 'show the results figure',
}

from sfepy import data_dir

# Command-line interface.
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
                    action="store_true", dest='show',
                    default=False, help=helps['show'])
options = parser.parse_args()

# Mesh and computational domain.
mesh = Mesh.from_file(data_dir + '/meshes/3d/fluid_mesh.inp')
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')

# P1 approximations: 3-component velocity and scalar pressure.
field_1 = Field.from_args(name='3_velocity', dtype=nm.float64, shape=3, region=omega, approx_order=1)
field_2 = Field.from_args(name='pressure', dtype=nm.float64, shape=1, region=omega, approx_order=1)

# Boundary regions, selected by coordinate.
region_0 = domain.create_region(name='Walls1', select='vertices in (y < -0.049)', kind='facet')
region_1 = domain.create_region(name='Walls2', select='vertices in (y > 0.049)', kind='facet')
region_2 = domain.create_region(name='Inlet', select='vertices in (x < -0.499)', kind='facet')
# NOTE(review): the outlet selector 'x > -0.499' overlaps almost the whole
# domain; it looks like it was meant to be 'x > 0.499' -- confirm upstream.
region_3 = domain.create_region(name='Outlet', select='vertices in (x > -0.499)', kind='facet')

# Essential (Dirichlet) boundary conditions: no-slip walls, unit inflow.
ebc_1 = EssentialBC(name='Walls1', region=region_0, dofs={'u.[0,1,2]' : 0.0})
ebc_2 = EssentialBC(name='Walls2', region=region_1, dofs={'u.[0,1,2]' : 0.0})
ebc_3 = EssentialBC(name='Inlet', region=region_2, dofs={'u.0' : 1.0, 'u.[1,2]' : 0.0})
# NOTE(review): ebc_4 is defined but never passed to pb.set_bcs() below, so
# the outlet is left unconstrained; confirm whether that is intentional.
ebc_4 = EssentialBC(name='Outlet', region=region_3, dofs={'p':0.0, 'u.[1,2]' : 0.0})

viscosity = Material(name='viscosity', value=1.25e-3)

# Unknown/test variable pairs of the mixed formulation.
variable_1 = FieldVariable('u', 'unknown', field_1)
variable_2 = FieldVariable(name='v', kind='test', field=field_1, primary_var_name='u')
variable_3 = FieldVariable(name='p', kind='unknown', field=field_2)
variable_4 = FieldVariable(name='q', kind='test', field=field_2, primary_var_name='p')

integral_1 = Integral('i1', order=2)
integral_2 = Integral('i2', order=3)

# Weak-form terms: viscous diffusion, convection and the two Stokes
# (pressure/divergence) coupling terms.
t1 = Term.new(name='dw_div_grad(viscosity.value, v, u)',
              integral=integral_2, region=omega, viscosity=viscosity, v=variable_2, u=variable_1)
t2 = Term.new(name='dw_convect(v, u)',
              integral=integral_2, region=omega, v=variable_2, u=variable_1)
t3 = Term.new(name='dw_stokes(v, p)',
              integral=integral_1, region=omega, v=variable_2, p=variable_3)
t4 = Term.new(name='dw_stokes(u, q)',
              integral=integral_1, region=omega, u=variable_1, q=variable_4)

eq1 = Equation('balance', t1+t2-t3)
eq2 = Equation('incompressibility', t4)
eqs = Equations([eq1,eq2])

# Direct linear solver wrapped by a damped Newton iteration.
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton({'i_max' : 20, 'eps_a' : 1e-8, 'eps_r' : 1.0, 'macheps' : 1e-16, 'lin_red' : 1e-2, 'ls_red' : 0.1, 'ls_red_warp' : 0.001, 'ls_on' : 0.99999, 'ls_min' : 1e-5, 'check' : 0, 'delta' : 1e-6}, lin_solver=ls, status=nls_status)

pb = Problem('Navier-Stokes', equations=eqs)
pb.set_bcs(ebcs=Conditions([ebc_1, ebc_2, ebc_3]))
pb.set_solver(nls)

# Solve, save the state to VTK and open the viewer.
status = IndexedStruct()
state = pb.solve(status=status, save_results=True)
out = state.create_output_dict()
pb.save_state('Navier_Stokes.vtk', out=out)

view = Viewer('Navier_Stokes.vtk')
view(rel_scaling=2,
     is_scalar_bar=True, is_wireframe=True)
nd_customization/api/lab_test.py | libermatic/nd_customization | 0 | 17288 | <filename>nd_customization/api/lab_test.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe
import json
from frappe.utils import now, cint
from functools import partial
from toolz import compose
@frappe.whitelist()
def deliver_result(lab_test, revert=0, delivery_time=None):
    """Mark a submitted Lab Test as delivered, or revert the delivery.

    Only acts on submitted documents (docstatus == 1). When ``revert`` is
    truthy the delivery time is cleared; otherwise it is set to
    ``delivery_time`` (defaulting to the current timestamp).
    """
    doc = frappe.get_doc("Lab Test", lab_test)
    if doc and doc.docstatus == 1:
        doc.delivery_time = None if cint(revert) else (delivery_time or now())
        doc.save()
# Extract, from a list of template rows, the label of every row flagged as a
# subsection (is_subsection == 1), preferring `test_event` over `particulars`.
#
# Fix: under Python 3 `map`/`filter` return one-shot iterators, so the old
# pipeline's result was always truthy and was exhausted by the first `in`
# membership test in change_test_loading(); wrapping with `list` restores the
# reusable-sequence semantics callers rely on.
_get_subsections = compose(
    list,
    partial(map, lambda x: x.get("test_event") or x.get("particulars")),
    partial(filter, lambda x: cint(x.is_subsection) == 1),
)
def change_test_loading(doc, template):
    """Post-process the test items loaded from ``template`` onto ``doc``.

    - Compound templates: rows named after a subsection need no result
      value; other named rows have their name moved from ``test_name``
      to ``test_event``.
    - Descriptive templates: subsection rows need no result value.
    - Grouped templates: copy each sub-template's ``test_comment`` onto
      its corresponding row.

    NOTE(review): the source dump lost its indentation; the nesting
    reconstructed here should be confirmed against upstream.
    """
    if template.test_template_type == "Compound":
        subsections = _get_subsections(template.normal_test_templates)
        if subsections:
            for item in doc.normal_test_items:
                if item.test_name in subsections:
                    frappe.db.set_value(
                        "Normal Test Items", item.name, "require_result_value", 0
                    )
                elif item.test_name and not item.test_event:
                    frappe.db.set_value(
                        "Normal Test Items", item.name, "test_name", None
                    )
                    frappe.db.set_value(
                        "Normal Test Items", item.name, "test_event", item.test_name
                    )
    if template.test_template_type == "Descriptive":
        subsections = _get_subsections(template.special_test_template)
        if subsections:
            for item in doc.special_test_items:
                if item.test_particulars in subsections:
                    frappe.db.set_value(
                        "Special Test Items", item.name, "require_result_value", 0
                    )
    if template.test_template_type == "Grouped":
        for item in doc.normal_test_items:
            if item.test_name and item.template and item.template != doc.template:
                test_comment = frappe.db.get_value(
                    "Lab Test Template", item.template, "test_comment"
                )
                if test_comment:
                    frappe.db.set_value(
                        "Normal Test Items", item.name, "test_comment", test_comment
                    )
def load_result_format(lab_test, template, prescription, invoice):
    """Delegate to erpnext's ``load_result_format`` and then apply our
    site-specific adjustments (see ``change_test_loading``)."""
    from erpnext.healthcare.doctype.lab_test.lab_test import (
        load_result_format as _erpnext_load_result_format,
    )
    _erpnext_load_result_format(lab_test, template, prescription, invoice)
    change_test_loading(lab_test, template)
@frappe.whitelist()
def create_invoice(company, patient, lab_tests, prescriptions):
    """Create a Sales Invoice for the given lab tests via erpnext, then
    link each invoice item back to its originating Lab Test.

    :param lab_tests: JSON-encoded list of Lab Test names.
    :return: the name of the created Sales Invoice.
    """
    from erpnext.healthcare.doctype.lab_test.lab_test import (
        create_invoice as _erpnext_create_invoice,
    )

    si_name = _erpnext_create_invoice(company, patient, lab_tests, prescriptions)
    test_ids = json.loads(lab_tests)
    if test_ids:
        invoice = frappe.get_doc("Sales Invoice", si_name)
        invoice.patient = patient
        find_item = _find_item(invoice.items)
        for test_id in test_ids:
            test = frappe.get_doc("Lab Test", test_id)
            item_code = frappe.db.get_value("Lab Test Template", test.template, "item")
            item = find_item(item_code)
            item.reference_dt = "Lab Test"
            item.reference_dn = test_id
            item.lab_test_result_date = test.result_date
        invoice.save()
    return si_name
def _find_item(items):
def fn(item_code):
for item in items:
if item.item_code == item_code:
return item
return fn
@frappe.whitelist()
def link_invoice(lab_test, sales_invoice):
    """Attach an existing Sales Invoice to a Lab Test, after validating
    that neither document is cancelled and both belong to one patient."""
    test_doc = frappe.get_doc("Lab Test", lab_test)
    invoice_doc = frappe.get_doc("Sales Invoice", sales_invoice)
    cancelled = 2
    if cancelled in (test_doc.docstatus, invoice_doc.docstatus):
        frappe.throw("Cannot link cancelled documents.")
    if test_doc.patient != invoice_doc.patient:
        frappe.throw("Lab Test and Sales Invoice belong to different Patients.")
    frappe.db.set_value("Lab Test", lab_test, "invoice", sales_invoice)
| 1.9375 | 2 |
build-flask-app.py | Abdur-rahmaanJ/build-flask-app | 1 | 17289 | #!/usr/bin/env python3
from scripts.workflow import get_app_name, is_name_valid
from scripts.workflow import get_args, is_args_valid
from scripts.workflow import create_dir, create_app, create_templates_folder, create_static_folder, create_dockerfile
from scripts.manual import print_manual
from scripts.messages import empty_name, success_msg, failure_msg
import sys
# Entry point: validate the requested app name and CLI flags, then scaffold a
# new Flask project (app.py, templates/, optional static/ and Dockerfile).
app_name = get_app_name()
args = get_args()
args.remove(app_name)

if is_name_valid(app_name):
    if is_args_valid(args):
        # Project root folder named after the app.
        create_dir(app_name)

        # Feature flags from the remaining CLI arguments.
        debugger_mode = '-d' in args or '--debugger' in args
        import_css_js = '-cj' in args or '--css-js' in args
        use_docker = '-dc' in args or '--docker-container' in args

        if debugger_mode:
            print("- Debugger mode on")
            print(" |__ added debug=True")
        else:
            print("- Debugger mode off")

        if import_css_js:
            create_static_folder(app_name)
            print("- Css and Js mode on")
            print(" |__ import static/stylesheet/style.css")
            print(" |__ import static/js/app.css")
        else:
            print("- Css and Js mode off")

        if use_docker:
            print("- Docker mode on")
            print(' |__ cd %s' % app_name)
            print(' |__ \"docker-compose up -d\" to start app')
        else:
            print("- Docker mode off")

        # templates/index.html, then app.py in the project root.
        create_templates_folder(app_name, import_css_js)
        create_app(app_name, debugger_mode)

        if use_docker:
            # Generate the Dockerfile for containerized runs.
            create_dockerfile(app_name)

        success_msg(app_name)
    else:
        print('Unknown argument detected! Please check the help section\n')
        print_manual()
        failure_msg(app_name)
else:
    if app_name == '-h' or app_name == '--help':
        print_manual()
    else:
        print('Please choose another app name')
    failure_msg(app_name)
stacker/assembler.py | unrahul/stacker | 0 | 17290 | import os
from pathlib import Path
from jinja2 import Template
import parser
from utils import write_to_file
from utils import mkdir_p
# Parse the stack specification once at import time; `spec` is the parsed
# document that the generator functions below read from.
parser.init()
# parse and assign to vars
spec = parser.spec
def _concat(slice: str) -> str:
"""helper to concatenate each template slice."""
return "{}\n".format(slice)
def slices_filename_content_hash() -> dict:
    """Map each file name under this package's ``slices`` directory to
    its text content."""
    slices_dir = Path.cwd().joinpath(
        os.path.join(os.path.dirname(os.path.realpath(__file__)), "slices")
    )
    return {entry.name: entry.read_text() for entry in slices_dir.iterdir()}
def concat_slices(component: str = "tensorflow", flavor: str = "mkl") -> str:
    """Concatenate Dockerfile slices for the requested component/flavor.

    Always starts from the OS slice; for the ``mkl`` flavor of
    ``tensorflow`` or ``pytorch`` the matching framework slice and the
    horovod slice are appended.

    Fix: the original accumulated into a str and then called ``"".join``
    on that str, which is a no-op; the slices are now collected in a list
    and joined once.

    :return: the assembled Dockerfile text.
    """
    docker_slices = slices_filename_content_hash()
    names = ["os.dockerfile"]
    if component == "tensorflow" and flavor == "mkl":
        names.append("tensorflow.dockerfile")
        names.append("horovod.dockerfile")
    if component == "pytorch" and flavor == "mkl":
        names.append("pytorch.dockerfile")
        names.append("horovod.dockerfile")
    return "".join(_concat(docker_slices[name]) for name in names)
def insert_template_values(dockerfile: str, kwargs: dict):
    """Render the Dockerfile Jinja template with the supplied context."""
    template = Template(dockerfile)
    return template.render(**kwargs)
def generate_dockerfile(os: str, framework: str, file_name: str = "Dockerfile"):
    """Generate one Dockerfile for the given distro and framework and
    write it to ``file_name``.

    NOTE: the ``os`` parameter (distro name, e.g. "ubuntu"/"clearlinux")
    shadows the ``os`` module inside this function; kept for interface
    compatibility.
    """
    dlrs = spec["stack"]["dlrs"]
    distro = dlrs[os]
    installer = "apt-get install -y" if os == "ubuntu" else "swupd bundle-add"
    kwargs = {
        "os": "{}:{}".format(os, distro["version"]),
        "pkg_install": "{} {}".format(installer, " ".join(distro["os_pkgs"])),
        "tf_version": distro["tensorflow"]["mkl"]["version"],
        "hvd_version": distro["horovod"]["version"],
        "torch_version": distro["pytorch"]["mkl"]["version"],
    }
    rendered = insert_template_values(concat_slices(framework), kwargs)
    write_to_file(file_name, rendered)
def generate_all_dockerfiles(generate: bool = True, build: bool = False) -> None:
    """Generate Dockerfiles for every (distro, framework) combination
    under ./dockerfiles/<distro>/<framework>/Dockerfile."""
    if generate:
        base_dir = "./dockerfiles"
        for framework in ("pytorch", "tensorflow"):
            for distro in ("ubuntu", "clearlinux"):
                target_dir = mkdir_p(os.path.join(base_dir, distro, framework))
                target_file = os.path.join(target_dir, "Dockerfile")
                generate_dockerfile(distro, framework, target_file)
    if build:
        # TODO(unrahul): build the dockerfiles
        pass
| 2.90625 | 3 |
akshare/fx/cons.py | PKUuu/akshare | 1 | 17291 | <filename>akshare/fx/cons.py
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: <NAME>
date: 2019/10/20 10:58
contact: <EMAIL>
desc: 外汇配置文件
"""
# headers
SHORT_HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.91 Safari/537.36'
}
# url
FX_SPOT_URL = "http://www.chinamoney.com.cn/r/cms/www/chinamoney/data/fx/rfx-sp-quot.json"
FX_SWAP_URL = "http://www.chinamoney.com.cn/r/cms/www/chinamoney/data/fx/rfx-sw-quot.json"
FX_PAIR_URL = "http://www.chinamoney.com.cn/r/cms/www/chinamoney/data/fx/cpair-quot.json"
# payload
SPOT_PAYLOAD = {
"t": {}
}
| 1.71875 | 2 |
entropylab/tests/test_issue_204.py | qguyk/entropy | 0 | 17292 | import os
from datetime import datetime
import pytest
from entropylab import ExperimentResources, SqlAlchemyDB, PyNode, Graph
@pytest.mark.skipif(
    datetime.utcnow() > datetime(2022, 6, 25),
    reason="Please remove after two months have passed since the fix was merged",
)
def test_issue_204(initialized_project_dir_path, capsys):
    """Regression test for issue 204: an exception raised inside a node
    must be reported on stderr even when the entropy DB files did not
    exist before the run."""
    # The issue only reproduces on a fresh project, so remove any DB files
    # left behind by the fixture.
    db_files = [".entropy/params.db", ".entropy/entropy.db", ".entropy/entropy.hdf5"]
    for db_file in db_files:
        full_path = os.path.join(initialized_project_dir_path, db_file)
        if os.path.exists(full_path):
            os.remove(full_path)

    experiment_resources = ExperimentResources(
        SqlAlchemyDB(initialized_project_dir_path)
    )

    def root_node():
        print("root node")
        # Deliberate NameError that should be logged to stderr:
        print(a)
        return {}

    node0 = PyNode(label="root_node", program=root_node)
    experiment = Graph(resources=experiment_resources, graph={node0}, story="run_a")

    try:
        experiment.run()
    except RuntimeError:
        pass

    captured = capsys.readouterr()
    assert "message: name 'a' is not defined" in captured.err
| 2.4375 | 2 |
src/__init__.py | PY-GZKY/fconversion | 1 | 17293 | <reponame>PY-GZKY/fconversion
from .file_core import FileEngine
from src.utils.utils import *
from .version import __version__
| 0.902344 | 1 |
app/app/calc.py | benning55/recipe-app-api | 0 | 17294 | #
# def add(x, y):
# """
# Add Number Together
# """
# return x+y
#
#
# def subtract(x, y):
# """
# Subtract x from y
# """
# return x-y
| 3.90625 | 4 |
open_anafi/lib/indicator_tools.py | Cour-des-comptes/open-anafi-backend | 7 | 17295 | <gh_stars>1-10
from open_anafi.models import Indicator, IndicatorParameter, IndicatorLibelle
from open_anafi.serializers import IndicatorSerializer
from .frame_tools import FrameTools
from open_anafi.lib import parsing_tools
from open_anafi.lib.ply.parsing_classes import Indic
import re
from django.db import transaction
from django.core.exceptions import ObjectDoesNotExist
class IndicatorTools:
# NOTE(review): the original indentation of this class was lost in this dump.
# Every code line below is kept byte-for-byte; the nesting of the loops and
# conditionals must be confirmed against the upstream repository before
# re-indenting.
@staticmethod
def calculate_max_depth(indicator):
"""Calculates the depth of an indicator (the max depth of all its parameters)
:param indicator: The indicator to evaluate
:type indicator: class:`open_anafi.models.Indicator`
"""
depth = 0
# max over the indicator's parameters' depths (0 when it has none).
for parameter in indicator.parameters.all():
if parameter.depth > depth:
depth = parameter.depth
indicator.max_depth = depth
indicator.save()
@staticmethod
def update_depth(indicator):
"""Updates the depth of an indicator after an update.
Recursively updates all the affected indicators/frames
:param indicator: The indicator to evaluate
:type indicator: class:`open_anafi.models.Indicator`
"""
# Every parameter whose equation mentions this indicator's name is stale.
parameters = IndicatorParameter.objects.filter(original_equation__contains=indicator.name)
indicators_to_update = list(set([param.indicator for param in parameters]))
frames_to_update = list(indicator.frames.all())
if len(indicators_to_update) > 0:
for indic in indicators_to_update:
for frame in indic.frames.all(): frames_to_update.append(frame)
# For each indicator, we update the depth of all the parameters, then we calculate the max depth of the indicator
for param in indic.parameters.all(): IndicatorParameterTools.calculate_depth(param)
IndicatorTools.calculate_max_depth(indic)
# Recurse so indicators that depend on the ones just refreshed get updated too.
for indic in indicators_to_update: IndicatorTools.update_depth(indic)
# We update the depth of the frames
frames_to_update = list(set(frames_to_update))
if len(frames_to_update) > 0:
for frame in frames_to_update: FrameTools.calculate_depth(frame)
#This method can be optimized
@staticmethod
def update_indicator(equation, description, id, libelle=None):
"""Update an indicator.
Note that we cannot modify the indicator's name.
:param equation: The updated equation (updated or not)
:type equation: str
:param description: The updated description
:type description: str
:param id: The indicator's id
:type id: int
:param libelle: An extra libelle for the indicator
:type libelle: str
:return: The updated indicator
:rtype: class:`open_anafi.models.Indicator`
"""
indic = Indicator.objects.get(id=id)
# Create or update the (single) libelle attached to this indicator; more
# than one existing libelle is treated as an error.
if libelle is not None:
indicator_libelle = IndicatorLibelle.objects.filter(indicator=indic)
if len(indicator_libelle) > 1:
raise Exception('Cet indicateur possède plusieurs libellés')
elif len(indicator_libelle) == 0:
indicator_libelle = IndicatorLibelle.objects.create(libelle=libelle, indicator=indic)
indicator_libelle.save()
else:
indicator_libelle = indicator_libelle[0]
indicator_libelle.libelle = libelle
indicator_libelle.save()
if description is not None :
with transaction.atomic():
indic.description = description
indic.save()
# Re-parse the formula atomically: back up the current state, install the
# new parameters via parsing_tools, then delete the old parameters.
if equation is not None:
#
with transaction.atomic():
backup_indicator = IndicatorSerializer(indic).data
old_params = IndicatorParameter.objects.filter(indicator=indic)
old_params_ids = [ p.id for p in old_params].copy()
if len(backup_indicator.get('libelles')) > 1:
raise Exception('Cet indicateur possède plusieurs libellés')
parsing_tools.update_formula(equation, indic)
for parameter in IndicatorParameter.objects.filter(id__in=old_params_ids):
parameter.delete()
indic = Indicator.objects.get(name=backup_indicator.get('name'))
indic.save()
# Propagate the new depths to dependent indicators and frames.
IndicatorTools.update_depth(indic)
return indic.name
@staticmethod
def check_equation_element(element):
# Raises when a parsed Indic node references an indicator that does not
# exist in the database; other node types are ignored.
if type(element) is Indic:
try:
Indicator.objects.get(name=element.name)
except ObjectDoesNotExist:
raise Exception(f"L'indicateur {element.name} n'existe pas.")
@staticmethod
def check_equation(equation):
# Parses the equation and validates every indicator reference in it,
# re-raising any failure as a single user-facing error.
try:
parsed_indicator = parsing_tools.parse_equation(equation)
for eq in parsed_indicator:
if type(eq['tree']) is tuple:
for element in eq['tree']:
IndicatorTools.check_equation_element(element)
else:
IndicatorTools.check_equation_element(eq['tree'])
except Exception as e:
raise Exception(f"Erreur dans la formule : {str(e)}")
@staticmethod
def check_indicator_usages_in_formulas(indicator):
"""
Checks if an indicator is part of a formula of any other indicator.
Used to check if an indicator is safe to remove.
:param indicator: The indicator to check
:type indicator: :class:`open_anafi.models.Indicator`
:return: names of the indicators whose formulas reference this one
"""
result = [indicator_parameter.indicator.name for indicator_parameter in
IndicatorParameter.objects.filter(original_equation__icontains=indicator.name)]
return result
class IndicatorParameterTools:
    @staticmethod
    def calculate_depth(indicator_parameter):
        """Compute and persist the depth of one indicator parameter.

        Assumes every indicator referenced by the parameter's original
        equation already exists with an up-to-date ``max_depth``. The
        parameter's depth is 1 + the deepest referenced indicator, or 1
        when the equation references no indicator at all.

        NOTE(review): when there are no referenced indicators the
        parameter is saved twice with depth 1 (there was no early return
        in the original); preserved as-is.

        :param indicator_parameter: The indicator parameter to evaluate
        :type indicator_parameter: class:`open_anafi.models.IndicatorParameter`
        """
        deepest = 0
        referenced = IndicatorParameterTools.extract_indicators_from_equation(
            indicator_parameter.original_equation
        )
        if len(referenced) == 0:
            indicator_parameter.depth = 1
            indicator_parameter.save()
        for indicator in referenced:
            if indicator.max_depth > deepest:
                deepest = indicator.max_depth
        indicator_parameter.depth = deepest + 1
        indicator_parameter.save()

    @staticmethod
    def extract_indicators_from_equation(equation):
        """Collect the Indicator objects named in an equation string.

        Splits the equation on arithmetic operators/brackets and keeps
        the tokens that look like indicator names (UPPERCASE_WITH_UNDERSCORES).

        :param equation: An equation according to the defined language
        :type equation: str
        :return: the Indicator objects present in the equation
        :rtype: list of class:`open_anafi.models.Indicator`
        :raises Exception: when a referenced indicator does not exist
        """
        operators = re.compile('[\-+/*^(\[)\]]')
        is_indicator = re.compile('[A-Z0-9]+(_[A-Z0-9]+)+')
        tokens = list(filter(None, map(str.strip, operators.split(equation))))
        found = []
        for item in tokens:
            if not is_indicator.match(item):
                continue
            try:
                found.append(Indicator.objects.get(name=item))
            except ObjectDoesNotExist:
                raise Exception(f"L'indicateur {item} n'existe pas.")
        return found
| 2.09375 | 2 |
src/python/commands/LikeImpl.py | plewis/phycas | 3 | 17296 | <filename>src/python/commands/LikeImpl.py
import os,sys,math,random
from phycas import *
from MCMCManager import LikelihoodCore
from phycas.utilities.PhycasCommand import *
from phycas.readnexus import NexusReader
from phycas.utilities.CommonFunctions import CommonFunctions
class LikeImpl(CommonFunctions):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Computes the log-likelihood of the current model on the current tree
and data (see ``run``).
NOTE(review): this module uses Python 2 idioms (print statement,
``i.next()``, bare except) and the dump lost its indentation; code
lines below are preserved byte-for-byte.
"""
def __init__(self, opts):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Initializes the LikeImpl object by assigning supplied phycas object
to a data member variable.
"""
CommonFunctions.__init__(self, opts)
self.starting_tree = None
self.taxon_labels = None
self.data_matrix = None
self.ntax = None
self.nchar = None
self.reader = NexusReader()
self.npatterns = [] # Will hold the actual number of patterns for each subset after data file has been read
def _loadData(self, matrix):
# Caches the data matrix plus taxon labels and dimensions; a None
# matrix resets everything to the empty state.
self.data_matrix = matrix
if matrix is None:
self.taxon_labels = []
self.ntax = 0
self.nchar = 0 # used for Gelfand-Ghosh simulations only
else:
self.taxon_labels = matrix.taxa
self.ntax = self.data_matrix.getNTax()
self.nchar = self.data_matrix.getNChar() # used for Gelfand-Ghosh simulations only
self.phycassert(len(self.taxon_labels) == self.ntax, "Number of taxon labels does not match number of taxa.")
def getStartingTree(self):
# Lazily pulls the first tree from opts.tree_source (Python 2
# iterator protocol: i.next()); errors are reported and re-raised.
if self.starting_tree is None:
try:
tr_source = self.opts.tree_source
tr_source.setActiveTaxonLabels(self.taxon_labels)
i = iter(tr_source)
self.starting_tree = i.next()
except:
self.stdout.error("A tree could not be obtained from the tree_source")
raise
return self.starting_tree
def run(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Computes the log-likelihood based on the current tree and current
model.
"""
ds = self.opts.data_source
mat = ds and ds.getMatrix() or None
self.phycassert(self.opts.data_source is not None, "specify data_source before calling like()")
self._loadData(mat)
self.starting_tree = self.getStartingTree()
if self.opts.preorder_edgelens is not None:
self.starting_tree.replaceEdgeLens(self.opts.preorder_edgelens)
# NOTE(review): leftover debug output (Python 2 print statement).
print '@@@@@@@@@@ self.starting_tree.makeNewick() =',self.starting_tree.makeNewick()
core = LikelihoodCore(self)
core.setupCore()
core.prepareForLikelihood()
# Optionally record per-site likelihood details on self.opts; the
# fields are cleared first so stale values never survive a failure.
if self.opts.store_site_likes:
core.likelihood.storeSiteLikelihoods(True)
self.opts.pattern_counts = None
self.opts.char_to_pattern = None
self.opts.site_likes = None
self.opts.site_uf = None
else:
core.likelihood.storeSiteLikelihoods(False)
lnL = core.calcLnLikelihood()
if self.opts.store_site_likes:
self.opts.pattern_counts = core.likelihood.getPatternCounts()
self.opts.char_to_pattern = core.likelihood.getCharIndexToPatternIndex()
self.opts.site_likes = core.likelihood.getSiteLikelihoods()
self.opts.site_uf = core.likelihood.getSiteUF()
return lnL
| 2.28125 | 2 |
app/models.py | dangger/awesome-flask-todo | 0 | 17297 | <reponame>dangger/awesome-flask-todo<gh_stars>0
from app import db
import datetime
from flask_mongoengine.wtf import model_form
class Todo(db.Document):
    """A single todo item stored in MongoDB (via flask-mongoengine)."""
    content = db.StringField(required=True, max_length=20)
    # Fix: pass the callable, not its result. `datetime.datetime.now()`
    # was evaluated once at import time, so every document shared the
    # same frozen timestamp; with the callable each new document gets
    # its own creation time.
    time = db.DateTimeField(default=datetime.datetime.now)
    # 0 = open; other values mark progress/completion states.
    status = db.IntField(default=0)


TodoForm = model_form(Todo)
| 2.203125 | 2 |
Python/Mundo01/teste/teste2.py | eStev4m/CursoPython | 0 | 17298 | dia = int(input('Dia = '))
mes = str(input('Mês = '))
ano = int(input('Ano = '))
print('Você nasceu no dia {} de {} de {}. Correto?' .format(dia, mes, ano))
| 3.84375 | 4 |
firebirdsql/services.py | dand-oss/pyfirebirdsql | 31 | 17299 | <filename>firebirdsql/services.py<gh_stars>10-100
##############################################################################
# Copyright (c) 2009-2021, <NAME><<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Python DB-API 2.0 module for Firebird.
##############################################################################
from firebirdsql.consts import * # noqa
from firebirdsql.utils import * # noqa
from firebirdsql.fbcore import Connection
class Services(Connection):
def sweep(self, database_name, callback=None):
    """Run a database sweep through the service manager, streaming the
    server's textual status lines to ``callback``.

    :param database_name: path of the database to sweep.
    :param callback: optional callable invoked with each status line.
    """
    # Service parameter block: action byte, database name, options mask.
    request = bs([isc_spb_rpr_validate_db | isc_spb_rpr_sweep_db])
    db_name = self.str_to_bytes(database_name)
    request += bs([isc_spb_dbname]) + int_to_bytes(len(db_name), 2) + db_name
    option_mask = 0x02
    request += bs([isc_spb_options]) + int_to_bytes(option_mask, 4)

    self._op_service_start(request)
    (handle, _, _) = self._op_response()
    self.svc_handle = handle

    # Poll until the terminator (0x3e 0x00 0x00 0x01) arrives, forwarding
    # each length-prefixed status line to the callback.
    while True:
        self._op_service_info(bs([0x02]), bs([0x3e]))
        (_, _, buf) = self._op_response()
        if buf[:4] == bs([0x3e, 0x00, 0x00, 0x01]):
            break
        if callback:
            ln = bytes_to_int(buf[1:3])
            callback(self.bytes_to_str(buf[3:3 + ln]))
def bringOnline(self, database_name, callback=None):
    """Bring a database back online via the service manager's properties
    action, streaming the server's status lines to ``callback``.

    :param database_name: path of the database to bring online.
    :param callback: optional callable invoked with each status line.
    """
    # Service parameter block: properties action, database name and the
    # options mask (0x0200 -- see the isc_spb_prp_* constants).
    request = bs([isc_action_svc_properties])
    db_name = self.str_to_bytes(database_name)
    request += bs([isc_spb_dbname]) + int_to_bytes(len(db_name), 2) + db_name
    option_mask = 0x0200
    request += bs([isc_spb_options]) + int_to_bytes(option_mask, 4)

    self._op_service_start(request)
    (handle, _, _) = self._op_response()
    self.svc_handle = handle

    # Poll until the terminator (0x3e 0x00 0x00 0x01) arrives, forwarding
    # each length-prefixed status line to the callback.
    while True:
        self._op_service_info(bs([0x02]), bs([0x3e]))
        (_, _, buf) = self._op_response()
        if buf[:4] == bs([0x3e, 0x00, 0x00, 0x01]):
            break
        if callback:
            ln = bytes_to_int(buf[1:3])
            callback(self.bytes_to_str(buf[3:3 + ln]))
def shutdown(
        self, database_name, timeout=0, shutForce=True,
        shutDenyNewAttachments=False, shutDenyNewTransactions=False,
        callback=None
):
    """Shut a database down through the service manager.

    Each enabled flag appends the corresponding isc_spb_prp_* item with
    the supplied ``timeout`` value; the server's status lines are
    streamed to ``callback``.
    """
    request = bs([isc_action_svc_properties])
    db_name = self.str_to_bytes(database_name)
    request += bs([isc_spb_dbname]) + int_to_bytes(len(db_name), 2) + db_name
    if shutForce:
        request += bs([isc_spb_prp_shutdown_db]) + int_to_bytes(timeout, 4)
    if shutDenyNewAttachments:
        request += bs([isc_spb_prp_deny_new_attachments]) + int_to_bytes(timeout, 4)
    if shutDenyNewTransactions:
        request += bs([isc_spb_prp_deny_new_transactions]) + int_to_bytes(timeout, 4)

    self._op_service_start(request)
    (handle, _, _) = self._op_response()
    self.svc_handle = handle

    # Poll until the terminator (0x3e 0x00 0x00 0x01) arrives, forwarding
    # each length-prefixed status line to the callback.
    while True:
        self._op_service_info(bs([0x02]), bs([0x3e]))
        (_, _, buf) = self._op_response()
        if buf[:4] == bs([0x3e, 0x00, 0x00, 0x01]):
            break
        if callback:
            ln = bytes_to_int(buf[1:3])
            callback(self.bytes_to_str(buf[3:3 + ln]))
def repair(
self, database_name,
readOnlyValidation=True, ignoreChecksums=False,
killUnavailableShadows=False, mendDatabase=False,
validateDatabase=False, validateRecordFragments=False, callback=None
):
# Validate/repair a database through the service manager. Each boolean
# flag maps to one isc_spb_rpr_* bit in the options mask; the server's
# status lines are streamed to `callback`.
spb = bs([isc_action_svc_repair])
s = self.str_to_bytes(database_name)
spb += bs([isc_spb_dbname]) + int_to_bytes(len(s), 2) + s
optionMask = 0
if readOnlyValidation:
optionMask |= isc_spb_rpr_check_db
if ignoreChecksums:
optionMask |= isc_spb_rpr_ignore_checksum
if killUnavailableShadows:
optionMask |= isc_spb_rpr_kill_shadows
if mendDatabase:
optionMask |= isc_spb_rpr_mend_db
if validateDatabase:
optionMask |= isc_spb_rpr_validate_db
if validateRecordFragments:
optionMask |= isc_spb_rpr_full
spb += bs([isc_spb_options]) + int_to_bytes(optionMask, 4)
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
# Poll until the terminator (0x3e 0x00 0x00 0x01) arrives, forwarding each
# length-prefixed status line to the callback.
while True:
self._op_service_info(bs([0x02]), bs([0x3e]))
(h, oid, buf) = self._op_response()
if buf[:4] == bs([0x3e, 0x00, 0x00, 0x01]):
break
if callback:
ln = bytes_to_int(buf[1:3])
callback(self.bytes_to_str(buf[3:3+ln]))
def backup_database(
self, database_name, backup_filename,
transportable=True, metadataOnly=False, garbageCollect=True,
ignoreLimboTransactions=False, ignoreChecksums=False,
convertExternalTablesToInternalTables=True, expand=False, callback=None
):
spb = bs([isc_action_svc_backup])
s = self.str_to_bytes(database_name)
spb += bs([isc_spb_dbname]) + int_to_bytes(len(s), 2) + s
s = self.str_to_bytes(backup_filename)
spb += bs([isc_spb_bkp_file]) + int_to_bytes(len(s), 2) + s
optionMask = 0
if ignoreChecksums:
optionMask |= isc_spb_bkp_ignore_checksums
if ignoreLimboTransactions:
optionMask |= isc_spb_bkp_ignore_limbo
if metadataOnly:
optionMask |= isc_spb_bkp_metadata_only
if not garbageCollect:
optionMask |= isc_spb_bkp_no_garbage_collect
if not transportable:
optionMask |= isc_spb_bkp_non_transportable
if convertExternalTablesToInternalTables:
optionMask |= isc_spb_bkp_convert
if expand:
optionMask |= isc_spb_bkp_expand
spb += bs([isc_spb_options]) + int_to_bytes(optionMask, 4)
if callback:
spb += bs([isc_spb_verbose])
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
while True:
self._op_service_info(bs([0x02]), bs([0x3e]))
(h, oid, buf) = self._op_response()
if buf[:4] == bs([0x3e, 0x00, 0x00, 0x01]):
break
if callback:
ln = bytes_to_int(buf[1:3])
callback(self.bytes_to_str(buf[3:3+ln]))
def restore_database(
self, restore_filename, database_name,
replace=False, create=False, deactivateIndexes=False,
doNotRestoreShadows=False, doNotEnforceConstraints=False,
commitAfterEachTable=False, useAllPageSpace=False, pageSize=None,
cacheBuffers=None, callback=None
):
spb = bs([isc_action_svc_restore])
s = self.str_to_bytes(restore_filename)
spb += bs([isc_spb_bkp_file]) + int_to_bytes(len(s), 2) + s
s = self.str_to_bytes(database_name)
spb += bs([isc_spb_dbname]) + int_to_bytes(len(s), 2) + s
optionMask = 0
if replace:
optionMask |= isc_spb_res_replace
if create:
optionMask |= isc_spb_res_create
if deactivateIndexes:
optionMask |= isc_spb_res_deactivate_idx
if doNotRestoreShadows:
optionMask |= isc_spb_res_no_shadow
if doNotEnforceConstraints:
optionMask |= isc_spb_res_no_validity
if commitAfterEachTable:
optionMask |= isc_spb_res_one_at_a_time
if useAllPageSpace:
optionMask |= isc_spb_res_use_all_space
spb += bs([isc_spb_options]) + int_to_bytes(optionMask, 4)
if pageSize:
spb += bs([isc_spb_res_page_size]) + int_to_bytes(pageSize, 4)
if cacheBuffers:
spb += bs([isc_spb_res_buffers]) + int_to_bytes(cacheBuffers, 4)
if callback:
spb += bs([isc_spb_verbose])
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
while True:
self._op_service_info(bs([0x02]), bs([0x3e]))
(h, oid, buf) = self._op_response()
if buf[:4] == bs([0x3e, 0x00, 0x00, 0x01]):
break
if callback:
ln = bytes_to_int(buf[1:3])
callback(self.bytes_to_str(buf[3:3+ln]))
def trace_start(self, name=None, cfg=None, callback=None):
spb = bs([isc_action_svc_trace_start])
if name:
s = self.str_to_bytes(name)
spb += bs([isc_spb_trc_name]) + int_to_bytes(len(s), 2) + s
if cfg:
s = self.str_to_bytes(cfg)
spb += bs([isc_spb_trc_cfg]) + int_to_bytes(len(s), 2) + s
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
while True:
self._op_service_info(bs([0x02]), bs([0x3e]))
(h, oid, buf) = self._op_response()
if buf[:4] == bs([0x3e, 0x00, 0x00, 0x01]):
break
ln = bytes_to_int(buf[1:2])
if callback:
callback(self.bytes_to_str(buf[3:3+ln]))
def trace_stop(self, id, callback=None):
id = int(id)
spb = bs([isc_action_svc_trace_stop])
spb += bs([isc_spb_trc_id]) + int_to_bytes(id, 4)
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
self._op_service_info(bs([0x02]), bs([0x3e]))
(h, oid, buf) = self._op_response()
ln = bytes_to_int(buf[1:2])
if callback:
callback(self.bytes_to_str(buf[3:3+ln]))
def trace_suspend(self, id, callback=None):
id = int(id)
spb = bs([isc_action_svc_trace_suspend])
spb += bs([isc_spb_trc_id]) + int_to_bytes(id, 4)
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
self._op_service_info(bs([0x02]), bs([0x3e]))
(h, oid, buf) = self._op_response()
ln = bytes_to_int(buf[1:2])
if callback:
callback(self.bytes_to_str(buf[3:3+ln]))
def trace_resume(self, id, callback=None):
id = int(id)
spb = bs([isc_action_svc_trace_resume])
spb += bs([isc_spb_trc_id]) + int_to_bytes(id, 4)
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
self._op_service_info(bs([0x02]), bs([0x3e]))
(h, oid, buf) = self._op_response()
ln = bytes_to_int(buf[1:2])
if callback:
callback(self.bytes_to_str(buf[3:3+ln]))
def trace_list(self, callback=None):
spb = bs([isc_action_svc_trace_list])
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
while True:
self._op_service_info(bs([0x02]), bs([0x3e]))
(h, oid, buf) = self._op_response()
if buf[:4] == bs([0x3e, 0x00, 0x00, 0x01]):
break
ln = bytes_to_int(buf[1:2])
if callback:
callback(self.bytes_to_str(buf[3:3+ln]))
def _getIntegerVal(self, item_id):
self._op_service_info(bs([]), bs([item_id]))
(h, oid, buf) = self._op_response()
assert byte_to_int(buf[0]) == item_id
return byte_to_int(buf[1])
def _getStringVal(self, item_id):
self._op_service_info(bs([]), bs([item_id]))
(h, oid, buf) = self._op_response()
assert byte_to_int(buf[0]) == item_id
ln = bytes_to_int(buf[1:3])
return self.bytes_to_str(buf[3:3+ln])
def _getSvrDbInfo(self):
self._op_service_info(bs([]), bs([isc_info_svc_svr_db_info]))
(h, oid, buf) = self._op_response()
assert byte_to_int(buf[0]) == isc_info_svc_svr_db_info
db_names = []
i = 1
while i < len(buf) and byte_to_int(buf[i]) != isc_info_flag_end:
if byte_to_int(buf[i]) == isc_spb_num_att:
num_attach = bytes_to_int(buf[i+1:i+5])
i += 5
elif byte_to_int(buf[i]) == isc_spb_num_db:
bytes_to_int(buf[7:11]) # db_num
i += 5
elif byte_to_int(buf[i]) == isc_spb_dbname:
ln = bytes_to_int(buf[i+1:i+3])
db_name = self.bytes_to_str(buf[i+3:i+3+ln])
db_names.append(db_name)
i += 3 + ln
return (num_attach, db_names)
def _getLogLines(self, spb):
self._op_service_start(spb)
(h, oid, buf) = self._op_response()
self.svc_handle = h
logs = ''
while True:
self._op_service_info(bs([]), bs([0x3e]))
(h, oid, buf) = self._op_response()
if buf[:4] == bs([0x3e, 0x00, 0x00, 0x01]):
break
ln = bytes_to_int(buf[1:2])
logs += self.bytes_to_str(buf[3:3+ln]) + '\n'
return logs
    def getServiceManagerVersion(self):
        """Return the service manager version number as an integer."""
        return self._getIntegerVal(isc_info_svc_version)
    def getServerVersion(self):
        """Return the server version string."""
        return self._getStringVal(isc_info_svc_server_version)
    def getArchitecture(self):
        """Return the server implementation/architecture string."""
        return self._getStringVal(isc_info_svc_implementation)
    def getHomeDir(self):
        """Return the server's installation (home) directory path."""
        return self._getStringVal(isc_info_svc_get_env)
    def getSecurityDatabasePath(self):
        """Return the path of the server's security database."""
        return self._getStringVal(isc_info_svc_user_dbpath)
    def getLockFileDir(self):
        """Return the directory holding the server's lock files."""
        return self._getStringVal(isc_info_svc_get_env_lock)
    def getCapabilityMask(self):
        """Return the server capability bitmask as an integer."""
        return self._getIntegerVal(isc_info_svc_capabilities)
    def getMessageFileDir(self):
        """Return the directory holding the server's message files."""
        return self._getStringVal(isc_info_svc_get_env_msg)
    def getConnectionCount(self):
        """Return the number of attachments reported by the server."""
        return self._getSvrDbInfo()[0]
    def getAttachedDatabaseNames(self):
        """Return the list of currently attached database names."""
        return self._getSvrDbInfo()[1]
    def getLog(self):
        """Return the server log contents as one string
        (isc_action_svc_get_fb_log)."""
        spb = bs([isc_action_svc_get_fb_log])
        return self._getLogLines(spb)
    def getStatistics(
        self, dbname, showOnlyDatabaseLogPages=False,
        showOnlyDatabaseHeaderPages=False,
        showUserDataPages=True,
        showUserIndexPages=True,
        showSystemTablesAndIndexes=False
    ):
        """Return gstat-style page statistics for *dbname* as one string.

        Each boolean toggles one isc_spb_sts_* bit in the option mask;
        the assembled request is executed by _getLogLines, which
        collects the service's verbose output.
        """
        optionMask = 0
        if showUserDataPages:
            optionMask |= isc_spb_sts_data_pages
        if showOnlyDatabaseLogPages:
            optionMask |= isc_spb_sts_db_log
        if showOnlyDatabaseHeaderPages:
            optionMask |= isc_spb_sts_hdr_pages
        if showUserIndexPages:
            optionMask |= isc_spb_sts_idx_pages
        if showSystemTablesAndIndexes:
            optionMask |= isc_spb_sts_sys_relations
        # NOTE(review): isc_spb_res_length is used as the SPB action byte
        # here, where a database-statistics action code is expected; it
        # presumably shares that action's numeric value (11), so this works
        # by coincidence.  Switching to the named stats-action constant
        # would be clearer -- confirm it is defined in this module first.
        spb = bs([isc_spb_res_length])
        s = self.str_to_bytes(dbname)
        spb += bs([isc_spb_dbname]) + int_to_bytes(len(s), 2) + s
        spb += bs([isc_spb_options]) + int_to_bytes(optionMask, 4)
        return self._getLogLines(spb)
def connect(**kwargs):
    """Open a service-manager connection.

    Forwards all keyword arguments to Services, forcing
    ``is_services=True`` (overriding any caller-supplied value).
    """
    params = dict(kwargs)
    params['is_services'] = True
    return Services(**params)
| 1.234375 | 1 |