| id | content |
|---|---|
404386
|
from utils import *
class MetaDataParser(object):
def __init__(self):
# sentiment files
train_sentiment_files = sorted(glob.glob('../../input/petfinder-adoption-prediction/train_sentiment/*.json'))
test_sentiment_files = sorted(glob.glob('../../input/petfinder-adoption-prediction/test_sentiment/*.json'))
sentiment_files = train_sentiment_files + test_sentiment_files
self.sentiment_files = pd.DataFrame(sentiment_files, columns=['sentiment_filename'])
self.sentiment_files['PetID'] = self.sentiment_files['sentiment_filename'].apply(
lambda x: x.split('/')[-1].split('.')[0])
# metadata files
train_metadata_files = sorted(glob.glob('../../input/petfinder-adoption-prediction/train_metadata/*.json'))
test_metadata_files = sorted(glob.glob('../../input/petfinder-adoption-prediction/test_metadata/*.json'))
metadata_files = train_metadata_files + test_metadata_files
self.metadata_files = pd.DataFrame(metadata_files, columns=['metadata_filename'])
self.metadata_files['PetID'] = self.metadata_files['metadata_filename'].apply(
lambda x: x.split('/')[-1].split('-')[0])
def open_json_file(self, filename):
with open(filename, 'r', encoding="utf-8") as f:
metadata_file = json.load(f)
return metadata_file
def get_stats(self, array, name):
stats = [np.mean, np.max, np.min, np.sum]
result = {}
if len(array):
for stat in stats:
result[name + '_' + stat.__name__] = stat(array)
else:
for stat in stats:
result[name + '_' + stat.__name__] = 0
return result
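# Hedged illustration of get_stats: for a non-empty array it yields one entry per
# aggregate, keyed by the numpy function's __name__ ('mean', 'amax'/'max',
# 'amin'/'min', 'sum', depending on the NumPy version), e.g.
# get_stats([0.2, 0.8], 'annots_score') -> {'annots_score_mean': 0.5, ...};
# an empty array yields 0 for every key.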
def parse_sentiment_file(self, file):
file_sentiment = file['documentSentiment']
file_entities = [x['name'] for x in file['entities']]
file_entities = ' '.join(file_entities)
file_sentences_text = [x['text']['content'] for x in file['sentences']]
file_sentences_text = ' '.join(file_sentences_text)
file_sentences_sentiment = [x['sentiment'] for x in file['sentences']]
file_sentences_sentiment = pd.DataFrame.from_dict(
file_sentences_sentiment, orient='columns').sum()
file_sentences_sentiment = file_sentences_sentiment.add_prefix('document_').to_dict()
file_sentiment.update(file_sentences_sentiment)
file_sentiment.update({"sentiment_text": file_sentences_text})
return pd.Series(file_sentiment)
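# Assumed input shape (Google Cloud Natural Language output shipped with the
# PetFinder competition): 'documentSentiment' carries 'magnitude' and 'score',
# each entry in 'sentences' has 'text.content' and its own 'sentiment' dict, and
# 'entities' lists detected entity names.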
def parse_metadata(self, file):
file_keys = list(file.keys())
if 'labelAnnotations' in file_keys:
label_annotations = file['labelAnnotations']
file_top_score = [x['score'] for x in label_annotations]
file_top_desc = [x['description'] for x in label_annotations]
dog_cat_scores = []
dog_cat_topics = []
is_dog_or_cat = []
for label in label_annotations:
if label['description'] == 'dog' or label['description'] == 'cat':
dog_cat_scores.append(label['score'])
dog_cat_topics.append(label['topicality'])
is_dog_or_cat.append(1)
else:
is_dog_or_cat.append(0)
else:
file_top_score = []
file_top_desc = []
dog_cat_scores = []
dog_cat_topics = []
is_dog_or_cat = []
if 'faceAnnotations' in file_keys:
file_face = file['faceAnnotations']
n_faces = len(file_face)
else:
n_faces = 0
if 'textAnnotations' in file_keys:
text_annotations = file['textAnnotations']
file_n_text_annotations = len(text_annotations)
file_len_text = [len(text['description']) for text in text_annotations]
else:
file_n_text_annotations = 0
file_len_text = []
file_colors = file['imagePropertiesAnnotation']['dominantColors']['colors']
file_crops = file['cropHintsAnnotation']['cropHints']
file_color_score = [x['score'] for x in file_colors]
file_color_pixelfrac = [x['pixelFraction'] for x in file_colors]
file_color_red = [x['color']['red'] if 'red' in x['color'].keys() else 0 for x in file_colors]
file_color_blue = [x['color']['blue'] if 'blue' in x['color'].keys() else 0 for x in file_colors]
file_color_green = [x['color']['green'] if 'green' in x['color'].keys() else 0 for x in file_colors]
file_crop_conf = np.mean([x['confidence'] for x in file_crops])
file_crop_x = np.mean([x['boundingPoly']['vertices'][1]['x'] for x in file_crops])
file_crop_y = np.mean([x['boundingPoly']['vertices'][3]['y'] for x in file_crops])
if 'importanceFraction' in file_crops[0].keys():
file_crop_importance = np.mean([x['importanceFraction'] for x in file_crops])
else:
file_crop_importance = 0
metadata = {
'annots_top_desc': ' '.join(file_top_desc),
'n_faces': n_faces,
'n_text_annotations': file_n_text_annotations,
'crop_conf': file_crop_conf,
'crop_x': file_crop_x,
'crop_y': file_crop_y,
'crop_importance': file_crop_importance,
}
metadata.update(self.get_stats(file_top_score, 'annots_score'))
metadata.update(self.get_stats(file_color_score, 'color_score'))
metadata.update(self.get_stats(file_color_pixelfrac, 'color_pixel_score'))
metadata.update(self.get_stats(file_color_red, 'color_red_score'))
metadata.update(self.get_stats(file_color_blue, 'color_blue_score'))
metadata.update(self.get_stats(file_color_green, 'color_green_score'))
metadata.update(self.get_stats(dog_cat_scores, 'dog_cat_scores'))
metadata.update(self.get_stats(dog_cat_topics, 'dog_cat_topics'))
metadata.update(self.get_stats(is_dog_or_cat, 'is_dog_or_cat'))
metadata.update(self.get_stats(file_len_text, 'len_text'))
return pd.Series(metadata)
def _transform(self, path, sentiment=True):
file = self.open_json_file(path)
if sentiment:
result = self.parse_sentiment_file(file)
else:
result = self.parse_metadata(file)
return result
def len_text_features(train):
train['Length_Description'] = train['Description'].map(len)
train['Length_annots_top_desc'] = train['annots_top_desc'].map(len)
train['Lengths_sentences_text'] = train['sentiment_text'].map(len)
return train
with timer('merge additional files'):
train = merge_breed_name(train)
with timer('metadata'):
# TODO: parallelization
meta_parser = MetaDataParser()
sentiment_features = meta_parser.sentiment_files['sentiment_filename'].apply(
lambda x: meta_parser._transform(x, sentiment=True))
meta_parser.sentiment_files = pd.concat([meta_parser.sentiment_files, sentiment_features], axis=1, sort=False)
meta_features = meta_parser.metadata_files['metadata_filename'].apply(
lambda x: meta_parser._transform(x, sentiment=False))
meta_parser.metadata_files = pd.concat([meta_parser.metadata_files, meta_features], axis=1, sort=False)
stats = ['mean', 'min', 'max', 'median']
columns = [c for c in sentiment_features.columns if c != 'sentiment_text']
g = meta_parser.sentiment_files[columns + ['PetID']].groupby('PetID').agg(stats)
g.columns = [c + '_' + stat for c in columns for stat in stats]
train = train.merge(g, how='left', on='PetID')
columns = [c for c in meta_features.columns if c != 'annots_top_desc']
g = meta_parser.metadata_files[columns + ['PetID']].groupby('PetID').agg(stats)
g.columns = [c + '_' + stat for c in columns for stat in stats]
train = train.merge(g, how='left', on='PetID')
with timer('metadata, annots_top_desc'):
meta_features = meta_parser.metadata_files[['PetID', 'annots_top_desc']]
meta_features = meta_features.groupby('PetID')['annots_top_desc'].sum().reset_index()
train = train.merge(meta_features, how='left', on='PetID')
sentiment_features = meta_parser.sentiment_files[['PetID', 'sentiment_text']]
sentiment_features = sentiment_features.groupby('PetID')['sentiment_text'].sum().reset_index()
train = train.merge(sentiment_features, how='left', on='PetID')
train['desc'] = ''
for c in ['BreedName_main_breed', 'BreedName_second_breed', 'annots_top_desc', 'sentiment_text']:
train['desc'] += ' ' + train[c].astype(str)
with timer('kernel text features'):
orig_cols = train.columns
train = len_text_features(train)
new_cols = [c for c in train.columns if c not in orig_cols]
train[new_cols].to_feather("../feature/kernel_text.feather")
|
404405
|
import random
from typing import Dict
from framework.data_generator.replay.replay_generator import ReplayDownloaderGenerator
from framework.replay.replay_format import GeneratedHit
class HitGenerator(ReplayDownloaderGenerator):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.hit_filter = None
self.hit_buffer = []
self.current_replay = None
def initialize(self, hit_filter:Dict=None, **kwargs):
super().initialize(**kwargs)
self.hit_filter = hit_filter
def has_next(self):
return len(self.hit_buffer) > 0 or super().has_next()
def filter_hit(self, hit):
for key, filter in self.hit_filter.items():
if getattr(hit, key) != filter:
return False
return True
def __get_next_hit(self):
while len(self.hit_buffer) > 0:
if self.hit_filter is not None:
next_hit = self.hit_buffer.pop()
if self.filter_hit(next_hit):
return GeneratedHit(next_hit, self.current_replay)
else:
return GeneratedHit(self.hit_buffer.pop(), self.current_replay)
self.current_replay = super()._next()
proto = self.current_replay.get_proto()
hits = proto.game_stats.hits
self.hit_buffer = [proto_hit for proto_hit in hits]
self.logger.info('%s hits in queue', len(self.hit_buffer))
if self.shuffle and len(self.hit_buffer) > 0:
random.shuffle(self.hit_buffer)
return self.__get_next_hit()
def _next(self) -> GeneratedHit:
return self.__get_next_hit()
if __name__ == "__main__":
# https://calculated.gg/api/v1/parsed/1097A28E46D0756EEB7820BFD31BE226.replay.pts?key=1
hit_creator = HitGenerator(max_pages=1)
hit_creator.initialize(buffer_size=10, parallel_threads=1, hit_filter={'goal': True})
count = 1
for hit in hit_creator.get_data():
print(str(count))
count += 1
|
404408
|
import scipy.io
import scipy.stats
import numpy as np
from EasyTL import EasyTL
import time
if __name__ == "__main__":
datadir = r"D:\Datasets\EasyTL\amazon_review"
str_domain = ["books", "dvd", "elec", "kitchen"]
list_acc = []
for i in range(len(str_domain)):
for j in range(len(str_domain)):
if i == j:
continue
print("{} - {}".format(str_domain[i], str_domain[j]))
mat1 = scipy.io.loadmat(datadir + "/{}_400.mat".format(str_domain[i]))
Xs = mat1["fts"]
Ys = mat1["labels"]
mat2 = scipy.io.loadmat(datadir + "/{}_400.mat".format(str_domain[j]))
Xt = mat2["fts"]
Yt = mat2["labels"]
Ys += 1
Yt += 1
Xs = Xs / np.tile(np.sum(Xs, axis=1).reshape(-1, 1), [1, Xs.shape[1]])
Xs = scipy.stats.mstats.zscore(Xs)
Xt = Xt / np.tile(np.sum(Xt, axis=1).reshape(-1, 1), [1, Xt.shape[1]])
Xt = scipy.stats.mstats.zscore(Xt)
Xs[np.isnan(Xs)] = 0
Xt[np.isnan(Xt)] = 0
t0 = time.time()
Acc1, _ = EasyTL(Xs,Ys,Xt,Yt,"raw")
t1 = time.time()
print("Time Elapsed: {:.2f} sec".format(t1 - t0))
Acc2, _ = EasyTL(Xs,Ys,Xt,Yt)
t2 = time.time()
print("Time Elapsed: {:.2f} sec".format(t2 - t1))
print('EasyTL(c) Acc: {:.1f} % || EasyTL Acc: {:.1f} %'.format(Acc1*100, Acc2*100))
list_acc.append([Acc1,Acc2])
acc = np.array(list_acc)
avg = np.mean(acc, axis=0)
print('EasyTL(c) AVG Acc: {:.1f} %'.format(avg[0]*100))
print('EasyTL AVG Acc: {:.1f} %'.format(avg[1]*100))
|
404434
|
from common_fixtures import * # NOQA
def test_create_k8s_container_no_k8s(context):
c = context.create_container(labels={
'io.kubernetes.pod.namespace': 'n',
'io.kubernetes.pod.name': 'p',
'io.kubernetes.container.name': 'POD',
})
c = context.client.wait_success(c)
assert c.state == 'running'
def test_create_k8s_container_no_k8s_fail(new_context, super_client):
client = new_context.client
c = new_context.create_container(labels={
'io.kubernetes.pod.namespace': 'n',
'io.kubernetes.pod.name': 'p',
'io.kubernetes.container.name': 'POD',
}, startOnCreate=False)
super_client.update(c.account(), orchestration='kubernetes')
c = client.wait_transitioning(c.start())
assert c.transitioning == 'error'
assert c.transitioningMessage == 'Failed to find labels provider'
|
404442
|
from core.providers.aws.boto3 import prepare_aws_client_with_given_cred
import boto3
def get_event_client(aws_auth_cred):
"""
Returns the client object for AWS Events
Args:
aws_auth_cred (dict): Dict containing AWS credentials
Returns:
obj: AWS Cloudwatch Event Client Obj
"""
return prepare_aws_client_with_given_cred("events", aws_auth_cred)
def check_rule_exists(rule_name, aws_auth_cred):
"""
Check whether the given cloudwatch rule already exists in AWS account
Args:
rule_name (str): Cloudwatch rule name
aws_auth_cred (dict): Dict containing AWS credentials
Returns:
Boolean: True if rule exists else False
"""
client = get_event_client(aws_auth_cred)
try:
response = client.describe_rule(Name=rule_name)
return True if response else False
except Exception:
return False
def get_targets_of_a_rule(rule_name, aws_auth_cred):
"""
Returns the targets of the given cloudwatch rule
Args:
rule_name (str): Cloudwatch rule name
aws_auth_cred (dict): Dict containing AWS credentials
Returns:
targets (list): List of all targets attached to a rule
"""
client = get_event_client(aws_auth_cred)
try:
response = client.list_targets_by_rule(
Rule=rule_name
)
except Exception:
return []
return response['Targets']
def remove_all_targets_of_a_rule(rule_name, aws_auth_cred):
"""
Remove all targets of a rule
Args:
rule_name (str): Cloudwatch rule name
aws_auth_cred (dict): Dict containing AWS credentials
"""
targets = get_targets_of_a_rule(rule_name, aws_auth_cred)
target_ids = [item['Id'] for item in targets]
if len(target_ids) > 0:
client = get_event_client(aws_auth_cred)
client.remove_targets(
Rule=rule_name,
Ids=target_ids,
Force=True
)
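# Minimal usage sketch, not part of the original module: clear a rule's targets and
# then delete the rule itself. delete_rule is a standard CloudWatch Events API call;
# the shape of aws_auth_cred is whatever prepare_aws_client_with_given_cred expects.
def delete_rule(rule_name, aws_auth_cred):
    """Remove all targets of the given rule and then delete the rule."""
    remove_all_targets_of_a_rule(rule_name, aws_auth_cred)
    client = get_event_client(aws_auth_cred)
    client.delete_rule(Name=rule_name)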
|
404474
|
import time
import cv2
import numpy as np
import glob
import os
import dlib
import argparse
imgMustache = cv2.imread("mustache.png", -1)
orig_mask = imgMustache[:,:,3]
orig_mask_inv = cv2.bitwise_not(orig_mask)
imgMustache = imgMustache[:,:,0:3]
origMustacheHeight, origMustacheWidth = imgMustache.shape[:2]
imgGlass = cv2.imread("glasses.png", -1)
orig_mask_g = imgGlass[:,:,3]
orig_mask_inv_g = cv2.bitwise_not(orig_mask_g)
imgGlass = imgGlass[:,:,0:3]
origGlassHeight, origGlassWidth = imgGlass.shape[:2]
predictor_path = "shape_predictor_68_face_landmarks.dat"
face_rec_model_path = "dlib_face_recognition_resnet_model_v1.dat"
cnn_face_detector = dlib.cnn_face_detection_model_v1("mmod_human_face_detector.dat")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
def main(file, output, frame_rate=30):
if (file == "camera"):
video_capture = cv2.VideoCapture(0)
else:
video_capture = cv2.VideoCapture(file)
ret, frame = video_capture.read()
if (output != None):
out = cv2.VideoWriter(output,fourcc, frame_rate, (frame.shape[1], frame.shape[0]))
while ret:
dets = cnn_face_detector(frame, 1)
for k, d in enumerate(dets):
shape = predictor(frame, d.rect)
mustacheWidth = abs(3 * (shape.part(31).x - shape.part(35).x))
mustacheHeight = int(mustacheWidth * origMustacheHeight / origMustacheWidth) - 10
mustache = cv2.resize(imgMustache, (mustacheWidth,mustacheHeight), interpolation = cv2.INTER_AREA)
mask = cv2.resize(orig_mask, (mustacheWidth,mustacheHeight), interpolation = cv2.INTER_AREA)
mask_inv = cv2.resize(orig_mask_inv, (mustacheWidth,mustacheHeight), interpolation = cv2.INTER_AREA)
y1 = int(shape.part(33).y - (mustacheHeight/2)) + 10
y2 = int(y1 + mustacheHeight)
x1 = int(shape.part(51).x - (mustacheWidth/2))
x2 = int(x1 + mustacheWidth)
roi = frame[y1:y2, x1:x2]
roi_bg = cv2.bitwise_and(roi,roi,mask = mask_inv)
roi_fg = cv2.bitwise_and(mustache,mustache,mask = mask)
frame[y1:y2, x1:x2] = cv2.add(roi_bg, roi_fg)
glassWidth = abs(shape.part(16).x - shape.part(1).x)
glassHeight = int(glassWidth * origGlassHeight / origGlassWidth)
glass = cv2.resize(imgGlass, (glassWidth,glassHeight), interpolation = cv2.INTER_AREA)
mask = cv2.resize(orig_mask_g, (glassWidth,glassHeight), interpolation = cv2.INTER_AREA)
mask_inv = cv2.resize(orig_mask_inv_g, (glassWidth,glassHeight), interpolation = cv2.INTER_AREA)
y1 = int(shape.part(24).y)
y2 = int(y1 + glassHeight)
x1 = int(shape.part(27).x - (glassWidth/2))
x2 = int(x1 + glassWidth)
roi1 = frame[y1:y2, x1:x2]
roi_bg = cv2.bitwise_and(roi1,roi1,mask = mask_inv)
roi_fg = cv2.bitwise_and(glass,glass,mask = mask)
frame[y1:y2, x1:x2] = cv2.add(roi_bg, roi_fg)
#'''
if (output != None):
out.write(frame)
else:
cv2.imshow("", frame)
ret, frame = video_capture.read()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
if (output != None):
out.release()
video_capture.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", type=str, help="give video file for filter write camera if you want to use webcam", required=True)
parser.add_argument("-o", "--output", type=str, help="give output name for video in .mp4 format")
parser.add_argument("-fr", "--frame_rate", type=str, help="give video frame", default=30)
args = parser.parse_args()
file = args.file
output = args.output
frame_rate = args.frame_rate
main(file, output, frame_rate)
|
404481
|
from typing import List
from wingedsheep.carcassonne.objects.coordinate_with_side import CoordinateWithSide
class City:
def __init__(self, city_positions: List[CoordinateWithSide], finished: bool):
self.city_positions = city_positions
self.finished = finished
|
404515
|
from bentoml import env, artifacts, api, BentoService
from bentoml.adapters import JsonInput
@env(infer_pip_packages=True)
class ScoreIdentifier(BentoService):
"""
A score identifier service that returns a score based on the given data
"""
@api(input=JsonInput())
def predict(self, parsed_json):
"""
An interface API that takes a JSON type input,
calculates the score based on the data given,
and returns it
:param parsed_json:
:return: score %
"""
medians = {'Commits': 410,
'Followers': 16,
'Repos': 28,
'Stars': 4,
'Forks': 2,
'Organizations': 1,
'Issues': 44,
'Contributions': 32}
score = 0
for key in parsed_json.keys():
if medians[key] < parsed_json[key]:
score += 1
score = (score / len(medians.keys())) * 100
return score
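# Hedged usage sketch (the sample values are hypothetical): instantiate the service
# and call the API method directly, as one might in a quick local check.
if __name__ == "__main__":
    svc = ScoreIdentifier()
    sample = {'Commits': 500, 'Followers': 10, 'Repos': 30, 'Stars': 2,
              'Forks': 5, 'Organizations': 0, 'Issues': 50, 'Contributions': 40}
    # 5 of the 8 stats exceed their medians, so the score is 5 / 8 * 100 = 62.5
    print(svc.predict(sample))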
|
404544
|
from logging import getLogger
import numpy
from rdkit.Chem import rdMolDescriptors
from chainer_chemistry.dataset.preprocessors.common import MolFeatureExtractionError # NOQA
from chainer_chemistry.dataset.preprocessors.mol_preprocessor import MolPreprocessor # NOQA
class ECFPPreprocessor(MolPreprocessor):
def __init__(self, radius=2):
super(ECFPPreprocessor, self).__init__()
self.radius = radius
def get_input_features(self, mol):
try:
fp = rdMolDescriptors.GetMorganFingerprintAsBitVect(mol,
self.radius)
except Exception as e:
logger = getLogger(__name__)
logger.debug('exception caught at ECFPPreprocessor: %s', e)
# Extracting feature failed
raise MolFeatureExtractionError
# TODO(Nakago): Test it.
return numpy.asarray(fp, numpy.float32)
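# Note: GetMorganFingerprintAsBitVect defaults to a 2048-bit vector, so the returned
# feature is a float32 array of length 2048 (an ECFP4-style fingerprint when radius=2).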
|
404599
|
import c3d
import struct
import unittest
import numpy as np
def genByteWordArr(word, shape):
''' Generate a multi-dimensional byte array from a specific word.
'''
arr = np.array(word)
for d in shape[::-1]:
arr = arr[np.newaxis].repeat(d, 0)
return arr, [len(word)] + [d for d in shape]
def genRndByteArr(wordlen, shape, pad):
''' Generate a multi-dimensional byte array with random data.
'''
tot_len = wordlen + pad*wordlen
arr = np.empty(shape, dtype=np.dtype('S'+str(tot_len)))
for i in np.ndindex(arr.shape):
bytes = np.random.randint(21, 126, wordlen).astype(np.uint8)
if pad:
bytes = np.hstack((bytes, np.array([b'255']*wordlen, dtype=np.uint8)))
arr[i] = bytes.tobytes()
return arr, [tot_len] + [d for d in shape]
def genRndFloatArr(shape, rnd, range=(-1e6, 1e6)):
''' Generate a multi-dimensional array of 32 bit floating point data.
'''
return rnd.uniform(range[0], range[1], shape)
class ParameterValueTest(unittest.TestCase):
''' Test reading single parameter values
'''
RANGE_8_BIT = (-127, 127)
RANGE_16_BIT = (-1e4, 1e4)
RANGE_32_BIT = (-1e6, 1e6)
RANGE_8_UNSIGNED_BIT = (0, 255)
RANGE_16_UNSIGNED_BIT = (0, 1e4)
RANGE_32_UNSIGNED_BIT = (0, 1e6)
TEST_ITERATIONS = 1000
def setUp(self):
self.rnd = np.random.default_rng()
self.dtypes = c3d.DataTypes(c3d.PROCESSOR_INTEL)
def test_a_param_float32(self):
''' Verify a single 32 bit floating point value is parsed correctly
'''
for i in range(ParameterValueTest.TEST_ITERATIONS):
value = np.float32(self.rnd.uniform(*ParameterValueTest.RANGE_32_BIT))
bytes = struct.pack('<f', value)
P = c3d.Param('FLOAT_TEST', self.dtypes, bytes_per_element=4, dimensions=[1], bytes=bytes)
value_out = P.float_value
assert value == value_out, 'Parameter float was not read correctly. Was %f, expected %f' %\
(value_out, value)
def test_b_param_int32(self):
''' Verify a single 32 bit integer value is parsed correctly
'''
for i in range(ParameterValueTest.TEST_ITERATIONS):
value = np.int32(self.rnd.uniform(*ParameterValueTest.RANGE_32_BIT))
bytes = struct.pack('<i', value)
P = c3d.Param('INT32_TEST', self.dtypes, bytes_per_element=4, dimensions=[1], bytes=bytes)
value_out = P.int32_value
assert value == value_out, 'Parameter int32 was not read correctly. Was %f, expected %f' %\
(value_out, value)
def test_b_param_uint32(self):
''' Verify a single 32 bit unsigned integer value is parsed correctly
'''
for i in range(ParameterValueTest.TEST_ITERATIONS):
value = np.uint32(self.rnd.uniform(*ParameterValueTest.RANGE_32_UNSIGNED_BIT))
bytes = struct.pack('<I', value)
P = c3d.Param('UINT32_TEST', self.dtypes, bytes_per_element=4, dimensions=[1], bytes=bytes)
value_out = P.int32_value
assert value == value_out, 'Parameter uint32 was not read correctly. Was %f, expected %f' %\
(value_out, value)
def test_b_param_int16(self):
''' Verify a single 16 bit integer value is parsed correctly
'''
for i in range(ParameterValueTest.TEST_ITERATIONS):
value = np.int16(self.rnd.uniform(*ParameterValueTest.RANGE_16_BIT))
bytes = struct.pack('<h', value)
P = c3d.Param('INT16_TEST', self.dtypes, bytes_per_element=2, dimensions=[1], bytes=bytes)
value_out = P.int16_value
assert value == value_out, 'Parameter int16 was not read correctly. Was %f, expected %f' %\
(value_out, value)
def test_b_param_uint16(self):
''' Verify a single 16 bit unsigned integer value is parsed correctly
'''
for i in range(ParameterValueTest.TEST_ITERATIONS):
value = np.uint16(self.rnd.uniform(*ParameterValueTest.RANGE_16_UNSIGNED_BIT))
bytes = struct.pack('<H', value)
P = c3d.Param('UINT16_TEST', self.dtypes, bytes_per_element=2, dimensions=[1], bytes=bytes)
value_out = P.uint16_value
assert value == value_out, 'Parameter uint16 was not read correctly. Was %f, expected %f' %\
(value_out, value)
def test_b_param_int8(self):
''' Verify a single 8 bit integer value is parsed correctly
'''
for i in range(ParameterValueTest.TEST_ITERATIONS):
value = np.int8(self.rnd.uniform(*ParameterValueTest.RANGE_8_BIT))
bytes = struct.pack('<b', value)
P = c3d.Param('INT8_TEST', self.dtypes, bytes_per_element=1, dimensions=[1], bytes=bytes)
value_out = P.int8_value
assert value == value_out, 'Parameter int8 was not read correctly. Was %f, expected %f' %\
(value_out, value)
def test_b_param_uint8(self):
''' Verify a single 8 bit unsigned integer value is parsed correctly
'''
for i in range(ParameterValueTest.TEST_ITERATIONS):
value = np.uint8(self.rnd.uniform(*ParameterValueTest.RANGE_8_UNSIGNED_BIT))
bytes = struct.pack('<B', value)
P = c3d.Param('UINT8_TEST', self.dtypes, bytes_per_element=1, dimensions=[1], bytes=bytes)
value_out = P.uint8_value
assert value == value_out, 'Parameter uint8 was not read correctly. Was %f, expected %f' %\
(value_out, value)
class ParameterArrayTest(unittest.TestCase):
''' Test reading parameter arrays
'''
SHAPES = [[7, 6, 5], [7, 5, 3], [7, 3], [19]]
def setUp(self):
self.rnd = np.random.default_rng()
self.dtypes = c3d.DataTypes(c3d.PROCESSOR_INTEL)
def test_a_parse_float32_array(self):
''' Verify array of 32 bit floating point values are parsed correctly
'''
flt_range = (-1e6, 1e6)
for shape in ParameterArrayTest.SHAPES:
arr = self.rnd.uniform(flt_range[0], flt_range[1], size=shape).astype(np.float32)
P = c3d.Param('FLOAT_TEST', self.dtypes, bytes_per_element=4, dimensions=arr.shape, bytes=arr.T.tobytes())
arr_out = P.float_array
assert arr.T.shape == arr_out.shape, "Mismatch in 'float_array' converted shape"
assert np.all(arr.T == arr_out), 'Value mismatch when reading float array'
def test_b_parse_int32_array(self):
''' Verify array of 32 bit integer values are parsed correctly
'''
flt_range = (-1e6, 1e6)
for shape in ParameterArrayTest.SHAPES:
arr = self.rnd.uniform(flt_range[0], flt_range[1], size=shape).astype(np.int32)
P = c3d.Param('INT32_TEST', self.dtypes, bytes_per_element=4, dimensions=arr.shape, bytes=arr.T.tobytes())
arr_out = P.int32_array
assert arr.T.shape == arr_out.shape, "Mismatch in 'int32_array' converted shape"
assert np.all(arr.T == arr_out), 'Value mismatch when reading int32 array'
def test_c_parse_uint32_array(self):
''' Verify array of 32 bit unsigned integer values are parsed correctly
'''
flt_range = (0, 1e6)
for shape in ParameterArrayTest.SHAPES:
arr = self.rnd.uniform(flt_range[0], flt_range[1], size=shape).astype(np.uint32)
P = c3d.Param('UINT32_TEST', self.dtypes, bytes_per_element=4, dimensions=arr.shape, bytes=arr.T.tobytes())
arr_out = P.uint32_array
assert arr.T.shape == arr_out.shape, "Mismatch in 'uint32_array' converted shape"
assert np.all(arr.T == arr_out), 'Value mismatch when reading uint32 array'
def test_d_parse_int16_array(self):
''' Verify array of 16 bit integer values are parsed correctly
'''
flt_range = (-1e4, 1e4)
for shape in ParameterArrayTest.SHAPES:
arr = self.rnd.uniform(flt_range[0], flt_range[1], size=shape).astype(np.int16)
P = c3d.Param('INT16_TEST', self.dtypes, bytes_per_element=2, dimensions=arr.shape, bytes=arr.T.tobytes())
arr_out = P.int16_array
assert arr.T.shape == arr_out.shape, "Mismatch in 'int16_array' converted shape"
assert np.all(arr.T == arr_out), 'Value mismatch when reading int16 array'
def test_e_parse_uint16_array(self):
''' Verify array of 16 bit unsigned integer values are parsed correctly
'''
flt_range = (0, 1e4)
for shape in ParameterArrayTest.SHAPES:
arr = self.rnd.uniform(flt_range[0], flt_range[1], size=shape).astype(np.uint16)
P = c3d.Param('UINT16_TEST', self.dtypes, bytes_per_element=2, dimensions=arr.shape, bytes=arr.T.tobytes())
arr_out = P.uint16_array
assert arr.T.shape == arr_out.shape, "Mismatch in 'uint16_array' converted shape"
assert np.all(arr.T == arr_out), 'Value mismatch when reading uint16 array'
def test_e_parse_int8_array(self):
''' Verify array of 8 bit integer values are parsed correctly
'''
flt_range = (-127, 127)
for shape in ParameterArrayTest.SHAPES:
arr = self.rnd.uniform(flt_range[0], flt_range[1], size=shape).astype(np.int8)
P = c3d.Param('INT8_TEST', self.dtypes, bytes_per_element=1, dimensions=arr.shape, bytes=arr.T.tobytes())
arr_out = P.int8_array
assert arr.T.shape == arr_out.shape, "Mismatch in 'int8_array' converted shape"
assert np.all(arr.T == arr_out), 'Value mismatch when reading int8 array'
def test_f_parse_uint8_array(self):
''' Verify array of 8 bit unsigned integer values are parsed correctly
'''
flt_range = (0, 255)
for shape in ParameterArrayTest.SHAPES:
arr = self.rnd.uniform(flt_range[0], flt_range[1], size=shape).astype(np.uint8)
P = c3d.Param('UINT8_TEST', self.dtypes, bytes_per_element=1, dimensions=arr.shape, bytes=arr.T.tobytes())
arr_out = P.uint8_array
assert arr.T.shape == arr_out.shape, "Mismatch in 'uint8_array' converted shape"
assert np.all(arr.T == arr_out), 'Value mismatch when reading uint8 array'
def test_g_parse_byte_array(self):
''' Verify byte arrays are parsed correctly
'''
word = b'WRIST'
# 1 dims
arr = np.array(word).repeat(3).repeat(3).repeat(3)
P = c3d.Param('BYTE_TEST', self.dtypes, bytes_per_element=1, dimensions=arr.shape, bytes=arr.T.tobytes())
arr_out = P.bytes_array
assert arr.shape[1:] == arr_out.shape, "Mismatch in 'bytes_array' converted shape"
assert np.all(arr.tobytes() == arr_out), 'Mismatch in reading single dimensional byte array'
# 4 dims
arr, shape = genByteWordArr(word, [5, 4, 3])
P = c3d.Param('BYTE_TEST', self.dtypes, bytes_per_element=1, dimensions=shape, bytes=arr.T.tobytes())
arr_out = P.bytes_array
assert arr.T.shape == arr_out.shape, "Mismatch in 'bytes_array' converted shape. Was %s, expected %s" %\
(str(arr_out.shape), str(arr.T.shape))
for i in np.ndindex(arr_out.shape):
assert np.all(arr[i[::-1]] == arr_out[i]), "Mismatch in 'bytes_array' converted value at index %s" % str(i)
# 5 dims
arr, shape = genByteWordArr(word, [6, 5, 4, 3])
P = c3d.Param('BYTE_TEST', self.dtypes, bytes_per_element=1, dimensions=shape, bytes=arr.T.tobytes())
arr_out = P.bytes_array
assert arr.T.shape == arr_out.shape, "Mismatch in 'bytes_array' converted shape. Was %s, expected %s" %\
(str(arr_out.shape), str(arr.T.shape))
for i in np.ndindex(arr_out.shape):
assert np.all(arr[i[::-1]] == arr_out[i]), "Mismatch in 'bytes_array' converted value at index %s" % str(i)
def test_h_parse_string_array(self):
''' Verify repeated word arrays are parsed correctly
'''
word = b'ANCLE'
# 3 dims
arr, shape = genByteWordArr(word, [7, 3])
P = c3d.Param('STRING_TEST', self.dtypes, bytes_per_element=-1, dimensions=shape, bytes=arr.T.tobytes())
arr_out = P.string_array
assert arr.T.shape == arr_out.shape, "Mismatch in 'string_array' converted shape. Was %s, expected %s" %\
(str(arr_out.shape), str(arr.T.shape))
for i in np.ndindex(arr_out.shape):
assert self.dtypes.decode_string(arr[i[::-1]]) == arr_out[i],\
"Mismatch in 'string_array' converted value at index %s" % str(i)
# 4 dims
arr, shape = genByteWordArr(word, [5, 4, 3])
P = c3d.Param('STRING_TEST', self.dtypes, bytes_per_element=-1, dimensions=shape, bytes=arr.T.tobytes())
arr_out = P.string_array
assert arr.T.shape == arr_out.shape, "Mismatch in 'string_array' converted shape. Was %s, expected %s" %\
(str(arr_out.shape), str(arr.T.shape))
for i in np.ndindex(arr_out.shape):
assert self.dtypes.decode_string(arr[i[::-1]]) == arr_out[i],\
"Mismatch in 'string_array' converted value at index %s" % str(i)
# 5 dims
arr, shape = genByteWordArr(word, [6, 5, 4, 3])
P = c3d.Param('STRING_TEST', self.dtypes, bytes_per_element=-1, dimensions=shape, bytes=arr.T.tobytes())
arr_out = P.string_array
assert arr.T.shape == arr_out.shape, "Mismatch in 'string_array' converted shape. Was %s, expected %s" %\
(str(arr_out.shape), str(arr.T.shape))
for i in np.ndindex(arr_out.shape):
assert self.dtypes.decode_string(arr[i[::-1]]) == arr_out[i],\
"Mismatch in 'string_array' converted value at index %s" % str(i)
def test_i_parse_random_string_array(self):
''' Verify random word arrays are parsed correctly
'''
##
# RND
# 3 dims
for wlen in range(10):
arr, shape = genRndByteArr(wlen, [7, 3], wlen > 5)
P = c3d.Param('STRING_TEST', self.dtypes, bytes_per_element=-1, dimensions=shape, bytes=arr.T.tobytes())
arr_out = P.string_array
assert arr.T.shape == arr_out.shape, "Mismatch in 'string_array' converted shape. Was %s, expected %s" %\
(str(arr_out.shape), str(arr.T.shape))
for i in np.ndindex(arr_out.shape):
assert self.dtypes.decode_string(arr[i[::-1]]) == arr_out[i],\
"Mismatch in 'string_array' converted value at index %s" % str(i)
# 4 dims
for wlen in range(10):
arr, shape = genRndByteArr(wlen, [7, 5, 3], wlen > 5)
P = c3d.Param('STRING_TEST', self.dtypes, bytes_per_element=-1, dimensions=shape, bytes=arr.T.tobytes())
arr_out = P.string_array
assert arr.T.shape == arr_out.shape, "Mismatch in 'string_array' converted shape. Was %s, expected %s" %\
(str(arr_out.shape), str(arr.T.shape))
for i in np.ndindex(arr_out.shape):
assert self.dtypes.decode_string(arr[i[::-1]]) == arr_out[i],\
"Mismatch in 'string_array' converted value at index %s" % str(i)
# 5 dims
for wlen in range(10):
arr, shape = genRndByteArr(wlen, [7, 6, 5, 3], wlen > 5)
P = c3d.Param('STRING_TEST', self.dtypes, bytes_per_element=-1, dimensions=shape, bytes=arr.T.tobytes())
arr_out = P.string_array
assert arr.T.shape == arr_out.shape, "Mismatch in 'string_array' converted shape. Was %s, expected %s" %\
(str(arr_out.shape), str(arr.T.shape))
for i in np.ndindex(arr_out.shape):
assert self.dtypes.decode_string(arr[i[::-1]]) == arr_out[i],\
"Mismatch in 'string_array' converted value at index %s" % str(i)
if __name__ == '__main__':
unittest.main()
|
404603
|
import logging
import collections
from werkzeug.utils import cached_property
# Source realms, to differentiate sources in the site itself ('User')
# and sources in the site's theme ('Theme').
REALM_USER = 0
REALM_THEME = 1
REALM_NAMES = {
REALM_USER: 'User',
REALM_THEME: 'Theme'}
# Types of relationships a content source can be asked for.
REL_PARENT_GROUP = 1
REL_LOGICAL_PARENT_ITEM = 2
REL_LOGICAL_CHILD_GROUP = 3
REL_ASSETS = 10
logger = logging.getLogger(__name__)
class SourceNotFoundError(Exception):
pass
class InsufficientRouteParameters(Exception):
pass
class AbortedSourceUseError(Exception):
pass
class GeneratedContentException(Exception):
pass
CONTENT_TYPE_PAGE = 0
CONTENT_TYPE_ASSET = 1
class ContentItem:
""" Describes a piece of content.
Some known metadata keys that PieCrust will use include:
- `date`: A `datetime.date` object that will set the date of the page.
- `datetime`: A `datetime.datetime` object that will set the date and
time of the page.
- `route_params`: A dictionary of route parameters to generate the
URL to the content.
- `config`: A dictionary of configuration settings to merge into the
settings found in the content itself.
"""
def __init__(self, spec, metadata):
self.spec = spec
self.metadata = metadata
@property
def is_group(self):
return False
class ContentGroup:
""" Describes a group of `ContentItem`s.
"""
def __init__(self, spec, metadata):
self.spec = spec
self.metadata = metadata
@property
def is_group(self):
return True
class ContentSource:
""" A source for content.
"""
SOURCE_NAME = None
DEFAULT_PIPELINE_NAME = None
def __init__(self, app, name, config):
self.app = app
self.name = name
self.config = config or {}
self._cache = None
self._page_cache = None
@property
def is_theme_source(self):
return self.config['realm'] == REALM_THEME
@cached_property
def route(self):
return self.app.getSourceRoute(self.name)
def openItem(self, item, mode='r', **kwargs):
raise NotImplementedError()
def getItemMtime(self, item):
raise NotImplementedError()
def getAllPages(self):
if self._page_cache is not None:
return self._page_cache
getter = self.app.getPage
self._page_cache = [getter(self, i) for i in self.getAllContents()]
return self._page_cache
def getAllContents(self):
if self._cache is not None:
return self._cache
cache = []
stack = collections.deque()
stack.append(None)
while len(stack) > 0:
cur = stack.popleft()
try:
contents = self.getContents(cur)
except GeneratedContentException:
continue
if contents is not None:
for c in contents:
if c.is_group:
stack.append(c)
else:
cache.append(c)
self._cache = cache
return cache
def getContents(self, group):
raise NotImplementedError(
"'%s' doesn't implement 'getContents'." % self.__class__)
def getRelatedContents(self, item, relationship):
raise NotImplementedError(
"'%s' doesn't implement 'getRelatedContents'." % self.__class__)
def findContentFromSpec(self, spec):
raise NotImplementedError(
"'%s' doesn't implement 'findContentFromSpec'." % self.__class__)
def findContentFromRoute(self, route_params):
raise NotImplementedError(
"'%s' doesn't implement 'findContentFromRoute'." % self.__class__)
def getSupportedRouteParameters(self):
raise NotImplementedError(
"'%s' doesn't implement 'getSupportedRouteParameters'." %
self.__class__)
def prepareRenderContext(self, ctx):
pass
def onRouteFunctionUsed(self, route_params):
pass
def describe(self):
return None
|
404609
|
import copy
import json
import os
from typing import Optional
from experiments.src.gin.gin_utils import bind_parameters_from_dict
from experiments.src.training.training_train_model import train_model
from src.huggingmolecules.featurization.featurization_api import PretrainedFeaturizerMixin
from src.huggingmolecules.models.models_api import PretrainedModelBase
def get_sampler(name: str, params: dict):
import optuna
sampler_cls = getattr(optuna.samplers, name)
if sampler_cls is optuna.samplers.GridSampler:
search_grid = copy.deepcopy(params)
for k, v in params.items():
params[k] = ('suggest_categorical', v)
return sampler_cls(search_grid)
else:
return sampler_cls()
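# Hedged illustration: get_sampler('GridSampler', {'lr': [1e-3, 1e-4]}) returns an
# optuna GridSampler over that grid and rewrites the dict in place to
# {'lr': ('suggest_categorical', [1e-3, 1e-4])}, the (method, args) form consumed
# later by Objective.__call__ via getattr(trial, v[0])(p, *v[1:]).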
class Objective:
import optuna
def __init__(self,
model: Optional[PretrainedModelBase],
featurizer: Optional[PretrainedFeaturizerMixin],
save_path: str,
optuna_params: dict,
metric: str):
self.model = model
self.featurizer = featurizer
self.save_path = save_path
self.optuna_params = optuna_params
self.metric = metric
def __call__(self, trial: optuna.trial.Trial) -> float:
trial_path = os.path.join(self.save_path, f'trial_{trial.number}')
suggested_values = {p: getattr(trial, v[0])(p, *v[1:]) for p, v in self.optuna_params.items()}
bind_parameters_from_dict(suggested_values)
model = copy.deepcopy(self.model) if self.model else None
trainer = train_model(model=model, featurizer=self.featurizer, root_path=trial_path)
return trainer.logged_metrics[self.metric]
def enqueue_failed_trials(study, retry_not_completed: bool) -> None:
from optuna.trial import TrialState
params_set = set(tuple(sorted(trial.params.items())) for trial in study.get_trials()
if trial.state == TrialState.COMPLETE)
if retry_not_completed:
condition = lambda trial: trial.state not in [TrialState.COMPLETE, TrialState.WAITING]
else:
condition = lambda trial: trial.state == TrialState.FAIL
for trial in study.get_trials():
if condition(trial) and tuple(sorted(trial.params.items())) not in params_set:
params_set.add(tuple(sorted(trial.params.items())))
study.enqueue_trial(trial.params)
def print_and_save_search_results(study, metric: str, save_path: str) -> None:
dataframe = study.trials_dataframe(attrs=('number', 'value', 'params', 'state'))
trial = study.best_trial
print(str(dataframe))
print(f'Best trial no {trial.number}')
print(f' {metric}: {trial.value}')
print(f' params:')
for key, value in trial.params.items():
print(f' {key} = {value}')
with open(os.path.join(save_path, 'trials.dataframe.txt'), 'w') as fp:
fp.write(str(dataframe))
with open(os.path.join(save_path, 'best_trial.json'), 'w') as fp:
json.dump(trial.params, fp)
|
404683
|
import pytest
from bvspca.animals.management.commands.sync_petpoint_data import Command
from bvspca.animals.models import AnimalCountSettings
@pytest.mark.django_db
def test_decrement_animal_count():
animal_settings = AnimalCountSettings.objects.get(pk=1)
animal_settings.cats_adopted = 22
animal_settings.cats_rescued = 44
animal_settings.dogs_adopted = 55
animal_settings.dogs_rescued = 66
animal_settings.save()
Command.increment_animal_count('Cat', 'adopted', -1)
animal_settings.refresh_from_db()
assert animal_settings.cats_adopted == 21
Command.increment_animal_count('Dog', 'rescued', -1)
animal_settings.refresh_from_db()
assert animal_settings.dogs_rescued == 65
@pytest.mark.django_db
def test_increment_animal_count():
animal_settings = AnimalCountSettings.objects.get(pk=1)
for _ in range(1, 100):
Command.increment_animal_count('Cat', 'adopted')
animal_settings.refresh_from_db()
assert animal_settings.cats_adopted == 99
for _ in range(1, 10):
Command.increment_animal_count('Cat', 'rescued')
animal_settings.refresh_from_db()
assert animal_settings.cats_rescued == 9
for _ in range(1, 200):
Command.increment_animal_count('Dog', 'adopted')
animal_settings.refresh_from_db()
assert animal_settings.dogs_adopted == 199
for _ in range(1, 50):
Command.increment_animal_count('Dog', 'rescued')
animal_settings.refresh_from_db()
assert animal_settings.dogs_rescued == 49
|
404687
|
from django.db import models
class BackupFile(models.Model):
backup_file = models.FileField(upload_to="backup_mp3")
|
404689
|
from collections import OrderedDict
def load_pytorch_pretrain_model(paddle_model, pytorch_state_dict):
'''
paddle_model: dygraph layer object
pytorch_state_dict: pytorch state_dict, assumed to be on CPU
'''
paddle_weight=paddle_model.state_dict()
print("paddle num_params:",len(paddle_weight))
print("torch num_params:", len(pytorch_state_dict))
new_weight_dict=OrderedDict()
torch_key_list=[]
for key in pytorch_state_dict.keys():
if "num_batches_tracked" in key:
continue
torch_key_list.append(key)
for torch_key, paddle_key in zip(torch_key_list,paddle_weight.keys()):
print(torch_key, paddle_key, pytorch_state_dict[torch_key].shape,paddle_weight[paddle_key].shape)
if len(pytorch_state_dict[torch_key].shape)==0:
continue
##handle all FC weight cases
if ("fc" in torch_key and "weight" in torch_key) or (len(pytorch_state_dict[torch_key].shape)==2 and pytorch_state_dict[torch_key].shape[0]==pytorch_state_dict[torch_key].shape[1]):
new_weight_dict[paddle_key] = pytorch_state_dict[torch_key].cpu().detach().numpy().T.astype("float32")
elif int(paddle_weight[paddle_key].shape[-1])==int(pytorch_state_dict[torch_key].shape[-1]) :
new_weight_dict[paddle_key]=pytorch_state_dict[torch_key].cpu().detach().numpy().astype("float32")
else:
new_weight_dict[paddle_key] = pytorch_state_dict[torch_key].cpu().detach().numpy().T.astype("float32")
paddle_model.set_dict(new_weight_dict)
return paddle_model.state_dict()
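# Minimal self-contained sketch (assumes both torch and paddle are installed): copy
# the weights of a tiny torch Linear layer into an equivalent paddle Linear layer.
# The FC weight takes the transpose branch because paddle stores Linear weights as
# (in_features, out_features) while torch stores (out_features, in_features).
if __name__ == "__main__":
    import torch
    import paddle

    torch_fc = torch.nn.Linear(4, 2)
    paddle_fc = paddle.nn.Linear(4, 2)
    load_pytorch_pretrain_model(paddle_fc, torch_fc.state_dict())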
|
404730
|
import pytest
from busy_beaver.blueprints.tasks.retweeter import start_post_tweets_to_slack_task
from busy_beaver.models import PostTweetTask
from busy_beaver.tasks.retweeter import fetch_tweets_post_to_slack
MODULE_TO_TEST = "busy_beaver.blueprints.tasks.retweeter"
@pytest.fixture
def patched_retweeter_trigger(mocker, patcher):
return patcher(
MODULE_TO_TEST,
namespace=start_post_tweets_to_slack_task.__name__,
replacement=mocker.Mock(),
)
##################
# Integration Test
##################
@pytest.fixture
def patched_background_task(patcher, create_fake_background_task):
return patcher(
"busy_beaver.tasks.retweeter",
namespace=fetch_tweets_post_to_slack.__name__,
replacement=create_fake_background_task(),
)
@pytest.mark.integration
def test_poll_twitter_smoke_test(
caplog, client, session, create_api_user, patched_background_task
):
# Arrange
create_api_user(username="test_user", token="abcd", role="admin")
# Act
client.post(
"/poll-twitter",
headers={"Authorization": "token abcd"},
json={"channel": "general"},
)
# Assert
tasks = PostTweetTask.query.all()
assert len(tasks) == 1
###########
# Unit Test
###########
@pytest.mark.unit
def test_poll_twitter_endpoint_no_token(
client, session, create_api_user, patched_retweeter_trigger
):
# Arrange
create_api_user(username="test_user", token="abcd", role="user")
# Act
result = client.post("/poll-twitter")
# Assert
assert result.status_code == 401
@pytest.mark.unit
def test_poll_twitter_endpoint_incorrect_token(
client, session, create_api_user, patched_retweeter_trigger
):
# Arrange
create_api_user(username="test_user", token="abcd", role="user")
# Act
result = client.post("/poll-twitter", headers={"Authorization": "token wrong-token"})
# Assert
assert result.status_code == 401
@pytest.mark.unit
def test_poll_twitter_endpoint_empty_body(
caplog, client, session, create_api_user, patched_retweeter_trigger
):
# Arrange
create_api_user(username="test_user", token="abcd", role="admin")
# Act
result = client.post("/poll-twitter", headers={"Authorization": "token abcd"})
# Assert
assert result.status_code == 422
@pytest.mark.unit
def test_poll_twitter_endpoint_success(
caplog, client, session, create_api_user, patched_retweeter_trigger
):
# Arrange
create_api_user(username="test_user", token="abcd", role="admin")
mock = patched_retweeter_trigger
# Act
result = client.post(
"/poll-twitter",
headers={"Authorization": "token abcd"},
json={"channel": "general"},
)
# Assert
assert result.status_code == 200
args, kwargs = mock.call_args
assert kwargs["channel_name"] == "general"
|
404740
|
import logging
import requests
from typing import Dict
from confluence.exceptions.generalerror import ConfluenceError
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class ConfluenceAuthenticationError(ConfluenceError):
"""This exception corresponds to 401 errors on the REST API."""
def __init__(self, path, params, response):
# type: (str, Dict[str, str], requests.Response) -> None
msg = 'Authentication failure on {}. This is most likely due to incorrect username/password'.format(path)
super(ConfluenceAuthenticationError, self).__init__(path, params, response, msg)
|
404769
|
from twilio.twiml.voice_response import VoiceResponse, Start, Stream
response = VoiceResponse()
start = Start()
start.stream(
name='Example Audio Stream', url='wss://mystream.ngrok.io/audiostream'
)
response.append(start)
print(response)
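# Expected TwiML output (roughly; attribute order may vary):
# <?xml version="1.0" encoding="UTF-8"?>
# <Response><Start><Stream name="Example Audio Stream" url="wss://mystream.ngrok.io/audiostream" /></Start></Response>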
|
404781
|
import logging
from common.requester import DelayedRequester
from common.storage.image import ImageStore
from util.loader import provider_details as prov
LIMIT = 1000
DELAY = 5.0
RETRIES = 3
PROVIDER = prov.CLEVELAND_DEFAULT_PROVIDER
ENDPOINT = 'http://openaccess-api.clevelandart.org/api/artworks/'
delay_request = DelayedRequester(delay=DELAY)
image_store = ImageStore(provider=PROVIDER)
DEFAULT_QUERY_PARAM = {
'cc': '1',
'has_image': '1',
'limit': LIMIT,
'skip': 0
}
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s: %(message)s',
level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
logger.info('Begin: Cleveland Museum API requests')
condition = True
offset = 0
while condition:
query_param = _build_query_param(offset)
response_json, total_images = _get_response(query_param)
if response_json is not None and total_images != 0:
batch = response_json['data']
image_count = _handle_response(batch)
logger.info(f'Total images till now {image_count}')
offset += LIMIT
else:
logger.error('No more images to process')
logger.info('Exiting')
condition = False
image_count = image_store.commit()
logger.info(f'Total number of images received {image_count}')
def _build_query_param(offset=0,
default_query_param=DEFAULT_QUERY_PARAM
):
query_param = default_query_param.copy()
query_param.update(
skip=offset
)
return query_param
def _get_response(
query_param,
endpoint=ENDPOINT,
retries=RETRIES
):
response_json, total_images = None, 0
for tries in range(retries):
response = delay_request.get(
endpoint,
query_param
)
if response is not None and response.status_code == 200:
try:
response_json = response.json()
total_images = len(response_json['data'])
except Exception as e:
logger.warning(f'response not captured due to {e}')
response_json = None
if response_json is not None and total_images is not None:
break
logger.info('Retrying \n'
f'endpoint -- {endpoint} \t'
f' with parameters -- {query_param} ')
if tries == retries-1 and ((response_json is None) or
(total_images is None)):
logger.warning('No more tries remaining. Returning Nonetypes.')
return None, 0
else:
return response_json, total_images
def _handle_response(
batch
):
total_images = 0
for data in batch:
license_ = data.get('share_license_status', '').lower()
if license_ != 'cc0':
logger.error('Wrong license image')
continue
license_version = '1.0'
foreign_id = data.get('id')
foreign_landing_url = data.get('url', None)
image_data = data.get('images', None)
if image_data is not None:
image_url, key = _get_image_type(image_data)
else:
image_url, key = None, None
if image_url is not None:
width = image_data[key]['width']
height = image_data[key]['height']
else:
width, height = None, None
title = data.get('title', None)
metadata = _get_metadata(data)
if data.get('creators'):
creator_name = data.get('creators')[0].get('description', '')
else:
creator_name = ''
total_images = image_store.add_item(
foreign_landing_url=foreign_landing_url,
image_url=image_url,
license_=license_,
license_version=license_version,
foreign_identifier=foreign_id,
width=width,
height=height,
title=title,
creator=creator_name,
meta_data=metadata,
)
return total_images
def _get_image_type(
image_data
):
if image_data.get('web'):
key = 'web'
image_url = image_data.get('web').get('url', None)
elif image_data.get('print'):
key = 'print'
image_url = image_data.get('print').get('url', None)
elif image_data.get('full'):
key = 'full'
image_url = image_data.get('full').get('url', None)
else:
image_url = None
if image_url is None:
key = None
return image_url, key
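# Hedged illustration: _get_image_type({'web': {'url': 'https://example.org/img.jpg'}})
# returns ('https://example.org/img.jpg', 'web'); the preference order is web > print > full,
# and (None, None) is returned when no usable rendition is present.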
def _get_metadata(data):
metadata = {}
metadata['accession_number'] = data.get('accession_number', '')
metadata['technique'] = data.get('technique', '')
metadata['date'] = data.get('creation_date', '')
metadata['credit_line'] = data.get('creditline', '')
metadata['classification'] = data.get('type', '')
metadata['tombstone'] = data.get('tombstone', '')
metadata['culture'] = ','.join(
[i for i in data.get('culture', []) if i is not None]
)
return metadata
if __name__ == '__main__':
main()
|
404844
|
import re
from conans import CMake, ConanFile, tools
def get_version():
try:
content = tools.load("CMakeLists.txt")
version = re.search("set\\(ASIOCHAN_VERSION (.*)\\)", content).group(1)
return version.strip()
except OSError:
return None
class AsioChan(ConanFile):
name = "asiochan"
version = get_version()
revision_mode = "scm"
description = "C++20 coroutine channels for ASIO"
homepage = "https://github.com/MiSo1289/asiochan"
url = "https://github.com/MiSo1289/asiochan"
license = "MIT"
generators = "cmake"
settings = ("os", "compiler", "arch", "build_type")
exports_sources = (
"examples/*",
"include/*",
"tests/*",
"CMakeLists.txt",
)
build_requires = (
# Unit-test framework
"catch2/2.13.3",
)
options = {
"asio": ["boost", "standalone"]
}
default_options = {
"asio": "boost",
}
def requirements(self):
if self.options.asio == "boost":
self.requires("boost/1.75.0")
else:
self.requires("asio/1.18.1")
def build(self):
cmake = CMake(self)
cmake.definitions["ASIOCHAN_USE_STANDALONE_ASIO"] = self.options.asio == "standalone"
cmake.configure()
cmake.build()
if tools.get_env("CONAN_RUN_TESTS", True):
cmake.test()
def package(self):
self.copy("*.hpp", dst="include", src="include")
def package_id(self):
self.info.header_only()
def package_info(self):
if self.options.asio == "standalone":
self.cpp_info.defines = ["ASIOCHAN_USE_STANDALONE_ASIO"]
|
404858
|
import json
import re
import base64
import pprint
from app.services import FormFillerService
def test_vr_en_form(app, db_session, client):
payload_file = 'app/services/tests/test-vr-en-payload.json'
with open(payload_file) as payload_f:
payload = json.load(payload_f)
ffs = FormFillerService(payload=payload, form_name='/vr/en')
img = ffs.as_image()
app.logger.info("got image:{}".format(img))
matches = re.fullmatch(r"(data:image\/(.+?);base64),(.+)", img, re.I)
assert matches.group(1) == 'data:image/png;base64'
assert matches.group(2) == 'png'
assert base64.b64decode(matches.group(3))
|
404905
|
import os
from os.path import dirname, join, abspath
from unittest.case import TestCase
from unittest.suite import TestSuite
from subprocess import STDOUT, check_output, CalledProcessError
from numba.testing.ddt import ddt, data
from numba.testing.notebook import NotebookTest
from numba import cuda
# setup coverage
default_config_file = abspath(join(dirname(dirname(__file__)), '.coveragerc'))
print('using coveragerc:', default_config_file)
os.environ['COVERAGE_PROCESS_START'] = default_config_file
test_scripts = [
'binarytree.py',
'bubblesort.py',
'cffi_example.py',
'compile_with_pycc.py',
'ctypes_example.py',
'fbcorr.py',
'jitclass.py',
'linkedlist.py',
'movemean.py',
'nogil.py',
'objects.py',
'ra24.py',
'stack.py',
'structures.py',
'sum.py',
'ufuncs.py',
'blackscholes/blackscholes.py',
'blackscholes/blackscholes_numba.py',
'laplace2d/laplace2d.py',
'laplace2d/laplace2d-numba.py',
'blur_image.py',
'mergesort.py',
'mandel/mandel_vectorize.py',
'mandel/mandel_jit.py',
'nbody/nbody.py',
'nbody/nbody_modified_by_MarkHarris.py',
'vectorize/sum.py',
'vectorize/polynomial.py',
]
if cuda.is_available():
test_scripts.extend([
'blackscholes/blackscholes_cuda.py',
'cudajit/matmul.py',
'cudajit/matmul_smem.py',
'cudajit/sum.py',
'laplace2d/laplace2d-numba-cuda.py',
'laplace2d/laplace2d-numba-cuda-improve.py',
'laplace2d/laplace2d-numba-cuda-smem.py',
'vectorize/cuda_polynomial.py',
# 'cuda_mpi.py',
])
notebooks = ['j0 in Numba.ipynb',
'LinearRegr.ipynb',
'numba.ipynb',
'Using Numba.ipynb']
@ddt
class TestExample(TestCase):
"""Test adapter to validate example applets."""
def setUp(self):
# to pick up sitecustomize.py
basedir = dirname(__file__)
os.environ['PYTHONPATH'] = basedir
# matplotlibrc to suppress display
os.environ['MATPLOTLIBRC'] = basedir
@data(*test_scripts)
def test(self, script):
script = abspath(join(dirname(dirname(__file__)), script))
status = 0
try:
print(script)
out = check_output(script, stderr=STDOUT, shell=True)
except CalledProcessError as e:
status = e.returncode
out = e.output
print(out.decode())
self.assertEqual(status, 0)
@ddt
class NBTest(NotebookTest):
@data(*notebooks)
def test(self, nb):
test = 'check_error' # This is the only currently supported test type
notebook = join(dirname(dirname(__file__)), 'notebooks', nb)
self._test_notebook(notebook, test)
def load_tests(loader, tests, pattern):
notebooks = loader.loadTestsFromTestCase(NBTest)
examples = loader.loadTestsFromTestCase(TestExample)
return TestSuite([notebooks, examples])
|
404931
|
from django.conf import settings
from elasticsearch_dsl import Search, Index, DocType, Date, Text, Nested
from elasticsearch_dsl import InnerObjectWrapper
from elasticsearch_dsl.connections import connections
connections.create_connection(
hosts=[settings.ES_URL],
**settings.ES_CONNECTION_PARAMS
)
def make_search():
"""Convenience function for returning a base :cls:`elasticsearch_dsl.Search`
instance using the index name in django settings.
Returns:
:cls:`elasticsearch_dsl.Search`: An elasticsearch_dsl Search instance.
"""
return Search(index=settings.ES_CW_INDEX)
def get_term_count_agg(results):
"""Convenience function for extracting the date histogram from the a term
count aggregation.
Returns:
list: A list of aggregation buckets containing both the date and the
aggregated term count for that bucket.
"""
return results.get('aggregations', {}).get('term_counts_by_day', {}).get('buckets')
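# Hedged example of the returned buckets (field values are hypothetical): each entry
# is a date_histogram bucket carrying the scripted-metric sub-aggregation, roughly
# [{'key_as_string': '2017-01-03T00:00:00.000Z', 'key': 1483401600000,
#   'doc_count': 12, 'term_counts': {'value': 34}}, ...]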
def term_counts_script(term):
"""Returns the complete configuration for an elasticsearch scripted metric
aggregation script (https://www.elastic.co/guide/en/elasticsearch/reference/5.1/search-aggregations-metrics-scripted-metric-aggregation.html)
that will count the occurrences of the provided term in every CREC document
in the bucket. The script parts are written in painless. Note: because
painless limits total iterations in a loop, this will not return accurate
counts for docs that contain more than 4999 tokens (the overwhelming
majority of CREC documents fall below this limit).
Args:
term (str): A search term to look for in the "content" field of each
CREC document.
Returns:
dict: The content for a scripted metric aggregation part of an
elasticsearch query.
"""
return {
'params': {
'_agg': {},
'term': term
},
'init_script': '''
params._agg.counts = [];
''',
'map_script': '''
def tokens = new StringTokenizer(params._source.content.toLowerCase());
int i = 0;
int n = 0;
while (tokens.hasMoreTokens() && n < 4999) {
if (tokens.nextToken().toLowerCase() == params.term) {
i += 1
}
n += 1;
}
params._agg.counts.add(i)
''',
'combine_script': '''
int j = 0;
for (i in params._agg.counts) {
j += i
}
return j
''',
'reduce_script': '''
int j = 0;
for (i in params._aggs) {
j += i ?: 0
}
return j;
''',
}
class CRECDoc(DocType):
"""An elasticsearch_dsl document model for CREC documents.
"""
title = Text()
title_part = Text()
date_issued = Date()
content = Text(fielddata=True)
crec_id = Text()
pdf_url = Text()
html_url = Text()
page_start = Text()
page_end = Text()
speakers = Text()
segments = Nested(
properties={
'segment_id': Text(),
'speaker': Text(),
'text': Text(),
'bioguide_id': Text()
}
)
class Meta:
index = settings.ES_CW_INDEX
def get_term_count_in_doc(es_conn, term, start_date, end_date):
"""Queries elasticsearch with a scripted metric aggregation, bucketed by
day, that counts the total number of occurrences of the provided term in
every CREC document in that bucket.
Args:
es_conn :cls:`elasticsearch.Elasticsearch`: A connection to an
elasticsearch cluster.
term (str): Search term.
start_date (date): Start of date range.
end_date (date): End of date range.
Returns:
dict: The response from the elasticsearch query.
"""
term = term.lower()
results = es_conn.search(
index=CRECDoc._doc_type.index,
doc_type=CRECDoc._doc_type.name,
body={
'size': 0,
'query': {
'bool': {
'must': {'term': {'content': term}},
'filter': {
'range': {
'date_issued': {
'gte': start_date.strftime('%Y-%m-%dT00:00:00Z'),
'lte': end_date.strftime('%Y-%m-%dT00:00:00Z')
}
}
}
}
},
'aggregations': {
'term_counts_by_day': {
'date_histogram': {
'field': 'date_issued',
'interval': 'day'
},
'aggregations': {
'term_counts': {
'scripted_metric': term_counts_script(term)
}
}
}
}
}
)
return results
|
404937
|
import torch
import torch.nn as nn
import torch.nn.init as nn_init
from .components import FlowNetC, FlowNetS, FlowNetSD, FlowNetFusion
# (Yuliang) Change directory structure
from .components import tofp16, tofp32, save_grad
from .components import ChannelNorm, Resample2d
class FlowNet2(nn.Module):
def __init__(self,
with_bn=False,
fp16=False,
rgb_max=255.,
div_flow=20.,
grads=None):
super(FlowNet2, self).__init__()
self.with_bn = with_bn
self.div_flow = div_flow
self.rgb_max = rgb_max
self.grads = {} if grads is None else grads
self.channelnorm = ChannelNorm()
# First Block (FlowNetC)
self.flownetc = FlowNetC(with_bn=with_bn, fp16=fp16)
self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')
self.resample1 = (nn.Sequential(tofp32(), Resample2d(), tofp16())
if fp16 else Resample2d())
# Block (FlowNetS1)
self.flownets_1 = FlowNetS(with_bn=with_bn)
self.upsample2 = nn.Upsample(scale_factor=4, mode='bilinear')
self.resample2 = (nn.Sequential(tofp32(), Resample2d(), tofp16())
if fp16 else Resample2d())
# Block (FlowNetS2)
self.flownets_2 = FlowNetS(with_bn=with_bn)
# Block (FlowNetSD)
self.flownets_d = FlowNetSD(with_bn=with_bn)
self.upsample3 = nn.Upsample(scale_factor=4, mode='nearest')
self.upsample4 = nn.Upsample(scale_factor=4, mode='nearest')
self.resample3 = (nn.Sequential(tofp32(), Resample2d(), tofp16())
if fp16 else Resample2d())
self.resample4 = (nn.Sequential(tofp32(), Resample2d(), tofp16())
if fp16 else Resample2d())
        # Block (FlowNetFusion)
self.flownetfusion = FlowNetFusion(with_bn=with_bn)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m.bias is not None:
                    nn_init.uniform_(m.bias)
                nn_init.xavier_uniform_(m.weight)
            if isinstance(m, nn.ConvTranspose2d):
                if m.bias is not None:
                    nn_init.uniform_(m.bias)
                nn_init.xavier_uniform_(m.weight)
def forward(self, inputs):
rgb_mean = inputs.contiguous().view(inputs.size()[:2] + (-1, )).mean(
dim=-1).view(inputs.size()[:2] + (1, 1, 1, ))
x = (inputs - rgb_mean) / self.rgb_max
x1 = x[:, :, 0, :, :]
x2 = x[:, :, 1, :, :]
x = torch.cat((x1, x2), dim=1)
# flownetc
flownetc_flow2 = self.flownetc(x)[0]
flownetc_flow = self.upsample1(flownetc_flow2 * self.div_flow)
        # warp img1 to img0; magnitude of diff between img0 and warped_img1
resampled_img1 = self.resample1(x[:, 3:, :, :], flownetc_flow)
diff_img0 = x[:, :3, :, :] - resampled_img1
norm_diff_img0 = self.channelnorm(diff_img0)
# concat img0, img1, img1->img0, flow, diff-mag ;
concat1 = torch.cat(
[x, resampled_img1, flownetc_flow / self.div_flow, norm_diff_img0],
dim=1)
# flownets1
flownets1_flow2 = self.flownets_1(concat1)[0]
flownets1_flow = self.upsample2(flownets1_flow2 * self.div_flow)
        # warp img1 to img0 using flownets1; magnitude of diff between img0 and warped_img1
resampled_img1 = self.resample2(x[:, 3:, :, :], flownets1_flow)
diff_img0 = x[:, :3, :, :] - resampled_img1
norm_diff_img0 = self.channelnorm(diff_img0)
# concat img0, img1, img1->img0, flow, diff-mag
concat2 = torch.cat(
(x, resampled_img1, flownets1_flow / self.div_flow,
norm_diff_img0),
dim=1)
# flownets2
flownets2_flow2 = self.flownets_2(concat2)[0]
flownets2_flow = self.upsample4(flownets2_flow2 * self.div_flow)
norm_flownets2_flow = self.channelnorm(flownets2_flow)
diff_flownets2_flow = self.resample4(x[:, 3:, :, :], flownets2_flow)
req_grad = diff_flownets2_flow.requires_grad
if req_grad:
diff_flownets2_flow.register_hook(
save_grad(self.grads, 'diff_flownets2_flow'))
diff_flownets2_img1 = self.channelnorm(
(x[:, :3, :, :] - diff_flownets2_flow))
if req_grad:
diff_flownets2_img1.register_hook(
save_grad(self.grads, 'diff_flownets2_img1'))
# flownetsd
flownetsd_flow2 = self.flownets_d(x)[0]
flownetsd_flow = self.upsample3(flownetsd_flow2 / self.div_flow)
norm_flownetsd_flow = self.channelnorm(flownetsd_flow)
diff_flownetsd_flow = self.resample3(x[:, 3:, :, :], flownetsd_flow)
if req_grad:
diff_flownetsd_flow.register_hook(
save_grad(self.grads, 'diff_flownetsd_flow'))
diff_flownetsd_img1 = self.channelnorm(
(x[:, :3, :, :] - diff_flownetsd_flow))
if req_grad:
diff_flownetsd_img1.register_hook(
save_grad(self.grads, 'diff_flownetsd_img1'))
# concat img1 flownetsd, flownets2, norm_flownetsd, norm_flownets2,
# diff_flownetsd_img1, diff_flownets2_img1
concat3 = torch.cat(
(x[:, :3, :, :], flownetsd_flow, flownets2_flow,
norm_flownetsd_flow, norm_flownets2_flow, diff_flownetsd_img1,
diff_flownets2_img1),
dim=1)
flownetfusion_flow = self.flownetfusion(concat3)
if req_grad:
flownetfusion_flow.register_hook(
save_grad(self.grads, 'flownetfusion_flow'))
return flownetfusion_flow
class FlowNet2C(FlowNetC):
def __init__(self, with_bn=False, fp16=False, rgb_max=255., div_flow=20):
super(FlowNet2C, self).__init__(with_bn, fp16)
self.rgb_max = rgb_max
self.div_flow = div_flow
def forward(self, inputs):
rgb_mean = inputs.contiguous().view(inputs.size()[:2] + (-1, )).mean(
dim=-1).view(inputs.size()[:2] + (1, 1, 1, ))
x = (inputs - rgb_mean) / self.rgb_max
x1 = x[:, :, 0, :, :]
x2 = x[:, :, 1, :, :]
flows = super(FlowNet2C, self).forward(x1, x2)
if self.training:
return flows
else:
return self.upsample1(flows[0] * self.div_flow)
class FlowNet2S(FlowNetS):
def __init__(self, with_bn=False, rgb_max=255., div_flow=20):
super(FlowNet2S, self).__init__(input_channels=6, with_bn=with_bn)
self.rgb_max = rgb_max
self.div_flow = div_flow
def forward(self, inputs):
rgb_mean = inputs.contiguous().view(inputs.size()[:2] + (-1, )).mean(
dim=-1).view(inputs.size()[:2] + (1, 1, 1, ))
x = (inputs - rgb_mean) / self.rgb_max
x = torch.cat((x[:, :, 0, :, :], x[:, :, 1, :, :]), dim=1)
flows = super(FlowNet2S, self).forward(x)
if self.training:
return flows
else:
return self.upsample1(flows[0] * self.div_flow)
class FlowNet2SD(FlowNetSD):
def __init__(self, with_bn=False, rgb_max=255., div_flow=20):
super(FlowNet2SD, self).__init__(with_bn=with_bn)
self.rgb_max = rgb_max
self.div_flow = div_flow
def forward(self, inputs):
rgb_mean = inputs.contiguous().view(inputs.size()[:2] + (-1, )).mean(
dim=-1).view(inputs.size()[:2] + (1, 1, 1, ))
x = (inputs - rgb_mean) / self.rgb_max
x = torch.cat((x[:, :, 0, :, :], x[:, :, 1, :, :]), dim=1)
flows = super(FlowNet2SD, self).forward(x)
if self.training:
return flows
else:
return self.upsample1(flows[0] * self.div_flow)
class FlowNet2CS(nn.Module):
def __init__(self, with_bn=False, fp16=False, rgb_max=255., div_flow=20):
super(FlowNet2CS, self).__init__()
self.with_bn = with_bn
self.fp16 = fp16
self.rgb_max = rgb_max
self.div_flow = div_flow
self.channelnorm = ChannelNorm()
# First Block (FlowNetC)
self.flownetc = FlowNetC(with_bn=with_bn, fp16=fp16)
self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')
self.resample1 = (nn.Sequential(tofp32(), Resample2d(), tofp16())
if fp16 else Resample2d())
# Block (FlowNetS1)
self.flownets_1 = FlowNetS(with_bn=with_bn)
self.upsample2 = nn.Upsample(scale_factor=4, mode='bilinear')
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m.bias is not None:
                    nn_init.uniform_(m.bias)
                nn_init.xavier_uniform_(m.weight)
            if isinstance(m, nn.ConvTranspose2d):
                if m.bias is not None:
                    nn_init.uniform_(m.bias)
                nn_init.xavier_uniform_(m.weight)
def forward(self, inputs):
rgb_mean = inputs.contiguous().view(inputs.size()[:2] + (-1, )).mean(
dim=-1).view(inputs.size()[:2] + (1, 1, 1, ))
x = (inputs - rgb_mean) / self.rgb_max
x1 = x[:, :, 0, :, :]
x2 = x[:, :, 1, :, :]
x = torch.cat((x1, x2), dim=1)
# flownetc
flownetc_flow2 = self.flownetc(x)[0]
flownetc_flow = self.upsample1(flownetc_flow2 * self.div_flow)
        # warp img1 to img0; magnitude of diff between img0 and warped_img1
resampled_img1 = self.resample1(x[:, 3:, :, :], flownetc_flow)
diff_img0 = x[:, :3, :, :] - resampled_img1
norm_diff_img0 = self.channelnorm(diff_img0)
# concat img0, img1, img1->img0, flow, diff-mag ;
concat1 = torch.cat(
[x, resampled_img1, flownetc_flow / self.div_flow, norm_diff_img0],
dim=1)
# flownets1
flownets1_flow2 = self.flownets_1(concat1)[0]
flownets1_flow = self.upsample2(flownets1_flow2 * self.div_flow)
return flownets1_flow
class FlowNet2CSS(nn.Module):
def __init__(self, with_bn=False, fp16=False, rgb_max=255., div_flow=20):
super(FlowNet2CSS, self).__init__()
self.with_bn = with_bn
self.fp16 = fp16
self.rgb_max = rgb_max
self.div_flow = div_flow
self.channelnorm = ChannelNorm()
# First Block (FlowNetC)
self.flownetc = FlowNetC(with_bn=with_bn, fp16=fp16)
self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')
if fp16:
self.resample1 = nn.Sequential(tofp32(), Resample2d(), tofp16())
else:
self.resample1 = Resample2d()
# Block (FlowNetS1)
self.flownets_1 = FlowNetS(with_bn=with_bn)
self.upsample2 = nn.Upsample(scale_factor=4, mode='bilinear')
if fp16:
self.resample2 = nn.Sequential(tofp32(), Resample2d(), tofp16())
else:
self.resample2 = Resample2d()
# Block (FlowNetS2)
self.flownets_2 = FlowNetS(with_bn=with_bn)
self.upsample3 = nn.Upsample(scale_factor=4, mode='nearest')
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m.bias is not None:
                    nn_init.uniform_(m.bias)
                nn_init.xavier_uniform_(m.weight)
            if isinstance(m, nn.ConvTranspose2d):
                if m.bias is not None:
                    nn_init.uniform_(m.bias)
                nn_init.xavier_uniform_(m.weight)
def forward(self, inputs):
rgb_mean = inputs.contiguous().view(inputs.size()[:2] + (-1, )).mean(
dim=-1).view(inputs.size()[:2] + (1, 1, 1, ))
x = (inputs - rgb_mean) / self.rgb_max
x1 = x[:, :, 0, :, :]
x2 = x[:, :, 1, :, :]
x = torch.cat((x1, x2), dim=1)
# flownetc
flownetc_flow2 = self.flownetc(x)[0]
flownetc_flow = self.upsample1(flownetc_flow2 * self.div_flow)
        # warp img1 to img0; magnitude of diff between img0 and warped_img1
resampled_img1 = self.resample1(x[:, 3:, :, :], flownetc_flow)
diff_img0 = x[:, :3, :, :] - resampled_img1
norm_diff_img0 = self.channelnorm(diff_img0)
# concat img0, img1, img1->img0, flow, diff-mag ;
concat1 = torch.cat(
[x, resampled_img1, flownetc_flow / self.div_flow, norm_diff_img0],
dim=1)
# flownets1
flownets1_flow2 = self.flownets_1(concat1)[0]
flownets1_flow = self.upsample2(flownets1_flow2 * self.div_flow)
        # warp img1 to img0 using flownets1; magnitude of diff between img0 and warped_img1
resampled_img1 = self.resample2(x[:, 3:, :, :], flownets1_flow)
diff_img0 = x[:, :3, :, :] - resampled_img1
norm_diff_img0 = self.channelnorm(diff_img0)
# concat img0, img1, img1->img0, flow, diff-mag
concat2 = torch.cat(
(x, resampled_img1, flownets1_flow / self.div_flow,
norm_diff_img0),
dim=1)
# flownets2
flownets2_flow2 = self.flownets_2(concat2)[0]
flownets2_flow = self.upsample3(flownets2_flow2 * self.div_flow)
return flownets2_flow
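# Minimal shape-check sketch (assumption: every variant above expects a 5-D
# input of shape (batch, 3, 2, H, W) holding an RGB image pair, with H and W
# divisible by 64; the sizes below are illustrative):
#
# if __name__ == '__main__':
#     net = FlowNet2().eval()
#     pair = torch.rand(1, 3, 2, 256, 320) * 255.
#     with torch.no_grad():
#         flow = net(pair)  # expected: a (1, 2, 256, 320) flow field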
|
405012
|
import logging
from telebot import TeleBot, logger
from config import Config
bot = TeleBot(
token=Config.TELEGRAM_BOT_RELEASE_TOKEN,
threaded=Config.IS_THREADED_BOT
)
logger.setLevel(logging.INFO)
from tg_bot import handlers
|
405027
|
from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.extended_isolation_forest import H2OExtendedIsolationForestEstimator
def extended_isolation_forest():
print("Extended Isolation Forest Smoke Test")
train = h2o.import_file(pyunit_utils.locate("smalldata/anomaly/single_blob.csv"))
eif_model = H2OExtendedIsolationForestEstimator(ntrees=99, seed=0xBEEF, sample_size=255, extension_level=1)
eif_model.train(training_frame=train)
anomaly_score = eif_model.predict(train)
anomaly = anomaly_score['anomaly_score'].as_data_frame(use_pandas=True)["anomaly_score"]
assert 99 == eif_model._model_json["output"]["model_summary"]["number_of_trees"][0], "Python API is not working!"
assert 255 == eif_model._model_json["output"]["model_summary"]["size_of_subsample"][0], "Python API is not working!"
assert 1 == eif_model._model_json["output"]["model_summary"]["extension_level"][0], "Python API is not working!"
print(anomaly_score)
print(eif_model)
assert 0.36 < anomaly.mean() < 0.40, \
"Not expected output: Mean anomaly score is suspiciously different. " + str(anomaly.mean())
# The output of the EIF algorithm is based on randomly generated values.
# If the randomization is changed, then the output can be slightly different and it is fine to update them.
# The link to source paper: https://arxiv.org/pdf/1811.02141.pdf
assert anomaly[0] >= 0.70, \
"Not expected output: Anomaly point should have higher score " + str(anomaly[0])
    assert anomaly[5] <= 0.55, \
        "Not expected output: Normal point should have lower score, around 0.5 " + str(anomaly[5])
    assert anomaly[33] <= 0.55, \
        "Not expected output: Normal point should have lower score, around 0.5 " + str(anomaly[33])
    assert anomaly[256] <= 0.55, \
        "Not expected output: Normal point should have lower score, around 0.5 " + str(anomaly[256])
    assert anomaly[499] <= 0.55, \
        "Not expected output: Normal point should have lower score, around 0.5 " + str(anomaly[499])
if __name__ == "__main__":
pyunit_utils.standalone_test(extended_isolation_forest)
else:
extended_isolation_forest()
|
405054
|
import responses
from binance.spot import Spot as Client
from tests.util import random_str
from tests.util import mock_http_response
mock_item = {"key_1": "value_1", "key_2": "value_2"}
mock_exception = {"code": -1, "msg": "error message"}
key = random_str()
secret = random_str()
@mock_http_response(responses.POST, "/api/v3/userDataStream", mock_item, 200)
def test_new_listen_key():
"""Tests the API endpoint to create a new listen key"""
client = Client(key)
response = client.new_listen_key()
response.should.equal(mock_item)
|
405070
|
from typing import Any
from typing import Callable
from typing import List
from typing import Union
from notion_client import Client
from notion_scholar.publication import Publication
class Property:
@staticmethod
def title(value: str) -> dict:
return {'title': [{'text': {'content': value}}]}
@staticmethod
def rich_text(value: str) -> dict:
return {'rich_text': [{'text': {'content': value}}]}
@staticmethod
def number(value: Union[int, float]) -> dict:
return {'number': value}
@staticmethod
def url(value: str) -> dict:
return {'url': value if value else None}
@staticmethod
def checkbox(value: bool) -> dict:
return {'checkbox': value}
@staticmethod
def select(value: str) -> dict:
return {"select": {"name": value}}
def add_publications_to_database(
publications: List[Publication],
token: str,
database_id: str,
) -> None:
    # TODO: retrieve the list of all the properties and filter
    # TODO: update_database_with_publications should check the empty fields and fill them
client = Client(auth=token)
for i, publication in enumerate(publications, start=1):
print(f'{i}/{len(publications)}: {publication}')
client.pages.create(
parent={'database_id': database_id},
properties={
'Title': Property.title(publication.title),
'Abstract': Property.rich_text(publication.abstract),
'Bibtex': Property.rich_text(publication.bibtex),
'Filename': Property.rich_text(publication.key),
'Journal': Property.rich_text(publication.journal),
'Authors': Property.rich_text(publication.authors),
'Year': Property.number(publication.year),
'URL': Property.url(publication.url),
'Inbox': Property.checkbox(True),
'Type': Property.select(publication.type),
'DOI': Property.rich_text(publication.doi),
},
)
def get_property_list_from_database(
token: str,
database_id: str,
retriever: Callable[[dict], Any],
page_size: int = 100,
) -> List[str]:
notion = Client(auth=token)
results = []
query = notion.databases.query(
database_id=database_id, page_size=page_size,
)
results.extend(query['results'])
while query['next_cursor'] or (query['results'] is None and not results):
query = notion.databases.query(
database_id=database_id,
start_cursor=query['next_cursor'],
page_size=page_size,
)
results.extend(query['results'])
key_list = []
for result in results:
try:
key_list.append(retriever(result))
except IndexError:
pass
return key_list
def get_publication_key_list_from_database(
token: str,
database_id: str,
page_size: int = 100,
) -> List[str]:
def retrieve_publication_key(result: dict) -> str:
return result['properties']['Filename']['rich_text'][0]['plain_text']
return get_property_list_from_database(
token=token,
database_id=database_id,
retriever=retrieve_publication_key,
page_size=page_size,
)
def get_bibtex_string_list_from_database(
token: str,
database_id: str,
page_size: int = 100,
) -> List[str]:
def retrieve_bibtex_string(result: dict) -> str:
return result['properties']['Bibtex']['rich_text'][0]['plain_text']
return get_property_list_from_database(
token=token,
database_id=database_id,
retriever=retrieve_bibtex_string,
page_size=page_size,
)
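# Usage sketch (the token and database_id below are placeholders; a real Notion
# integration token and a database exposing the properties used above are assumed):
#
# keys = get_publication_key_list_from_database(
#     token='secret_...', database_id='<database-id>')
# bibtex_entries = get_bibtex_string_list_from_database(
#     token='secret_...', database_id='<database-id>')
# print(len(keys), len(bibtex_entries))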
|
405087
|
import bridgekeeper
import pytest
@pytest.fixture(autouse=True)
def clean_global_permissions_map():
for k in list(bridgekeeper.perms.keys()):
del bridgekeeper.perms[k]
yield
|
405103
|
from collections import defaultdict
from typing import Dict, Any, List, Optional
from fluidml.common import Task
from fluidml.common.exception import TaskResultKeyAlreadyExists, TaskResultObjectMissing
from fluidml.storage import ResultsStore
def pack_results(all_tasks: List[Task],
results_store: ResultsStore,
return_results: bool = True) -> Dict[str, Any]:
results = defaultdict(list)
if return_results:
for task in all_tasks:
result = results_store.get_results(task_name=task.name,
task_unique_config=task.unique_config,
task_publishes=task.publishes)
results[task.name].append({'result': result,
'config': task.unique_config})
else:
for task in all_tasks:
results[task.name].append(task.unique_config)
return simplify_results(results=results)
def get_filtered_results_from_predecessor(predecessor: Task,
task_expects: List[str],
results_store: ResultsStore) -> Dict:
result = {}
for item_name in predecessor.publishes:
if item_name in task_expects:
obj: Optional[Any] = results_store.load(name=item_name,
task_name=predecessor.name,
task_unique_config=predecessor.unique_config)
if obj is not None:
result[item_name] = obj
return result
def get_results_from_predecessor(predecessor: Task,
task_expects: List[str],
results_store: ResultsStore) -> Dict:
if task_expects is None:
# get all published results from predecessor task
result = results_store.get_results(task_name=predecessor.name,
task_unique_config=predecessor.unique_config,
task_publishes=predecessor.publishes)
else:
# get only expected results by the task from predecessor tasks
result = get_filtered_results_from_predecessor(predecessor=predecessor,
task_expects=task_expects,
results_store=results_store)
return result
def pack_predecessor_results(predecessor_tasks: List[Task],
results_store: ResultsStore,
reduce_task: bool,
task_name: str,
task_expects: Optional[List[str]] = None) -> Dict[str, Any]:
if reduce_task:
all_results = []
for predecessor in predecessor_tasks:
result = get_results_from_predecessor(predecessor=predecessor,
task_expects=task_expects,
results_store=results_store)
all_results.append({'result': result,
'config': predecessor.unique_config})
# Assertion to check that all expected results are retrieved
if task_expects is not None:
retrieved_inputs = {
name for result in all_results for name in result['result'].keys()}
if retrieved_inputs != set(task_expects):
missing_input_results = list(
set(task_expects).difference(retrieved_inputs))
raise TaskResultObjectMissing(f'{task_name}: Result objects {missing_input_results} '
f'are required but could not be collected from predecessor tasks.')
return {"reduced_results": all_results}
else:
results = {}
for predecessor in predecessor_tasks:
result = get_results_from_predecessor(predecessor=predecessor,
task_expects=task_expects,
results_store=results_store)
for key, value in result.items():
if key in results.keys():
raise TaskResultKeyAlreadyExists(
f"{predecessor.name} saves a key '{key}' that already exists in another tasks's result")
else:
results[key] = value
# Assertion to check that all expected results are retrieved
if task_expects is not None:
retrieved_inputs = {name for name in results.keys()}
if retrieved_inputs != set(task_expects):
missing_input_results = list(
set(task_expects).difference(retrieved_inputs))
raise TaskResultObjectMissing(f'{task_name}: Result objects {missing_input_results} '
f'are required but could not be collected from predecessor tasks.')
return results
def simplify_results(results: Dict[str, Any]) -> Dict[str, Any]:
for task_name, task_results in results.items():
if len(task_results) == 1:
results[task_name] = task_results[0]
return results
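# Example of what simplify_results does: a task with a single run is unwrapped,
# while a task with several runs stays a list.
#
# simplify_results({'train': [{'result': 1}], 'eval': [{'result': 2}, {'result': 3}]})
# -> {'train': {'result': 1}, 'eval': [{'result': 2}, {'result': 3}]}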
|
405163
|
import pymysql
connection = pymysql.connect(host="localhost", user="user", password="<PASSWORD>")
cursor = connection.cursor()
cursor.execute("some sql", (42,)) # $ getSql="some sql"
|
405170
|
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from sandbox.ours.envs.normalized_env import normalize
from sandbox.ours.envs.base import TfEnv
from sandbox.ours.policies.improved_gauss_mlp_policy import GaussianMLPPolicy
from rllab.misc.instrument import run_experiment_lite
from rllab.misc.instrument import VariantGenerator
from rllab import config
from experiments.helpers.ec2_helpers import cheapest_subnets
from sandbox.ours.dynamics import MLPDynamicsEnsemble
from sandbox.ours.algos.ModelTRPO.model_trpo import ModelTRPO
from experiments.helpers.run_multi_gpu import run_multi_gpu
from sandbox.ours.envs.mujoco import AntEnvRandParams, HalfCheetahEnvRandParams, HopperEnvRandParams, \
SwimmerEnvRandParams, WalkerEnvRandomParams, PR2EnvRandParams
import tensorflow as tf
import sys
import argparse
import random
import os
EXP_PREFIX = 'model-ensemble-trpo-mb-comparison'
ec2_instance = 'c4.4xlarge'
NUM_EC2_SUBNETS = 3
def run_train_task(vv):
env = TfEnv(normalize(vv['env'](log_scale_limit=vv['log_scale_limit'])))
dynamics_model = MLPDynamicsEnsemble(
name="dyn_model",
env_spec=env.spec,
hidden_sizes=vv['hidden_sizes_model'],
weight_normalization=vv['weight_normalization_model'],
num_models=vv['num_models'],
valid_split_ratio=vv['valid_split_ratio'],
rolling_average_persitency=vv['rolling_average_persitency']
)
policy = GaussianMLPPolicy(
name="policy",
env_spec=env.spec,
hidden_sizes=vv['hidden_sizes_policy'],
hidden_nonlinearity=vv['hidden_nonlinearity_policy'],
)
baseline = LinearFeatureBaseline(env_spec=env.spec)
algo = ModelTRPO(
env=env,
policy=policy,
dynamics_model=dynamics_model,
baseline=baseline,
batch_size_env_samples=vv['batch_size_env_samples'],
batch_size_dynamics_samples=vv['batch_size_dynamics_samples'],
initial_random_samples=vv['initial_random_samples'],
num_gradient_steps_per_iter=vv['num_gradient_steps_per_iter'],
max_path_length=vv['path_length'],
n_itr=vv['n_itr'],
retrain_model_when_reward_decreases=vv['retrain_model_when_reward_decreases'],
discount=vv['discount'],
step_size=vv["step_size"],
reset_policy_std=vv['reset_policy_std'],
reinit_model_cycle=vv['reinit_model_cycle']
)
algo.train()
def run_experiment(argv):
# -------------------- Parse Arguments -----------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='local',
help='Mode for running the experiments - local: runs on local machine, '
'ec2: runs on AWS ec2 cluster (requires a proper configuration file)')
parser.add_argument('--n_gpu', type=int, default=0,
help='Number of GPUs')
parser.add_argument('--ctx', type=int, default=4,
help='Number of tasks per GPU')
args = parser.parse_args(argv[1:])
# -------------------- Define Variants -----------------------------------
vg = VariantGenerator()
vg.add('seed', [12, 22, 44])
# env spec
vg.add('env', ['HalfCheetahEnvRandParams', 'AntEnvRandParams', 'WalkerEnvRandomParams',
'SwimmerEnvRandParams', 'HopperEnvRandParams', 'PR2EnvRandParams'])
vg.add('log_scale_limit', [0.0])
vg.add('path_length', [200])
# Model-based MAML algo spec
vg.add('n_itr', [100])
vg.add('step_size', [0.01])
vg.add('discount', [0.99])
vg.add('batch_size_env_samples', [4000])
vg.add('batch_size_dynamics_samples', [50000])
vg.add('initial_random_samples', [4000])
vg.add('num_gradient_steps_per_iter', [30])
vg.add('retrain_model_when_reward_decreases', [False])
vg.add('num_models', [5])
# neural network configuration
vg.add('hidden_nonlinearity_policy', ['tanh'])
vg.add('hidden_nonlinearity_model', ['relu'])
vg.add('hidden_sizes_policy', [(32, 32)])
vg.add('hidden_sizes_model', [(512, 512)])
vg.add('weight_normalization_model', [True])
vg.add('reset_policy_std', [False])
vg.add('reinit_model_cycle', [0])
vg.add('valid_split_ratio', [0.2])
vg.add('rolling_average_persitency', [0.99])
# other stuff
vg.add('exp_prefix', [EXP_PREFIX])
variants = vg.variants()
default_dict = dict(exp_prefix=EXP_PREFIX,
snapshot_mode="gap",
snapshot_gap=5,
periodic_sync=True,
sync_s3_pkl=True,
sync_s3_log=True,
python_command="python3",
pre_commands=["yes | pip install tensorflow=='1.6.0'",
"pip list",
"yes | pip install --upgrade cloudpickle"],
use_cloudpickle=True,
variants=variants)
if args.mode == 'mgpu':
current_path = os.path.dirname(os.path.abspath(__file__))
script_path = os.path.join(current_path, 'mgpu_model_ensemble_trpo_train.py')
n_gpu = args.n_gpu
if n_gpu == 0:
n_gpu = len(os.listdir('/proc/driver/nvidia/gpus'))
run_multi_gpu(script_path, default_dict, n_gpu=n_gpu, ctx_per_gpu=args.ctx)
else:
        # ----------------------- AWS configuration ---------------------------------
if args.mode == 'ec2':
info = config.INSTANCE_TYPE_INFO[ec2_instance]
n_parallel = int(info["vCPU"] / 2) # make the default 4 if not using ec2
else:
n_parallel = 6
if args.mode == 'ec2':
config.AWS_INSTANCE_TYPE = ec2_instance
config.AWS_SPOT_PRICE = str(info["price"])
subnets = cheapest_subnets(ec2_instance, num_subnets=NUM_EC2_SUBNETS)
print("\n" + "**********" * 10 + "\nexp_prefix: {}\nvariants: {}".format('TRPO', len(variants)))
print('Running on type {}, with price {}, on the subnets: '.format(config.AWS_INSTANCE_TYPE,
config.AWS_SPOT_PRICE, ), str(subnets))
# ----------------------- TRAINING ---------------------------------------
exp_ids = random.sample(range(1, 1000), len(variants))
for v, exp_id in zip(variants, exp_ids):
exp_name = "model_trpo_train_env_%s_%i_%i_%i_%i_id_%i" % (v['env'], v['path_length'], v['num_gradient_steps_per_iter'],
v['batch_size_env_samples'], v['seed'], exp_id)
            v = instantiate_class_strings(v)
if args.mode == 'ec2':
subnet = random.choice(subnets)
config.AWS_REGION_NAME = subnet[:-1]
config.AWS_KEY_NAME = config.ALL_REGION_AWS_KEY_NAMES[
config.AWS_REGION_NAME]
config.AWS_IMAGE_ID = config.ALL_REGION_AWS_IMAGE_IDS[
config.AWS_REGION_NAME]
config.AWS_SECURITY_GROUP_IDS = \
config.ALL_REGION_AWS_SECURITY_GROUP_IDS[
config.AWS_REGION_NAME]
run_experiment_lite(
run_train_task,
exp_prefix=EXP_PREFIX,
exp_name=exp_name,
# Number of parallel workers for sampling
n_parallel=n_parallel,
snapshot_mode="gap",
snapshot_gap=5,
periodic_sync=True,
sync_s3_pkl=True,
sync_s3_log=True,
# Specifies the seed for the experiment. If this is not provided, a random seed
# will be used
seed=v["seed"],
python_command='python3',
pre_commands=["yes | pip install tensorflow=='1.6.0'",
"pip list",
"yes | pip install --upgrade cloudpickle"],
mode=args.mode,
use_cloudpickle=True,
variant=v,
)
def instantiate_class_strings(v):
v['env'] = globals()[v['env']]
for nonlinearity_key in ['hidden_nonlinearity_policy', 'hidden_nonlinearity_model']:
if v[nonlinearity_key] == 'relu':
v[nonlinearity_key] = tf.nn.relu
elif v[nonlinearity_key] == 'tanh':
v[nonlinearity_key] = tf.tanh
elif v[nonlinearity_key] == 'elu':
v[nonlinearity_key] = tf.nn.elu
else:
            raise NotImplementedError('Not able to recognize specified hidden_nonlinearity: %s' % v[nonlinearity_key])
return v
if __name__ == "__main__":
run_experiment(sys.argv)
|
405174
|
import numpy as np
from precise.skaters.managers.covmanagerfactory import static_cov_manager_factory_d0
from precise.skaters.portfoliostatic.equalport import equal_long_port
from precise.skaters.covariance.identity import identity_scov
from precise.skaters.managers.buyandholdfactory import buy_and_hold_manager_factory
def equal_daily_long_manager(y, s, k=1, e=1, zeta=None, j=None):
""" Trivial version ignored j argument """
n_dim = len(y)
w = np.ones(n_dim) / n_dim
return w, {}
def equal_long_manager(y, s, k=1, e=1, zeta=None, j=1):
""" Rebalance every j observations """
return static_cov_manager_factory_d0(y=y, s=s, f=identity_scov, port=equal_long_port, e=e, n_cold=0, zeta=zeta, j=j)
def equal_weekly_long_manager(y, s, k=1, e=1, zeta=None, j=None):
""" Rebalance every 5 observations, ignoring supplied j """
return static_cov_manager_factory_d0(y=y, s=s, f=identity_scov, port=equal_long_port, e=e, n_cold=0, zeta=zeta, j=5)
def equal_weekly_buy_and_hold_long_manager(y, s, k=1, e=1, zeta=None, j=None):
""" Rebalance every 5 observations, implemented a different way as a check for tests """
return buy_and_hold_manager_factory(mgr=equal_daily_long_manager, j=5, y=y, s=s)
def equal_monthly_long_manager(y, s, k=1, e=1, zeta=None, j=None):
""" Rebalance every 20 observations, ignoring supplied j """
return static_cov_manager_factory_d0(y=y, s=s, f=identity_scov, port=equal_long_port, e=e, n_cold=0, zeta=zeta, j=20)
EQUAL_LONG_MANAGERS = [equal_long_manager, equal_daily_long_manager, equal_weekly_long_manager, equal_monthly_long_manager]
EQUAL_LS_MANAGERS = []
EQUAL_MANAGERS = EQUAL_LONG_MANAGERS + EQUAL_LS_MANAGERS
|
405185
|
import json
from .common import SearpcError
def _fret_int(ret_str):
try:
dicts = json.loads(ret_str)
except:
raise SearpcError('Invalid response format')
if 'err_code' in dicts:
raise SearpcError(dicts['err_msg'])
if 'ret' in dicts:
return dicts['ret']
else:
raise SearpcError('Invalid response format')
def _fret_string(ret_str):
try:
dicts = json.loads(ret_str)
except:
raise SearpcError('Invalid response format')
if 'err_code' in dicts:
raise SearpcError(dicts['err_msg'])
if 'ret' in dicts:
return dicts['ret']
else:
raise SearpcError('Invalid response format')
class _SearpcObj(object):
'''A compact class to emulate gobject.GObject
'''
def __init__(self, dicts):
new_dict = {}
for key in dicts:
value = dicts[key]
            # replace hyphen with underscore
new_key = key.replace('-', '_')
new_dict[new_key] = value
# For compatibility with old usage peer.props.name
self.props = self
self._dict = new_dict
def __getattr__(self, key):
try:
return self._dict[key]
except:
return None
class SearpcObjEncoder(json.JSONEncoder):
def default(self, obj):
if not isinstance(obj, _SearpcObj):
return super(SearpcObjEncoder, self).default(obj)
return obj._dict
def _fret_obj(ret_str):
try:
dicts = json.loads(ret_str)
except:
raise SearpcError('Invalid response format')
if 'err_code' in dicts:
raise SearpcError(dicts['err_msg'])
if dicts['ret']:
return _SearpcObj(dicts['ret'])
else:
return None
def _fret_objlist(ret_str):
try:
dicts = json.loads(ret_str)
except:
raise SearpcError('Invalid response format')
if 'err_code' in dicts:
raise SearpcError(dicts['err_msg'])
l = []
if dicts['ret']:
for elt in dicts['ret']:
l.append(_SearpcObj(elt))
return l
def _fret_json(ret_str):
try:
dicts = json.loads(ret_str)
except:
raise SearpcError('Invalid response format')
if 'err_code' in dicts:
raise SearpcError(dicts['err_msg'])
if dicts['ret']:
return dicts['ret']
else:
return None
def searpc_func(ret_type, param_types):
def decorate(func):
if ret_type == "void":
fret = None
elif ret_type == "object":
fret = _fret_obj
elif ret_type == "objlist":
fret = _fret_objlist
elif ret_type == "int":
fret = _fret_int
elif ret_type == "int64":
fret = _fret_int
elif ret_type == "string":
fret = _fret_string
elif ret_type == "json":
fret = _fret_json
else:
            raise SearpcError('Invalid return type')
def newfunc(self, *args):
array = [func.__name__] + list(args)
fcall_str = json.dumps(array)
ret_str = self.call_remote_func_sync(fcall_str)
if fret:
return fret(ret_str)
return newfunc
return decorate
class SearpcClient(object):
def call_remote_func_sync(self, fcall_str):
raise NotImplementedError()
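# Usage sketch (hypothetical transport: a real subclass forwards the serialized
# call to a searpc server; here the 'server' just echoes a valid reply):
#
# class EchoClient(SearpcClient):
#     def call_remote_func_sync(self, fcall_str):
#         return json.dumps({'ret': fcall_str})
#
#     @searpc_func("string", [])
#     def ping(self):
#         pass
#
# print(EchoClient().ping())  # -> '["ping"]'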
|
405214
|
from dataclasses import dataclass
from typing import List
import numpy as np
from paralleldomain.model.annotation.common import Annotation
from paralleldomain.utilities.mask import boolean_mask_by_value, boolean_mask_by_values, encode_int32_as_rgb8
@dataclass
class SemanticSegmentation2D(Annotation):
"""Represents a 2D Semantic Segmentation mask for a camera image.
Args:
class_ids: :attr:`~.SemanticSegmentation2D.class_ids`
Attributes:
class_ids: Matrix of shape `(H x W x 1)`, where `H` is height and `W` is width of corresponding camera image.
The third axis contains the class ID for each pixel as `int`.
"""
class_ids: np.ndarray
def get_class_mask(self, class_id: int) -> np.ndarray:
"""Returns a `bool` mask where class is present.
Args:
class_id: ID of class to be masked
Returns:
Mask of same shape as :py:attr:`~class_ids` and `bool` values.
`True` where pixel matches class, `False` where it doesn't.
"""
return boolean_mask_by_value(mask=self.class_ids, value=class_id)
def get_classes_mask(self, class_ids: List[int]) -> np.ndarray:
"""Returns a `bool` mask where classes are present.
Args:
class_ids: IDs of classes to be masked
Returns:
Mask of same shape as `class_ids` and `bool` values.
`True` where pixel matches one of the classes, `False` where it doesn't.
"""
return boolean_mask_by_values(mask=self.class_ids, values=class_ids)
@property
def rgb_encoded(self) -> np.ndarray:
"""Outputs :attr:`~.SemanticSegmentation.class_ids` mask as RGB-encoded image matrix with shape `(H x W x 3)`,
with `R` (index: 0) being the lowest and `B` (index: 2) being the highest 8 bit."""
return encode_int32_as_rgb8(mask=self.class_ids)
def __post_init__(self):
if len(self.class_ids.shape) != 3:
raise ValueError("Semantic Segmentation class_ids have to have shape (H x W x 1)")
if self.class_ids.dtype != int:
raise ValueError(
f"Semantic Segmentation class_ids has to contain only integers but has {self.class_ids.dtype}!"
)
if self.class_ids.shape[2] != 1:
raise ValueError("Semantic Segmentation class_ids has to have only 1 channel!")
def __sizeof__(self):
return self.class_ids.nbytes
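# Minimal usage sketch (illustrative values; real class_ids come from a decoded
# annotation, and the exact RGB encoding is delegated to encode_int32_as_rgb8):
#
# ids = np.zeros((4, 6, 1), dtype=int)
# ids[1:3, 2:5, 0] = 7
# seg = SemanticSegmentation2D(class_ids=ids)
# mask = seg.get_class_mask(7)   # bool mask, same (4 x 6 x 1) shape
# rgb = seg.rgb_encoded          # RGB-encoded (4 x 6 x 3) image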
|
405219
|
import json
from pyevt import abi, address, ecc, libevt
libevt.init_lib()
# Type and Structures
class BaseType:
def __init__(self, **kwargs):
self.kwargs = kwargs
def dict(self):
return self.kwargs
def dumps(self):
return json.dumps(self.kwargs)
class User:
def __init__(self):
self.pub_key, self.priv_key = ecc.generate_new_pair()
@staticmethod
def from_string(pub, priv):
user = User()
user.pub_key = ecc.PublicKey.from_string(pub)
user.priv_key = ecc.PrivateKey.from_string(priv)
return user
class AuthorizerRef:
def __init__(self, _type, key):
self.key = key
self.type = _type
def value(self):
return '[%s] %s' % (self.type, self.key)
class Receiver(BaseType):
    def __init__(self, _type, key):
        self.key = key
        self.type = _type
    def value(self):
        # The original returned '%d,S#%d' % (self.precision, self.id), attributes
        # Receiver never sets; formatting the type/key pair (as in AuthorizerRef)
        # is assumed here.
        return '[%s] %s' % (self.type, self.key)
class Address:
def __init__(self, from_string=None):
        if from_string is None:
self.addr = address.Address.reserved()
else:
self.addr = address.Address.from_string(from_string)
def set_public_key(self, pub_key):
self.addr = address.Address.public_key(pub_key)
return self.addr
def set_generated(self, prefix, key, nonce):
self.addr = address.Address.generated(prefix, key, nonce)
return self.addr
def get_type(self):
return self.addr.get_type()
def __str__(self):
return self.addr.to_string()
class SymbolArgsErrorException(Exception):
    def __init__(self):
        err = 'Symbol_Args_Error'
        super().__init__(err)
class Symbol:
def __init__(self, sym_name, sym_id, precision=5):
if precision > 17 or precision < 0:
raise SymbolArgsErrorException
if len(sym_name) > 7 or (not sym_name.isupper()):
raise SymbolArgsErrorException
self.name = sym_name
self.id = sym_id
self.precision = precision
def value(self):
return '%d,S#%d' % (self.precision, self.id)
def new_asset(symbol):
def value(num):
fmt = '%%.%df S#%d' % (symbol.precision, symbol.id)
return fmt % (num)
return value
EvtSymbol = Symbol(sym_name='EVT', sym_id=1, precision=5)
EvtAsset = new_asset(EvtSymbol)
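# Example: EvtSymbol has precision 5 and id 1, so
# EvtSymbol.value() -> '5,S#1' and EvtAsset(1.2) -> '1.20000 S#1'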
class AuthorizerWeight(BaseType):
def __init__(self, ref, weight):
super().__init__(ref=ref.value(), weight=weight)
class PermissionDef(BaseType):
# name: permission_name
# threshold: uint32
# authorizers: authorizer_weight[]
def __init__(self, name, threshold, authorizers=[]):
super().__init__(name=name, threshold=threshold,
authorizers=[auth.dict() for auth in authorizers])
def add_authorizer(self, auth, weight):
self.kwargs['authorizers'].append(
AuthorizerWeight(auth, weight).dict())
# Special Type: group
class Node(BaseType):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def add_child(self, node):
        if 'key' in self.kwargs:  # Leaf Node
            raise NodeTypeException
        if 'nodes' not in self.kwargs:  # Error in parent node
            raise NodeArgsException
        self.kwargs['nodes'].append(node.dict())
class NodeTypeException(Exception):
    def __init__(self):
        err = 'Group_Node_Type_Error'
        super().__init__(err)
class NodeArgsException(Exception):
    def __init__(self):
        err = 'Group_Node_Arguments_Error'
        super().__init__(err)
class RootNode(Node):
def __init__(self, threshold, nodes):
super().__init__(threshold=threshold,
nodes=[node.dict() for node in nodes])
class NonLeafNode(Node):
def __init__(self, threshold, weight, nodes):
super().__init__(threshold=threshold,
weight=weight,
nodes=[node.dict() for node in nodes])
class LeafNode(Node):
def __init__(self, key, weight):
super().__init__(key=key, weight=weight)
class Group(BaseType):
def __init__(self, name, key, root):
super().__init__(name=name, key=key, root=root)
# Abi jsons of Actions
class NewDomainAbi(BaseType):
def __init__(self, name, creator, issue, transfer, manage):
super().__init__(name=name,
creator=creator,
issue=issue.dict(),
transfer=transfer.dict(),
manage=manage.dict())
class UpdateDomainAbi(BaseType):
def __init__(self, name, issue, transfer, manage):
        super().__init__(name=name,
                         issue=None if issue is None else issue.dict(),
                         transfer=None if transfer is None else transfer.dict(),
                         manage=None if manage is None else manage.dict())
class IssueTokenAbi(BaseType):
def __init__(self, domain, names, owner):
super().__init__(domain=domain,
names=names,
owner=owner)
class TransferAbi(BaseType):
def __init__(self, domain, name, to, memo):
super().__init__(domain=domain,
name=name,
to=to,
memo=memo)
class DestroyTokenAbi(BaseType):
def __init__(self, domain, name):
super().__init__(domain=domain,
name=name)
class NewGroupAbi(BaseType):
def __init__(self, name, group):
super().__init__(name=name,
group=group)
class UpdateGroupAbi(BaseType):
def __init__(self, name, group):
super().__init__(name=name,
group=group)
class AddMetaAbi(BaseType):
def __init__(self, key, value, creator):
super().__init__(key=key, value=value, creator=creator)
class NewFungibleAbi(BaseType):
def __init__(self, name, sym_name, sym, creator, issue, manage, total_supply):
super().__init__(name=name, sym_name=sym_name, sym=sym, creator=creator,
issue=issue.dict(), manage=manage.dict(), total_supply=total_supply)
class UpdFungibleAbi(BaseType):
def __init__(self, sym_id, issue, manage):
        super().__init__(sym_id=sym_id,
                         issue=None if issue is None else issue.dict(),
                         manage=None if manage is None else manage.dict())
class IssueFungibleAbi(BaseType):
def __init__(self, address, number, memo):
super().__init__(address=address, number=number, memo=memo)
class TransferFtAbi(BaseType):
def __init__(self, _from, to, number, memo):
args = {'from': _from, 'to': to, 'number': number, 'memo': memo}
super().__init__(**args)
class EVT2PEVTAbi(BaseType):
def __init__(self, _from, to, number, memo):
args = {'from': _from, 'to': to, 'number': number, 'memo': memo}
super().__init__(**args)
class NewSuspendAbi(BaseType):
def __init__(self, name, proposer, trx):
super().__init__(name=name, proposer=proposer, trx=trx)
class AprvSuspendAbi(BaseType):
def __init__(self, name, signatures):
super().__init__(name=name, signatures=signatures)
class CancelSuspendAbi(BaseType):
def __init__(self, name):
super().__init__(name=name)
class ExecSuspendAbi(BaseType):
def __init__(self, name, executor):
super().__init__(name=name, executor=executor)
class EveripassAbi(BaseType):
def __init__(self, link):
super().__init__(link=link)
class EveripayAbi(BaseType):
def __init__(self,payee, number, link):
super().__init__(payee=payee, number=number, link=link)
class ProdvoteAbi(BaseType):
def __init__(self, producer, key, value):
super().__init__(producer=producer, key=key, value=value)
|
405250
|
from datetime import datetime
from elasticsearch import ConflictError
from ocd_backend import celery_app
from ocd_backend import settings
from ocd_backend.es import elasticsearch
from ocd_backend.exceptions import ConfigurationError
from ocd_backend.log import get_source_logger
from ocd_backend.mixins import (OCDBackendTaskSuccessMixin,
OCDBackendTaskFailureMixin)
log = get_source_logger('loader')
class BaseLoader(OCDBackendTaskSuccessMixin, OCDBackendTaskFailureMixin,
celery_app.Task):
"""The base class that other loaders should inherit."""
def run(self, *args, **kwargs):
"""Start loading of a single item.
This method is called by the transformer and expects args to
contain the output of the transformer as a tuple.
Kwargs should contain the ``source_definition`` dict.
:param item:
:param source_definition: The configuration of a single source in
the form of a dictionary (as defined in the settings).
:type source_definition: dict.
:returns: the output of :py:meth:`~BaseTransformer.transform_item`
"""
self.source_definition = kwargs['source_definition']
object_id, combined_index_doc, doc = args[0]
# Add the 'processing.finished' datetime to the documents
finished = datetime.now()
combined_index_doc['meta']['processing_finished'] = finished
doc['meta']['processing_finished'] = finished
return self.load_item(object_id, combined_index_doc, doc)
def load_item(self, object_id, combined_index_doc, doc):
        raise NotImplementedError
class ElasticsearchLoader(BaseLoader):
"""Indexes items into Elasticsearch.
Each item is added to two indexes: a 'combined' index that contains
items from different sources, and an index that only contains items
of the same source as the item.
Each URL found in ``media_urls`` is added as a document to the
``RESOLVER_URL_INDEX`` (if it doesn't already exist).
"""
def run(self, *args, **kwargs):
self.current_index_name = kwargs.get('current_index_name')
self.index_name = kwargs.get('new_index_name')
self.alias = kwargs.get('index_alias')
if not self.index_name:
raise ConfigurationError('The name of the index is not provided')
return super(ElasticsearchLoader, self).run(*args, **kwargs)
def load_item(self, object_id, combined_index_doc, doc):
log.info('Indexing documents...')
elasticsearch.index(index=settings.COMBINED_INDEX, doc_type='item',
id=object_id, body=combined_index_doc)
# Index documents into new index
elasticsearch.index(index=self.index_name, doc_type='item', body=doc,
id=object_id)
m_url_content_types = {}
if 'media_urls' in doc['enrichments']:
for media_url in doc['enrichments']['media_urls']:
if 'content_type' in media_url:
m_url_content_types[media_url['original_url']] = \
media_url['content_type']
# For each media_urls.url, add a resolver document to the
# RESOLVER_URL_INDEX
if 'media_urls' in doc:
for media_url in doc['media_urls']:
url_hash = media_url['url'].split('/')[-1]
url_doc = {
'original_url': media_url['original_url']
}
if media_url['original_url'] in m_url_content_types:
url_doc['content_type'] = \
m_url_content_types[media_url['original_url']]
try:
elasticsearch.create(index=settings.RESOLVER_URL_INDEX,
doc_type='url', id=url_hash,
body=url_doc)
except ConflictError:
log.debug('Resolver document %s already exists' % url_hash)
class DummyLoader(BaseLoader):
"""
Prints the item to the console, for debugging purposes.
"""
    def load_item(self, object_id, combined_index_doc, doc, transformer_task_id=None):
        # transformer_task_id defaults to None so BaseLoader.run's
        # three-argument call keeps working
        print('=' * 50)
        print('%s %s %s' % ('=' * 4, object_id, '=' * 4))
        print('%s %s %s' % ('-' * 20, 'combined', '-' * 20))
        print(combined_index_doc)
        print('%s %s %s' % ('-' * 20, 'doc', '-' * 25))
        print(doc)
        print('=' * 50)
    def run_finished(self, run_identifier):
        print('*' * 50)
        print('')
        print('Finished run {}'.format(run_identifier))
        print('')
        print('*' * 50)
|
405255
|
from flask import Blueprint
from flask_restx import Api
from orca.api.apis.v1 import graph as graph_ns
from orca.api.apis.v1 import ingestor as ingestor_ns
from orca.api.apis.v1 import alerts as alerts_ns
def initialize(graph):
blueprint = Blueprint('api', __name__, url_prefix='/v1')
api = Api(blueprint, title='OpenRCA API')
api.add_namespace(graph_ns.initialize(graph))
api.add_namespace(ingestor_ns.initialize(graph))
api.add_namespace(alerts_ns.initialize(graph))
return blueprint
|
405277
|
import unittest
from tests.utils import TestHarness, tasks, TaskErrorException
@tasks.bind()
def hello_workflow(first_name='Jane', last_name='Doe'):
return f'Hello {first_name} {last_name}'
@tasks.bind()
def pass_into_multi_arg_workflow(initial_return, *args, **kwargs):
return tasks.send(pass_through, *initial_return) \
.continue_with(multi_arg_workflow, *args, **kwargs)
@tasks.bind()
def pass_through(*args):
return args
@tasks.bind()
def multi_arg_workflow(a, b, c, d):
return ','.join([a, b, c, d])
@tasks.bind()
def multi_arg_workflow_with_some_defaults(a, b, c='c', d='d'):
return ','.join([a, b, c, d])
class ArgsAndKwargsTests(unittest.TestCase):
def setUp(self) -> None:
self.test_harness = TestHarness()
def test_passing_all_args(self):
pipeline = tasks.send(multi_arg_workflow, 'a', 'b', 'c', 'd')
result = self.test_harness.run_pipeline(pipeline)
self.assertEqual(result, 'a,b,c,d')
def test_passing_all_args_as_kwargs(self):
pipeline = tasks.send(multi_arg_workflow, a='a', b='b', c='c', d='d')
result = self.test_harness.run_pipeline(pipeline)
self.assertEqual(result, 'a,b,c,d')
def test_passing_mix_of_args_and_kwargs(self):
pipeline = tasks.send(multi_arg_workflow, 'a', 'b', c='c', d='d')
result = self.test_harness.run_pipeline(pipeline)
self.assertEqual(result, 'a,b,c,d')
def test_specifying_default_args_as_kwargs(self):
pipeline = tasks.send(hello_workflow, first_name='John', last_name='Smith')
result = self.test_harness.run_pipeline(pipeline)
        self.assertEqual(result, 'Hello John Smith')
def test_specifying_default_args_as_args(self):
pipeline = tasks.send(hello_workflow, 'John', 'Smith')
result = self.test_harness.run_pipeline(pipeline)
        self.assertEqual(result, 'Hello John Smith')
def test_specifying_single_default_arg(self):
pipeline = tasks.send(hello_workflow, last_name='Smith')
result = self.test_harness.run_pipeline(pipeline)
        self.assertEqual(result, 'Hello Jane Smith')
def test_multi_part_workflow_with_all_args_from_first_task_return(self):
pipeline = tasks.send(pass_into_multi_arg_workflow, ('a', 'b', 'c', 'd'))
result = self.test_harness.run_pipeline(pipeline)
self.assertEqual(result, 'a,b,c,d')
def test_multi_part_workflow_with_middle_args_from_first_task_return(self):
pipeline = tasks.send(pass_into_multi_arg_workflow, ('b', 'c'), a='a', d='d')
result = self.test_harness.run_pipeline(pipeline)
self.assertEqual(result, 'a,b,c,d')
def test_multi_part_workflow_with_additional_args_for_second_task_passed_as_args(self):
pipeline = tasks.send(pass_into_multi_arg_workflow, ('a', 'b'), 'c', 'd')
result = self.test_harness.run_pipeline(pipeline)
self.assertEqual(result, 'a,b,c,d')
def test_omitting_only_default_args(self):
pipeline = tasks.send(multi_arg_workflow_with_some_defaults, 'a', 'b')
result = self.test_harness.run_pipeline(pipeline)
self.assertEqual(result, 'a,b,c,d')
def test_providing_mandatory_plus_one_default_arg(self):
pipeline = tasks.send(multi_arg_workflow_with_some_defaults, 'a', 'b', d='d')
result = self.test_harness.run_pipeline(pipeline)
self.assertEqual(result, 'a,b,c,d')
def test_providing_mandatory_and_default_args(self):
pipeline = tasks.send(multi_arg_workflow_with_some_defaults, 'a', 'b', c='c', d='d')
result = self.test_harness.run_pipeline(pipeline)
self.assertEqual(result, 'a,b,c,d')
def test_not_providing_mandatory_args(self):
pipeline = tasks.send(multi_arg_workflow_with_some_defaults, 'a', c='c', d='d')
try:
self.test_harness.run_pipeline(pipeline)
except TaskErrorException as e:
self.assertEqual(e.task_error.message, 'Not enough args supplied')
else:
self.fail('Expected an exception')
if __name__ == '__main__':
unittest.main()
|
405287
|
from pprint import pprint
from pynso.client import NSOClient
from pynso.datastores import DatastoreType
# Setup a client
client = NSOClient('10.159.91.14', 'admin', 'admin')
# Get information about the API
print('Getting API version number')
pprint(client.info()['version'])
# Get the information about the running datastore
print('Getting the contents of the running datastore')
pprint(client.get_datastore(DatastoreType.RUNNING))
# Get a data path
print('Getting a specific data path: snmp:snmp namespace and the agent data object')
pprint(client.get_data(DatastoreType.RUNNING, ('snmp:snmp', 'agent')))
|
405345
|
import tensorflow as tf
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
MODEL_NAME = ""
CLIP = True # if your model was trained with np.clip to clip values
CLIP_VAL = 10 # if above, what was the value +/-
model = tf.keras.models.load_model(MODEL_NAME)
VALDIR = 'validation_data'
ACTIONS = ['left','none','right']
PRED_BATCH = 32
def get_val_data(valdir, action, batch_size):
argmax_dict = {0: 0, 1: 0, 2: 0}
raw_pred_dict = {0: 0, 1: 0, 2: 0}
action_dir = os.path.join(valdir, action)
for session_file in os.listdir(action_dir):
filepath = os.path.join(action_dir,session_file)
if CLIP:
data = np.clip(np.load(filepath), -CLIP_VAL, CLIP_VAL) / CLIP_VAL
else:
data = np.load(filepath)
preds = model.predict([data.reshape(-1, 16, 60)], batch_size=batch_size)
for pred in preds:
argmax = np.argmax(pred)
argmax_dict[argmax] += 1
for idx,value in enumerate(pred):
raw_pred_dict[idx] += value
argmax_pct_dict = {}
for i in argmax_dict:
total = 0
correct = argmax_dict[i]
for ii in argmax_dict:
total += argmax_dict[ii]
argmax_pct_dict[i] = round(correct/total, 3)
return argmax_dict, raw_pred_dict, argmax_pct_dict
def make_conf_mat(left, none, right):
action_dict = {"left": left, "none": none, "right": right}
action_conf_mat = pd.DataFrame(action_dict)
actions = [i for i in action_dict]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.matshow(action_conf_mat, cmap=plt.cm.RdYlGn)
ax.set_xticklabels([""]+actions)
ax.set_yticklabels([""]+actions)
print("__________")
print(action_dict)
for idx, i in enumerate(action_dict):
print('tf',i)
for idx2, ii in enumerate(action_dict[i]):
print(i, ii)
print(action_dict[i][ii])
ax.text(idx, idx2, f"{round(float(action_dict[i][ii]),2)}", va='center', ha='center')
plt.title("Action Thought")
plt.ylabel("Predicted Action")
plt.show()
left_argmax_dict, left_raw_pred_dict, left_argmax_pct_dict = get_val_data(VALDIR, "left", PRED_BATCH)
none_argmax_dict, none_raw_pred_dict, none_argmax_pct_dict = get_val_data(VALDIR, "none", PRED_BATCH)
right_argmax_dict, right_raw_pred_dict, right_argmax_pct_dict = get_val_data(VALDIR, "right", PRED_BATCH)
make_conf_mat(left_argmax_pct_dict, none_argmax_pct_dict, right_argmax_pct_dict)
|
405372
|
import FWCore.ParameterSet.Config as cms
# adapt the L1TMonitor_cff configuration to offline DQM
# DQM online L1 Trigger modules
from DQM.L1TMonitor.L1TMonitor_cff import *
# DTTF to offline configuration
l1tDttf.online = False
# input tag for BXTiming
bxTiming.FedSource = 'rawDataCollector'
|
405384
|
from django.conf.urls import url
from kitsune.motidings import views
# Note: This overrides the tidings tidings.unsubscribe url pattern, so
# we need to keep the name exactly as it is.
urlpatterns = [
url(r"^unsubscribe/(?P<watch_id>\d+)$", views.unsubscribe, name="tidings.unsubscribe")
]
|
405395
|
import websockets
import asyncio
class WSserver():
async def handle(self,websocket,path):
recv_msg = await websocket.recv()
print("i received %s" %recv_msg)
await websocket.send('server send ok')
def run(self):
ser = websockets.serve(self.handle,"192.168.43.51","1111")
asyncio.get_event_loop().run_until_complete(ser)
asyncio.get_event_loop().run_forever()
ws = WSserver()
ws.run()
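# A matching client sketch (run from another process; the address and port must
# match the ones passed to websockets.serve above):
#
# import asyncio
# import websockets
#
# async def client():
#     async with websockets.connect('ws://192.168.43.51:1111') as websocket:
#         await websocket.send('hello from client')
#         print(await websocket.recv())  # -> 'server send ok'
#
# asyncio.get_event_loop().run_until_complete(client())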
|
405428
|
import featuretools as ft
import pandas as pd
import os
from tqdm import tqdm
def make_user_sample(orders, order_products, departments, products, user_ids, out_dir):
orders_sample = orders[orders["user_id"].isin(user_ids)]
orders_keep = orders_sample["order_id"].values
order_products_sample = order_products[order_products["order_id"].isin(orders_keep)]
    os.makedirs(out_dir, exist_ok=True)
order_products_sample.to_csv(os.path.join(out_dir, "order_products__prior.csv"), index=None)
orders_sample.to_csv(os.path.join(out_dir, "orders.csv"), index=None)
departments.to_csv(os.path.join(out_dir, "departments.csv"), index=None)
products.to_csv(os.path.join(out_dir, "products.csv"), index=None)
def main():
data_dir = "data"
order_products = pd.concat([pd.read_csv(os.path.join(data_dir,"order_products__prior.csv")),
pd.read_csv(os.path.join(data_dir, "order_products__train.csv"))])
orders = pd.read_csv(os.path.join(data_dir, "orders.csv"))
departments = pd.read_csv(os.path.join(data_dir, "departments.csv"))
products = pd.read_csv(os.path.join(data_dir, "products.csv"))
users_unique = orders["user_id"].unique()
chunksize = 1000
part_num = 0
partition_dir = "partitioned_data"
    os.makedirs(partition_dir, exist_ok=True)
for i in tqdm(range(0, len(users_unique), chunksize)):
users_keep = users_unique[i: i+chunksize]
make_user_sample(orders, order_products, departments, products, users_keep, os.path.join(partition_dir, "part_%d" % part_num))
part_num += 1
if __name__ == "__main__":
main()
|
405432
|
import torch
# Creating the graph
x = torch.tensor(1.0, requires_grad = True)
y = torch.tensor(2.0)
z = x * y
# Displaying
for i, name in zip([x, y, z], "xyz"):
print(f"{name}\ndata: {i.data}\nrequires_grad: {i.requires_grad}\n\
grad: {i.grad}\ngrad_fn: {i.grad_fn}\nis_leaf: {i.is_leaf}\n")
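# Backpropagating from z populates grads on leaf tensors that require them:
# dz/dx = y = 2.0, while y.grad stays None because requires_grad is False.
z.backward()
print(f"x.grad: {x.grad}, y.grad: {y.grad}")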
|
405445
|
import caffe
import numpy as np
import os
import sys
import lang_seg_model as segmodel
from util.processing_tools import *
import train_config
def train(config):
with open('./lang_seg_model/proto_train.prototxt', 'w') as f:
f.write(str(segmodel.generate_model('train', config)))
caffe.set_device(config.gpu_id)
caffe.set_mode_gpu()
solver = caffe.get_solver('./lang_seg_model/solver.prototxt')
if config.weights is not None:
solver.net.copy_from(config.weights)
cls_loss_avg = 0.0
avg_accuracy_all, avg_accuracy_pos, avg_accuracy_neg = 0.0, 0.0, 0.0
decay = 0.99
for it in range(config.max_iter):
solver.step(1)
cls_loss_val = solver.net.blobs['loss'].data
scores_val = solver.net.blobs['upscores'].data.copy()
label_val = solver.net.blobs['label'].data.copy()
cls_loss_avg = decay*cls_loss_avg + (1-decay)*cls_loss_val
print('\titer = %d, cls_loss (cur) = %f, cls_loss (avg) = %f'
% (it, cls_loss_val, cls_loss_avg))
# Accuracy
accuracy_all, accuracy_pos, accuracy_neg = compute_accuracy(scores_val, label_val)
avg_accuracy_all = decay*avg_accuracy_all + (1-decay)*accuracy_all
avg_accuracy_pos = decay*avg_accuracy_pos + (1-decay)*accuracy_pos
avg_accuracy_neg = decay*avg_accuracy_neg + (1-decay)*accuracy_neg
print('\titer = %d, accuracy (cur) = %f (all), %f (pos), %f (neg)'
% (it, accuracy_all, accuracy_pos, accuracy_neg))
print('\titer = %d, accuracy (avg) = %f (all), %f (pos), %f (neg)'
% (it, avg_accuracy_all, avg_accuracy_pos, avg_accuracy_neg))
if __name__ == '__main__':
config = train_config.Config()
train(config)
|
405453
|
import sys
import numpy as np
from data.format import Events
class VoxelGrid:
def __init__(self, num_bins: int=5, width: int=640, height: int=480, upsample_rate: int=1):
assert num_bins > 1
assert height > 0
assert width > 0
self.num_bins = num_bins
self.width = width
self.height = height
self.upsample_rate = upsample_rate
def event_slicer(self, events: Events, t_reconstruction: int):
        assert np.max(events.t) <= t_reconstruction
sliced_events = []
t_start = events.t[0]
t_end = t_reconstruction
window_time = (t_end - t_start + 1)//self.upsample_rate
indices = [0]
max_idx = len(events.t) - 1
for i in range(1, self.upsample_rate):
idx = min(np.searchsorted(events.t, i*window_time+t_start, side='right'), max_idx)
indices.append(idx)
        indices.append(len(events.t)-1)  # Add the last timestamp
max_event_time_in_event_slice = []
for i in range(0, self.upsample_rate):
if indices[i+1] <= indices[i]:
assert indices[i+1] == indices[i]
sliced_events.append(None)
continue
ts = events.t[indices[i]:indices[i+1]]
sliced_events.append( Events(events.x[indices[i]:indices[i+1]],
events.y[indices[i]:indices[i+1]],
events.p[indices[i]:indices[i+1]],
ts))
return sliced_events
def convert_to_event_array(self, events: Events):
ts = events.t
event_array = np.stack((
np.asarray(ts, dtype="int64"),
np.asarray(events.x, dtype="float32"),
np.asarray(events.y, dtype="float32"),
np.asarray(events.p, dtype="float32"))).T
return event_array
def events_to_voxel_grid(self, events: Events):
"""
Build a voxel grid with bilinear interpolation in the time domain from a set of events.
:param events: a [N x 4] NumPy array containing one event per row in the form: [timestamp, x, y, polarity]
:param num_bins: number of bins in the temporal axis of the voxel grid
:param width, height: dimensions of the voxel grid
"""
event_array = self.convert_to_event_array(events)
assert(event_array.shape[1] == 4)
voxel_grid = np.zeros((self.num_bins, self.height, self.width), np.float32).ravel()
# normalize the event timestamps so that they lie between 0 and num_bins
last_stamp = event_array[-1, 0]
first_stamp = event_array[0, 0]
deltaT = last_stamp - first_stamp
if deltaT == 0:
deltaT = 1.0
event_array[:, 0] = (self.num_bins - 1) * (event_array[:, 0] - first_stamp) / deltaT
ts = event_array[:, 0]
        xs = event_array[:, 1].astype(int)
        ys = event_array[:, 2].astype(int)
        pols = event_array[:, 3]
        pols[pols == 0] = -1  # polarity should be +1 / -1
        tis = ts.astype(int)
dts = ts - tis
vals_left = pols * (1.0 - dts)
vals_right = pols * dts
valid_indices = tis < self.num_bins
np.add.at(voxel_grid, xs[valid_indices] + ys[valid_indices] * self.width +
tis[valid_indices] * self.width * self.height, vals_left[valid_indices])
valid_indices = (tis + 1) < self.num_bins
np.add.at(voxel_grid, xs[valid_indices] + ys[valid_indices] * self.width +
(tis[valid_indices] + 1) * self.width * self.height, vals_right[valid_indices])
voxel_grid = np.reshape(voxel_grid, (self.num_bins, self.height, self.width))
return voxel_grid, last_stamp
def normalize_voxel(self, voxel_grid, normalize=True):
if normalize:
mask = np.nonzero(voxel_grid)
if mask[0].size > 0:
mean, stddev = voxel_grid[mask].mean(), voxel_grid[mask].std()
if stddev > 0:
voxel_grid[mask] = (voxel_grid[mask] - mean) / stddev
return voxel_grid
if __name__ == '__main__':
events = Events(
x=np.array([0, 1, 5, 3, 4 ,7], dtype='uint16'),
y=np.array([1, 2, 4, 3, 4, 1], dtype='uint16'),
p=np.array([0, 0, 1, 1, 0, 1], dtype='uint8'),
t=np.array([5, 9, 11, 17, 27, 30], dtype='int64'),
width=8,
height=5,
t_reconstruction=31)
grid_repr = VoxelGrid(5, events.width, events.height, upsample_rate=2)
    sliced_events = grid_repr.event_slicer(events, events.t_reconstruction)
voxel_grid = []
    for i in range(len(sliced_events)):
        if sliced_events[i] is None:  # empty slice: nothing to voxelize
            continue
        voxel_grid.append(grid_repr.events_to_voxel_grid(sliced_events[i]))
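    # Illustrative follow-up (not part of the original script): each entry of
    # voxel_grid is a (grid, last_stamp) tuple, and the grid can be standardized
    # to zero mean / unit std over its non-zero voxels with normalize_voxel.
    first_grid, last_stamp = voxel_grid[0]
    first_grid = grid_repr.normalize_voxel(first_grid)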
|
405546
|
import copy
from unittest import mock
import graphene
import pytest
from ....plugins.error_codes import PluginErrorCode
from ....plugins.manager import get_plugins_manager
from ....plugins.models import PluginConfiguration
from ....plugins.tests.sample_plugins import ChannelPluginSample, PluginSample
from ....plugins.tests.utils import get_config_value
from ...tests.utils import assert_no_permission, get_graphql_content
PLUGIN_UPDATE_MUTATION = """
mutation pluginUpdate(
$id: ID!
$active: Boolean
$channel: ID
$configuration: [ConfigurationItemInput!]
) {
pluginUpdate(
id: $id
channelId: $channel
input: { active: $active, configuration: $configuration }
) {
plugin {
name
description
globalConfiguration{
active
configuration{
name
value
helpText
type
label
}
channel{
id
slug
}
}
channelConfigurations{
active
channel{
id
slug
}
configuration{
name
value
helpText
type
label
}
}
}
errors {
field
message
}
pluginsErrors {
field
code
}
}
}
"""
@pytest.mark.parametrize(
"active, updated_configuration_item",
[
(True, {"name": "Username", "value": "user"}),
(False, {"name": "Username", "value": "<EMAIL>"}),
],
)
def test_plugin_configuration_update(
staff_api_client_can_manage_plugins, settings, active, updated_configuration_item
):
settings.PLUGINS = ["saleor.plugins.tests.sample_plugins.PluginSample"]
manager = get_plugins_manager()
plugin = manager.get_plugin(PluginSample.PLUGIN_ID)
old_configuration = copy.deepcopy(plugin.configuration)
variables = {
"id": plugin.PLUGIN_ID,
"active": active,
"channel": None,
"configuration": [updated_configuration_item],
}
response = staff_api_client_can_manage_plugins.post_graphql(
PLUGIN_UPDATE_MUTATION, variables
)
content = get_graphql_content(response)
plugin_data = content["data"]["pluginUpdate"]["plugin"]
assert plugin_data["name"] == plugin.PLUGIN_NAME
assert plugin_data["description"] == plugin.PLUGIN_DESCRIPTION
plugin = PluginConfiguration.objects.get(identifier=PluginSample.PLUGIN_ID)
assert plugin.active == active
first_configuration_item = plugin.configuration[0]
assert first_configuration_item["name"] == updated_configuration_item["name"]
assert first_configuration_item["value"] == updated_configuration_item["value"]
second_configuration_item = plugin.configuration[1]
assert second_configuration_item["name"] == old_configuration[1]["name"]
assert second_configuration_item["value"] == old_configuration[1]["value"]
configuration = plugin_data["globalConfiguration"]["configuration"]
assert configuration is not None
assert configuration[0]["name"] == updated_configuration_item["name"]
assert configuration[0]["value"] == updated_configuration_item["value"]
@pytest.mark.parametrize(
"active",
[
True,
False,
],
)
def test_plugin_configuration_update_for_channel_configurations(
staff_api_client_can_manage_plugins, settings, active, channel_PLN
):
settings.PLUGINS = ["saleor.plugins.tests.sample_plugins.ChannelPluginSample"]
manager = get_plugins_manager()
plugin = manager.get_plugin(
ChannelPluginSample.PLUGIN_ID, channel_slug=channel_PLN.slug
)
variables = {
"id": plugin.PLUGIN_ID,
"active": active,
"channel": graphene.Node.to_global_id("Channel", channel_PLN.id),
"configuration": [{"name": "input-per-channel", "value": "update-value"}],
}
response = staff_api_client_can_manage_plugins.post_graphql(
PLUGIN_UPDATE_MUTATION, variables
)
content = get_graphql_content(response)
plugin_data = content["data"]["pluginUpdate"]["plugin"]
assert plugin_data["name"] == plugin.PLUGIN_NAME
assert plugin_data["description"] == plugin.PLUGIN_DESCRIPTION
assert len(plugin_data["channelConfigurations"]) == 1
api_configuration = plugin_data["channelConfigurations"][0]
plugin = PluginConfiguration.objects.get(identifier=ChannelPluginSample.PLUGIN_ID)
assert plugin.active == active == api_configuration["active"]
configuration_item = plugin.configuration[0]
assert configuration_item["name"] == "input-per-channel"
assert configuration_item["value"] == "update-value"
configuration = api_configuration["configuration"]
assert len(configuration) == 1
assert configuration[0]["name"] == configuration_item["name"]
assert configuration[0]["value"] == configuration_item["value"]
def test_plugin_configuration_update_channel_slug_required(
staff_api_client_can_manage_plugins, settings, channel_PLN
):
settings.PLUGINS = ["saleor.plugins.tests.sample_plugins.ChannelPluginSample"]
manager = get_plugins_manager()
plugin = manager.get_plugin(
ChannelPluginSample.PLUGIN_ID, channel_slug=channel_PLN.slug
)
variables = {
"id": plugin.PLUGIN_ID,
"active": True,
"channel": None,
"configuration": [{"name": "input-per-channel", "value": "update-value"}],
}
response = staff_api_client_can_manage_plugins.post_graphql(
PLUGIN_UPDATE_MUTATION, variables
)
content = get_graphql_content(response)
assert not content["data"]["pluginUpdate"]["plugin"]
assert len(content["data"]["pluginUpdate"]["pluginsErrors"]) == 1
error = content["data"]["pluginUpdate"]["pluginsErrors"][0]
assert error["field"] == "id"
assert error["code"] == PluginErrorCode.NOT_FOUND.name
def test_plugin_configuration_update_unneeded_channel_slug(
staff_api_client_can_manage_plugins, settings, channel_PLN
):
settings.PLUGINS = ["saleor.plugins.tests.sample_plugins.PluginSample"]
manager = get_plugins_manager()
plugin = manager.get_plugin(PluginSample.PLUGIN_ID, channel_slug=channel_PLN.slug)
variables = {
"id": plugin.PLUGIN_ID,
"active": True,
"channel": graphene.Node.to_global_id("Channel", channel_PLN.id),
"configuration": [{"name": "input-per-channel", "value": "update-value"}],
}
response = staff_api_client_can_manage_plugins.post_graphql(
PLUGIN_UPDATE_MUTATION, variables
)
content = get_graphql_content(response)
assert not content["data"]["pluginUpdate"]["plugin"]
assert len(content["data"]["pluginUpdate"]["pluginsErrors"]) == 1
error = content["data"]["pluginUpdate"]["pluginsErrors"][0]
assert error["field"] == "id"
assert error["code"] == PluginErrorCode.INVALID.name
def test_plugin_configuration_update_containing_invalid_plugin_id(
staff_api_client_can_manage_plugins,
):
variables = {
"id": "fake-id",
"active": True,
"channel": None,
"configuration": [{"name": "Username", "value": "user"}],
}
response = staff_api_client_can_manage_plugins.post_graphql(
PLUGIN_UPDATE_MUTATION, variables
)
content = get_graphql_content(response)
assert content["data"]["pluginUpdate"]["pluginsErrors"][0] == {
"field": "id",
"code": PluginErrorCode.NOT_FOUND.name,
}
def test_plugin_update_saves_boolean_as_boolean(
staff_api_client_can_manage_plugins, settings
):
settings.PLUGINS = ["saleor.plugins.tests.sample_plugins.PluginSample"]
manager = get_plugins_manager()
plugin = manager.get_plugin(PluginSample.PLUGIN_ID)
use_sandbox = get_config_value("Use sandbox", plugin.configuration)
variables = {
"id": plugin.PLUGIN_ID,
"active": plugin.active,
"channel": None,
"configuration": [{"name": "Use sandbox", "value": True}],
}
response = staff_api_client_can_manage_plugins.post_graphql(
PLUGIN_UPDATE_MUTATION, variables
)
content = get_graphql_content(response)
assert len(content["data"]["pluginUpdate"]["errors"]) == 0
use_sandbox_new_value = get_config_value("Use sandbox", plugin.configuration)
assert type(use_sandbox) == type(use_sandbox_new_value)
def test_plugin_configuration_update_as_customer_user(user_api_client, settings):
settings.PLUGINS = ["saleor.plugins.tests.sample_plugins.PluginSample"]
manager = get_plugins_manager()
plugin = manager.get_plugin(PluginSample.PLUGIN_ID)
variables = {
"id": plugin.PLUGIN_ID,
"active": True,
"channel": None,
"configuration": [{"name": "Username", "value": "user"}],
}
response = user_api_client.post_graphql(PLUGIN_UPDATE_MUTATION, variables)
assert_no_permission(response)
def test_cannot_update_configuration_hidden_plugin(
settings, staff_api_client_can_manage_plugins
):
"""Ensure one cannot edit the configuration of hidden plugins"""
client = staff_api_client_can_manage_plugins
settings.PLUGINS = ["saleor.plugins.tests.sample_plugins.PluginSample"]
plugin_id = PluginSample.PLUGIN_ID
original_config = get_plugins_manager().get_plugin(plugin_id).configuration
variables = {
"id": plugin_id,
"active": False,
"channel": None,
"configuration": [{"name": "Username", "value": "MyNewUsername"}],
}
# Attempt to update hidden plugin, should error with object not found
with mock.patch.object(PluginSample, "HIDDEN", new=True):
response = client.post_graphql(PLUGIN_UPDATE_MUTATION, variables)
assert response.status_code == 200
content = get_graphql_content(response)
assert content["data"]["pluginUpdate"]["pluginsErrors"] == [
{"code": "NOT_FOUND", "field": "id"}
]
# Hidden plugin should be untouched
plugin = get_plugins_manager().get_plugin(plugin_id)
assert plugin.active is True
assert plugin.configuration == original_config
# Ensure the plugin was modifiable if not hidden
response = client.post_graphql(PLUGIN_UPDATE_MUTATION, variables)
assert response.status_code == 200
content = get_graphql_content(response)
assert content["data"]["pluginUpdate"]["pluginsErrors"] == []
plugin = get_plugins_manager().get_plugin(plugin_id)
assert plugin.active is False
assert plugin.configuration != original_config
def test_cannot_update_configuration_hidden_multi_channel_plugin(
settings,
staff_api_client_can_manage_plugins,
channel_USD,
):
"""Ensure one cannot edit the configuration of hidden multi channel plugins"""
client = staff_api_client_can_manage_plugins
settings.PLUGINS = ["saleor.plugins.tests.sample_plugins.ChannelPluginSample"]
plugin_id = ChannelPluginSample.PLUGIN_ID
original_config = (
get_plugins_manager()
.get_plugin(plugin_id, channel_slug=channel_USD.slug)
.configuration
)
variables = {
"id": plugin_id,
"active": False,
"channel": graphene.Node.to_global_id("Channel", channel_USD.id),
"configuration": [{"name": "input-per-channel", "value": "NewValue"}],
}
# Attempt to update hidden plugin, should error with object not found
with mock.patch.object(PluginSample, "HIDDEN", new=True):
response = client.post_graphql(PLUGIN_UPDATE_MUTATION, variables)
assert response.status_code == 200
content = get_graphql_content(response)
assert content["data"]["pluginUpdate"]["pluginsErrors"] == [
{"code": "NOT_FOUND", "field": "id"}
]
# Hidden plugin should be untouched
plugin = get_plugins_manager().get_plugin(plugin_id, channel_slug=channel_USD.slug)
assert plugin.active is True
assert plugin.configuration == original_config
# Ensure the plugin was modifiable if not hidden
response = client.post_graphql(PLUGIN_UPDATE_MUTATION, variables)
assert response.status_code == 200
content = get_graphql_content(response)
assert content["data"]["pluginUpdate"]["pluginsErrors"] == []
plugin = get_plugins_manager().get_plugin(plugin_id, channel_slug=channel_USD.slug)
assert plugin.active is False
assert plugin.configuration != original_config
|
405614
|
import logging
logger = logging.getLogger(__name__)
def discover():
"""
Auto-discover any Gutter configuration present in the django
INSTALLED_APPS.
"""
from django.conf import settings
    from importlib import import_module
for app in settings.INSTALLED_APPS:
module = '%s.gutter' % app
        try:
            import_module(module)
            logger.info('Successfully autodiscovered %s' % module)
        except ImportError:
            # The app does not ship a gutter module; skip it.
            pass
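# Illustrative layout (hypothetical, not from the original source): an app opts
# in to discovery simply by shipping a `gutter.py` module; whatever that module
# does at import time (typically registering switches/conditions) runs when
# discover() imports it.
#
#     myproject/
#         myapp/
#             __init__.py
#             gutter.py   # imported by discover() when 'myapp' is in INSTALLED_APPS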
|
405661
|
from django.contrib import admin
from grandchallenge.publications.forms import PublicationForm
from grandchallenge.publications.models import Publication
class PublicationAdmin(admin.ModelAdmin):
list_display = [
"identifier",
"year",
"title",
"referenced_by_count",
"citation",
]
readonly_fields = [
"title",
"referenced_by_count",
"csl",
"ama_html",
"year",
"citation",
]
form = PublicationForm
search_fields = (
"title",
"year",
"identifier",
)
def get_readonly_fields(self, request, obj=None):
if obj:
return self.readonly_fields + ["identifier"]
else:
return self.readonly_fields
admin.site.register(Publication, PublicationAdmin)
|
405675
|
import snoop
@snoop
def main():
    try:
        # Deliberately "decorate" a function with int so that applying the
        # decorator raises a TypeError for snoop to trace.
        @int
        def foo():
            pass
    except TypeError:
        pass
if __name__ == '__main__':
main()
|
405694
|
from pub import Pub
class RecordthresherPub(Pub):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._authors = None
@property
def authors(self):
return self._authors
@authors.setter
def authors(self, authors):
self._authors = authors
@property
def first_author_lastname(self):
if self.authors:
return self.authors[0].get('family')
return None
@property
def last_author_lastname(self):
if self.authors:
return self.authors[-1].get('family')
return None
def __repr__(self):
if self.id:
my_string = self.id
else:
my_string = self.best_title
return "<RecordthresherPub ( {} )>".format(my_string)
|
405733
|
import os
for f in os.listdir('.'):
    if f.endswith(".md"):
        # Read in the file
        with open(f, 'r') as file:
filedata = file.read()
# Replace the target string
filedata = filedata.replace('\n', ' \n')
filedata = filedata.replace(' ?', chr(160)+"?")
filedata = filedata.replace(' !', chr(160)+"!")
# Write the file out again
with open(f, 'w') as file:
file.write(filedata)
|
405741
|
from __future__ import division, print_function
import os
import shutil
import theano
import theano.tensor as T
from blocks.extensions.saveload import Checkpoint
from plat.sampling import grid_from_latents
from plat.grid_layout import create_chain_grid
from plat.fuel_helper import get_anchor_images
class SampleCheckpoint(Checkpoint):
def __init__(self, interface, z_dim, image_size, channels, dataset, split, save_subdir, **kwargs):
super(SampleCheckpoint, self).__init__(path=None, **kwargs)
self.interface = interface
self.image_size = image_size
self.channels = channels
self.save_subdir = save_subdir
self.iteration = 0
self.epoch_src = "{0}/sample.png".format(save_subdir)
self.rows=7
self.cols=10
self.spacing = 3
self.z_dim = z_dim
numanchors = 10 + 10
self.anchor_images = get_anchor_images(dataset, split, numanchors=numanchors, image_size=image_size, include_targets=False)
if not os.path.exists(self.save_subdir):
os.makedirs(self.save_subdir)
def do(self, callback_name, *args):
"""Sample the model and save images to disk
"""
dmodel = self.interface(model=self.main_loop.model)
anchors = dmodel.encode_images(self.anchor_images)
z = create_chain_grid(self.rows, self.cols, self.z_dim, self.spacing, anchors=anchors, spherical=True, gaussian=False)
grid_from_latents(z, dmodel, rows=self.rows, cols=self.cols, anchor_images=None, tight=False, shoulders=False, save_path=self.epoch_src, batch_size=12)
if os.path.exists(self.epoch_src):
epoch_dst = "{0}/epoch-{1:03d}.png".format(self.save_subdir, self.iteration)
self.iteration = self.iteration + 1
shutil.copy2(self.epoch_src, epoch_dst)
os.system("convert -delay 5 -loop 1 {0}/epoch-*.png {0}/training.gif".format(self.save_subdir))
|
405744
|
from __future__ import absolute_import
import os
from . import caffe_pb2 as pb
import google.protobuf.text_format as text_format
import numpy as np
from .layer_parameter import LayerParameter
class _Net(object):
def __init__(self):
self.net = pb.NetParameter()
self.name_dict = {}
self.add_layer_set = True
def set_add_layer(self, add_layer_set):
self.add_layer_set = add_layer_set
def layer_index(self, layer_name):
for i, layer in enumerate(self.net.layer):
if layer.name == layer_name:
return i
return -1
def add_output(self, outputs):
for item in outputs:
self.net.output.append(item)
def add_layer(self, layer_params, before='', after=''):
if (not self.add_layer_set):
return
if (layer_params.layerName in self.name_dict):
print("[ERROR] layer %s duplicate" % (layer_params.layerName))
exit(-1)
else:
self.name_dict[layer_params.layerName] = 1
index = -1
if after != '':
index = self.layer_index(after) + 1
if before != '':
index = self.layer_index(before)
new_layer = pb.LayerParameter()
new_layer.CopyFrom(layer_params.layerParameter)
if index != -1:
self.net.layer.add()
            for i in range(len(self.net.layer) - 1, index, -1):
self.net.layer[i].CopyFrom(self.net.layer[i - 1])
self.net.layer[index].CopyFrom(new_layer)
else:
self.net.layer.extend([new_layer])
def remove_layer_by_name(self, layer_name):
for i,layer in enumerate(self.net.layer):
if layer.name == layer_name:
del self.net.layer[i]
return
def get_layer_by_name(self, layer_name):
for layer in self.net.layer:
if layer.name == layer_name:
return layer
def save_prototxt(self, path):
prototxt = pb.NetParameter()
prototxt.CopyFrom(self.net)
for layer in prototxt.layer:
del layer.blobs[:]
with open(path,'w') as f:
f.write(text_format.MessageToString(prototxt))
def layer(self, layer_name):
return self.get_layer_by_name(layer_name)
def layers(self):
return list(self.net.layer)
class Prototxt(_Net):
def __init__(self, file_name=''):
super(Prototxt, self).__init__()
self.file_name = file_name
        if file_name != '':
            with open(file_name, 'r') as f:
                text_format.Parse(f.read(), self.net)
def init_caffemodel(self,caffe_cmd_path='caffe'):
s = pb.SolverParameter()
s.train_net = self.file_name
s.max_iter = 0
s.base_lr = 1
s.solver_mode = pb.SolverParameter.CPU
s.snapshot_prefix = './nn'
with open('/tmp/nn_tools_solver.prototxt','w') as f:
f.write(str(s))
os.system('%s train --solver /tmp/nn_tools_solver.prototxt'%caffe_cmd_path)
class CaffeModel(_Net):
def __init__(self, file_name=''):
super(CaffeModel, self).__init__()
if file_name != '':
f = open(file_name,'rb')
self.net.ParseFromString(f.read())
f.close()
def save(self, path):
with open(path,'wb') as f:
f.write(self.net.SerializeToString())
def add_layer_with_data(self,layer_params,datas, before='', after=''):
self.add_layer(layer_params,before,after)
new_layer = self.layer(layer_params.name)
#process blobs
del new_layer.blobs[:]
for data in datas:
new_blob = new_layer.blobs.add()
for dim in data.shape:
new_blob.shape.dim.append(dim)
new_blob.data.extend(data.flatten().astype(float))
def get_layer_data(self, layer_name):
layer = self.layer(layer_name)
datas = []
for blob in layer.blobs:
shape = list(blob.shape.dim)
data = np.array(blob.data).reshape(shape)
datas.append(data)
return datas
def set_layer_data(self, layer_name, datas):
layer = self.layer(layer_name)
for blob,data in zip(layer.blobs,datas):
blob.data[:] = data.flatten()
pass
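# Illustrative usage sketch (not part of the original module): load a prototxt,
# insert a layer after an existing one, and write out a cleaned copy. The layer
# name 'conv1' and the LayerParameter construction are hypothetical and depend
# on the sibling layer_parameter module.
#
#     net = Prototxt('deploy.prototxt')
#     # lp = LayerParameter(...)           # build the new layer's parameters
#     # net.add_layer(lp, after='conv1')
#     net.save_prototxt('deploy_edited.prototxt')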
|
405763
|
import os
import sys
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
from os.path import join
from algorithms.utils import summaries_dir, experiment_dir, experiments_dir, ensure_dir_exists
from utils.utils import log
sns.set()
COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'black', 'purple', 'pink',
'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'lightpurple', 'darkred', 'darkblue', 'yellow']
class Experiment:
def __init__(self, name, descr):
self.name = name
self.descr = descr
class Plot:
def __init__(self, name, axis, descr):
self.name = name
self.axis = axis
self.descr = descr
def running_mean(x, n):
"""Courtesy of https://stackoverflow.com/a/27681394/1645784"""
cumsum = np.cumsum(np.insert(x, 0, 0))
n += 1
return (cumsum[n:] - cumsum[:-n]) / float(n)
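# Example (illustrative, not part of the original script):
# running_mean([1, 2, 3, 4, 5], 2) averages over a window of 3 samples
# (n is incremented inside) and returns array([2., 3., 4.]).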
def main():
"""Script entry point."""
stop_at = 80 * 1000 * 1000
prefix = 'simple'
# noinspection PyUnusedLocal
experiments_very_sparse = [
Experiment('doom_curious_vs_vanilla/doom_maze_very_sparse/doom_maze_very_sparse_pre_0.0', 'A2C (no curiosity)'),
Experiment('doom_sweep_very_sparse/doom_sweep_i_0.5_p_0.05', 'A2C+ICM (curious)'),
]
# noinspection PyUnusedLocal
experiments_sparse = [
Experiment('doom_curious_vs_vanilla/doom_maze_sparse/doom_maze_sparse_pre_0.0', 'A2C (no curiosity)'),
Experiment('doom_curious_vs_vanilla/doom_maze_sparse/doom_maze_sparse_pre_0.05', 'A2C+ICM (curious)'),
]
# noinspection PyUnusedLocal
experiments_basic = [
Experiment('doom_curious_vs_vanilla/doom_maze/doom_maze_pre_0.0', 'A2C (no curiosity)'),
Experiment('doom_curious_vs_vanilla/doom_maze/doom_maze_pre_0.05', 'A2C+ICM (curious)'),
]
experiments = [
Experiment('doom_curious_vs_vanilla/doom_basic/doom_basic_pre_0.0', 'A2C (no curiosity)'),
Experiment('doom_curious_vs_vanilla/doom_basic/doom_basic_pre_0.05', 'A2C+ICM (curious)'),
]
plots = [
Plot('a2c_aux_summary/avg_reward', 'average reward', 'Avg. reward for the last 1000 episodes'),
Plot(
'a2c_agent_summary/policy_entropy',
'policy entropy, nats',
'Stochastic policy entropy',
),
]
for plot in plots:
fig = plt.figure(figsize=(5, 4))
fig.add_subplot()
for ex_i, experiment in enumerate(experiments):
experiment_name = experiment.name.split(os.sep)[-1]
experiments_root = join(*(experiment.name.split(os.sep)[:-1]))
exp_dir = experiment_dir(experiment_name, experiments_root)
path_to_events_dir = summaries_dir(exp_dir)
events_files = []
for f in os.listdir(path_to_events_dir):
if f.startswith('events'):
events_files.append(join(path_to_events_dir, f))
if len(events_files) == 0:
log.error('No events file for %s', experiment)
continue
events_files = sorted(events_files)
steps, values = [], []
for events_file in events_files:
iterator = tf.train.summary_iterator(events_file)
while True:
try:
e = next(iterator, None)
except Exception as exc:
log.warning(str(exc))
break
if e is None:
break
for v in e.summary.value:
if e.step >= stop_at:
break
if v.tag == plot.name:
steps.append(e.step)
values.append(v.simple_value)
# just in case
values = np.nan_to_num(values)
smooth = 10
values_smooth = running_mean(values, smooth)
steps = steps[smooth:]
values = values[smooth:]
plt.plot(steps, values, color=COLORS[ex_i], alpha=0.2, label='__nolegend__')
plt.plot(steps, values_smooth, color=COLORS[ex_i], label=experiment.descr, linewidth=2)
plt.xlabel('environment steps')
plt.ylabel(plot.axis)
plt.title(plot.descr)
plt.grid(True)
plt.legend()
plt.tight_layout()
plots_dir = ensure_dir_exists(join(experiments_dir(), 'plots'))
plot_name = plot.name.replace('/', '_')
plt.savefig(join(plots_dir, f'{prefix}_{plot_name}.png'))
plt.close()
return 0
if __name__ == '__main__':
sys.exit(main())
|
405769
|
class Solution(object):
def findWords(self, words):
"""
:type words: List[str]
:rtype: List[str]
"""
        TOP_ROW = set('QWERTYUIOP')
        MIDDLE_ROW = set('ASDFGHJKL')
        BOTTOM_ROW = set('ZXCVBNM')
        answer = []
        for word in words:
            letters = set(word.upper())
            # Keep the word only if all of its letters sit on one keyboard row.
            if (letters <= TOP_ROW or letters <= MIDDLE_ROW
                    or letters <= BOTTOM_ROW):
                answer.append(word)
        return answer
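if __name__ == "__main__":
    # Illustrative check (not part of the original solution): only words typed
    # on a single keyboard row are kept.
    assert Solution().findWords(["Hello", "Alaska", "Dad", "Peace"]) == ["Alaska", "Dad"]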
|
405854
|
class Solution:
def shortestPalindrome(self, s: str) -> str:
        # Find the longest palindromic prefix of s via the KMP failure function
        # of s + '#' + reverse(s), then prepend the reversed remainder.
        temp = s + '#' + s[::-1]
i = 1
l = 0
lps = [0] * len(temp)
while i < len(temp):
if temp[i] == temp[l]:
lps[i] = l + 1
i += 1
l += 1
elif l != 0:
l = lps[l - 1]
else:
i += 1
        # l is now the length of the longest prefix of s that is a palindrome
        return s[l:][::-1] + s
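if __name__ == "__main__":
    # Illustrative checks (not part of the original solution):
    assert Solution().shortestPalindrome("aacecaaa") == "aaacecaaa"
    assert Solution().shortestPalindrome("abcd") == "dcbabcd"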
|
405865
|
from bibliopixel.animation.strip import Strip
from bibliopixel.colors import COLORS, make, palette
from bibliopixel.colors.arithmetic import color_scale, color_blend
from numpy import random, concatenate
"""
Emitter animation
By <NAME> 4/1/2019
A strip animation with particle system emitters.
Watch a video describing the use of this class here:
https://www.youtube.com/watch?v=_UtHC1IhKxg
Each strip can have multiple emitters with positions and velocities
controlled by the emitters list parameter. Positions and velocities
are float values. The rendering step figures out what is visible at
each integral strip position for a given frame.
Particles move away from an emitter starting at full brightness of a
randomly selected color. Colors are selected from the palette field
unless a particle palette is specified in the emitters list. The
particle brightness then varies in a random manner. The random
variations are chosen from a list built at initialization. The default
settings should make the particles "sparkle" and fade. The mean, mu,
and sigma fields can be altered to change the effect. The distribution
of random brightness changes is shaped like two peaks around the bd_mean
point, centered at +/-bd_mu, with their widths controlled by bd_sigma.
Particle velocities are random and based on vel_mu and vel_sigma
:param palette: Each new emitted particle starts at a color picked from
    this list (can be overridden on a per-emitter basis)
:param emitters: List of tuples for each emitter. Each tuple:
* position - In strip's _start to _end. Default: _start
* direction - +1 emit toward end, -1 toward start, 0 both ways.
Default +1
* velocity - Emitter velocity in positions/step. Default 0
* range - particles don't travel beyond this dist. Default: _end
* emitter color - Use None/null if the emitter is invisible
        * particle palette - Palette/color for particles
:param bgcolor: The background color for the animation
:param wrap: Particles wrap across ends of strip
:param aperture: Render particles within this visible distance
:param starts_at_once: Maximum number of particles that can be emitted
at each step
:param starts_prob: Probability of a particle being emitted (0.0 never -
1.0 always)
:param flare_prob: Probability that a particle will re-ignite on the
strip segment
:param bd_mean: Average fade (negative) or boost (positive) per particle
per step. The default value is computed (-2 * 256 / _size) if the
field is not set.
:param bd_mu: Average distance from mean for the positive and negative
variations.
:param bd_sigma: Standard deviation from mu for variations on the
positive and negative sides. The default value is computed (mu *
0.25) if the field is not set.
:param vel_mu: Average velocity of particles. Positive is away from
emitter.
:param vel_sigma: Standard deviation from mu for particle velocity
"""
class Emitter(Strip):
COLOR_DEFAULTS = ('palette', 'gold'),
def make_bd(self):
"Make a set of 'shaped' random #'s for particle brightness deltas (bd)"
self.bd = concatenate((
# These values will dim the particles
random.normal(
self.bd_mean - self.bd_mu, self.bd_sigma, 16).astype(int),
# These values will brighten the particles
random.normal(
self.bd_mean + self.bd_mu, self.bd_sigma, 16).astype(int)),
axis=0)
def make_vel(self):
"Make a set of velocities to be randomly chosen for emitted particles"
self.vel = random.normal(self.vel_mu, self.vel_sigma, 16)
# Make sure nothing's slower than 1/8 pixel / step
for i, vel in enumerate(self.vel):
if abs(vel) < 0.125 / self._size:
if vel < 0:
self.vel[i] = -0.125 / self._size
else:
self.vel[i] = 0.125 / self._size
def __init__(
self, *args,
emitters=None,
bgcolor=COLORS.black,
wrap=True,
aperture=0.5,
starts_at_once=6,
starts_prob=0.33,
flare_prob=0.05,
bd_mean=None,
bd_mu=80,
bd_sigma=None,
vel_mu=1,
vel_sigma=0.25,
**kwds):
"Process all the arguments and set up for the run"
super().__init__(*args, **kwds)
self.half_size = float(self._size) / 2.0
if emitters is None:
emitters = [(None, None, None, None, None, None)]
self.emitters = []
self.has_e_colors = False
self.has_moving_emitters = False
for e_pos, e_dir, e_vel, e_range, e_color_str, e_pal_list in emitters:
if e_pos is None:
e_pos = self._start
if e_dir is None:
e_dir = 1
if e_range is None:
e_range = self._size
if e_vel is None:
e_vel = 0
if e_vel != 0:
self.has_moving_emitters = True
if e_pos >= (self._end + 1):
raise ValueError('Emitter Position %d >= end+1 (%d)' %
(e_pos, self._end + 1))
if e_pos < self._start:
raise ValueError('Emitter position %d < start (%d)' %
(e_pos, self._start))
if e_color_str is not None:
e_color = make.color(e_color_str)
self.has_e_colors = True
else:
e_color = None
if e_pal_list is None:
e_pal = self.palette # passed in or default
else:
e_pal = palette.Palette(make.colors(e_pal_list))
self.emitters.append(
(e_pos, e_dir, e_vel, e_range, e_color, e_pal))
self.bgcolor = bgcolor
self.wrap = wrap
if aperture < 0:
raise ValueError('Render aperture %g < 0' % (aperture))
self.aperture = aperture
self.starts_at_once = starts_at_once
self.starts_prob = starts_prob
self.flare_prob = flare_prob
        self.step_flare_prob = self.flare_prob / self._size  # true division; floor division would zero this out
if bd_mean is None:
self.bd_mean = -2 * 256 // self._size
else:
self.bd_mean = bd_mean
        self.bd_mu = bd_mu
if bd_sigma is None:
self.bd_sigma = self.bd_mu * 0.25
else:
self.bd_sigma = bd_sigma
self.vel_mu = vel_mu
self.vel_sigma = vel_sigma
# Random number lists
self.make_bd()
self.make_vel()
# List of tuples for flying/walking particles
# (velocity, position, steps to live, color, brightness)
self.particles = []
def move_particles(self):
"""
        Move each particle by its velocity, adjusting brightness as we go.
        Particles that have moved beyond their range (steps to live), and
        those that move off the ends and are not wrapped get sacked.
        Particles can stay between _end and up to but not including _end+1.
        No particles can exist before _start without wrapping.
"""
moved_particles = []
for vel, pos, stl, color, bright in self.particles:
stl -= 1 # steps to live
if stl > 0:
pos = pos + vel
if vel > 0:
if pos >= (self._end + 1):
if self.wrap:
pos = pos - (self._end + 1) + self._start
else:
continue # Sacked
else:
if pos < self._start:
if self.wrap:
                            pos = pos + (self._end + 1) - self._start
else:
continue # Sacked
if random.random() < self.step_flare_prob:
bright = 255
else:
bright = bright + random.choice(self.bd)
if bright > 255:
bright = 255
# Zombie particles with bright<=0 walk, don't -overflow
if bright < -10000:
bright = -10000
moved_particles.append((vel, pos, stl, color, bright))
self.particles = moved_particles
def move_emitters(self):
"""
        Move each emitter by its velocity. Emitters that move off the ends
and are not wrapped get sacked.
"""
moved_emitters = []
for e_pos, e_dir, e_vel, e_range, e_color, e_pal in self.emitters:
e_pos = e_pos + e_vel
if e_vel > 0:
if e_pos >= (self._end + 1):
if self.wrap:
e_pos = e_pos - (self._end + 1) + self._start
else:
continue # Sacked
else:
if e_pos < self._start:
if self.wrap:
                        e_pos = e_pos + (self._end + 1) - self._start
else:
continue # Sacked
moved_emitters.append(
(e_pos, e_dir, e_vel, e_range, e_color, e_pal))
self.emitters = moved_emitters
def start_new_particles(self):
"""
Start some new particles from the emitters. We roll the dice
starts_at_once times, seeing if we can start each particle based
        on starts_prob. If we start, the particle gets a color from
the palette and a velocity from the vel list.
"""
for e_pos, e_dir, e_vel, e_range, e_color, e_pal in self.emitters:
for roll in range(self.starts_at_once):
if random.random() < self.starts_prob: # Start one?
p_vel = self.vel[random.choice(len(self.vel))]
                    if e_dir < 0 or (e_dir == 0 and random.random() > 0.5):
p_vel = -p_vel
self.particles.append((
p_vel, # Velocity
e_pos, # Position
int(e_range // abs(p_vel)), # steps to live
e_pal[
random.choice(len(e_pal))], # Color
255)) # Brightness
def visibility(self, strip_pos, particle_pos):
"""
Compute particle visibility based on distance between current
strip position being rendered and particle position. A value
of 0.0 is returned if they are >= one aperture away, values
between 0.0 and 1.0 are returned if they are less than one
        aperture apart.
"""
dist = abs(particle_pos - strip_pos)
if dist > self.half_size:
dist = self._size - dist
if dist < self.aperture:
return (self.aperture - dist) / self.aperture
else:
return 0
def render_particles(self):
"""
Render visible particles at each strip position, by modifying
the strip's color list.
"""
for strip_pos in range(self._start, self._end + 1):
blended = COLORS.black
# Render visible emitters
if self.has_e_colors:
for (e_pos, e_dir, e_vel, e_range,
e_color, e_pal) in self.emitters:
if e_color is not None:
vis = self.visibility(strip_pos, e_pos)
if vis > 0:
blended = color_blend(
blended,
color_scale(e_color, int(vis * 255)))
# Render visible particles
for vel, pos, stl, color, bright in self.particles:
vis = self.visibility(strip_pos, pos)
if vis > 0 and bright > 0:
blended = color_blend(
blended,
color_scale(color, int(vis * bright)))
# Add background if showing
if (blended == COLORS.black):
blended = self.bgcolor
self.color_list[strip_pos] = blended
def step(self, amt=1):
"Make a frame of the animation"
self.move_particles()
if self.has_moving_emitters:
self.move_emitters()
self.start_new_particles()
self.render_particles()
if self.emitters == [] and self.particles == []:
self.completed = True
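# Illustrative only (not part of the original module): an `emitters` list with
# two emitters, following the tuple layout documented in the docstring above.
# All values are hypothetical.
#
#     emitters = [
#         # pos, dir, vel, range, emitter color, particle palette
#         (0,    +1,  0.0, None,  'white',       ['gold', 'red']),
#         (None,  0,  0.1, 20,    None,          None),
#     ]
#     # e.g. Emitter(layout, emitters=emitters, bgcolor=COLORS.black, wrap=True)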
|
405878
|
from __future__ import unicode_literals
from six import text_type
from database_sanitizer.session import hash_text_to_int, hash_text_to_ints
def sanitize_email(value):
if not value:
return value
(num1, num2, num3) = hash_text_to_ints(value.strip(), [16, 16, 32])
given_name = given_names[num1 % given_names_count]
surname = surnames[num2 % surnames_count]
case_convert = (text_type.lower if num3 % 8 > 0 else lambda x: x)
return '{first}.{last}@x{num:x}.<EMAIL>'.format(
first=case_convert(given_name),
last=case_convert(surname).replace("'", ''),
num=num3)
def sanitize_username(value):
if not value:
return value
(num1, num2) = hash_text_to_ints(value, [16, 32])
return '{}{:x}'.format(given_names[num1 % given_names_count].lower(), num2)
def sanitize_full_name_en_gb(value):
if not value:
return value
(num1, num2) = hash_text_to_ints(value.strip().lower(), [16, 16])
return '{} {}'.format(
given_names[num1 % given_names_count], surnames[num2 % surnames_count])
def sanitize_given_name_en_gb(value):
if not value:
return value
num = hash_text_to_int(value.strip().lower())
return given_names[num % given_names_count]
def sanitize_surname_en_gb(value):
if not value:
return value
num = hash_text_to_int(value.strip().lower())
return surnames[num % surnames_count]
given_names = """
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>
Charlotte Chelsea Cheryl Chloe Christian Christine Ch<NAME>
<NAME> <NAME>
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>
<NAME> Heather <NAME>
<NAME>
<NAME> J<NAME>
<NAME> Joan Joanna Joanne Jodie Joe Joel John
<NAME> Julia Julian
<NAME>
K<NAME>
<NAME> Lee Leigh Leon Leonard Lesley
<NAME>
<NAME> <NAME>
<NAME> <NAME>
<NAME>
<NAME>
<NAME> <NAME>
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>
Trace<NAME>
<NAME>
""".strip().split()
surnames = """
<NAME>
<NAME>
Baldwin Ball Banks Barber Barker Barlow Barnes Barnett Bar<NAME>
Bartlett B<NAME> B<NAME>
Bevan Bibi Birch Bird Bishop Black Blackburn Bolton Bond Booth Bowen
Boyle Bradley Bradshaw Brady Bray Brennan Briggs Brookes Brooks Brown
Browne Bruce Bryan Bryant Bull Burgess Burke Burns Burrows Burton
Butcher Butler Byrne C<NAME> Carey Carpenter C<NAME>
Car<NAME> Chadwick Chambers Chan Chandler Chapman Char<NAME>
Clarke Clayton Clements Coates C<NAME> Coles Col<NAME>
Connolly Conn<NAME> Cook Cooke Cooper Cox C<NAME> Cross
C<NAME> D<NAME> D<NAME>
D<NAME> Dean D<NAME> D<NAME> Donnelly
D<NAME> Farmer
<NAME> Field Finch Fisher Fitzgerald Fleming Fletcher
<NAME> F<NAME> Fox Franc<NAME> Freeman
French Frost F<NAME> Gardiner G<NAME>
G<NAME> Glover G<NAME>
Gough Gould Gra<NAME> Green Greenwood Gregory Griffin Griffiths
Hale Hall Hamilton Hammond Hancock Hanson Harding Hardy Hargreaves
Har<NAME> Harrison Hart H<NAME> Hayes Haynes Hayward
Heath Henderson Hen<NAME> Hicks Higgins Hill Hilton Hodgson
Holden Holland Holloway Holmes Holt Hooper Hope Hopkins Horton Houghton
Howard Howarth Howe Howell Howells Hudson Hughes Humphreys Humphries
Hunt Hunter Hurst Hussain Hutchinson Hyde Ingram Iqbal Jack<NAME>
Jar<NAME> Jones Jordan Joyce Kaur
Kay <NAME> Kennedy Kent Kerr Khan King Kirby Kirk Knight Knowles
<NAME> Law Law<NAME> Leach Lee Lees Le<NAME>
Little Lloyd Long Lord Low<NAME>
<NAME>
Mc<NAME> Mellor Metcalfe M<NAME> M<NAME>
M<NAME> Morton
Mo<NAME>olls
Nicholson Nixon Noble N<NAME> North Norton O'Blake O'Buckley
O'Chamberlain O'Hobbs O'Thompson O<NAME> Owen Owens Page Palmer
Parker Parkes Park<NAME> P<NAME>
Peacock Pearce P<NAME> P<NAME> Pickering Pollard
Poole Pope Porter Potter Potts Powell Power Pratt Preston Price
Prit<NAME> Read Reed Rees Reeves Reid Reynolds
Rh<NAME>
Rogers Rose Ross Row<NAME>
Savage Schofield Scott Shah Sharp Sharpe Shaw Shepherd Sheppard Short
Simmons Simpson Sims Sinclair Singh Skinner Slater Smart Smith Spencer
Stanley Steele Stephens Stephenson Stevens Stevenson Stewart Stokes
Stone Storey Sullivan Summers Sutton Swift Sykes Talbot T<NAME>
<NAME> T<NAME> Turnbull Turner
<NAME> Wade Walker Wall Wallace Wallis Walsh Walters
Walton Ward Warner Warren Waters Watkins Watson Watts Webb Webster Welch
Wells West Weston Wheeler White Whitehead Whitehouse Whittaker Wilkins
<NAME> Wong Wood Woods
Woodward Wright <NAME>
""".strip().split()
given_names_count = len(given_names)
surnames_count = len(surnames)
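# Illustrative note (not part of the original module): because the sanitizers
# are driven by hashing the input, the same real value always maps to the same
# fake value within a run, and falsy inputs pass through untouched, e.g.
#     sanitize_username('alice') == sanitize_username('alice')   # always True
#     sanitize_email(None) is None                               # True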
|
405886
|
import unittest
import torch
import numpy as np
import os
import shutil
import neural_network_lyapunov.dynamics_learning as dynamics_learning
import neural_network_lyapunov.worlds as worlds
import neural_network_lyapunov.encoders as encoders
import neural_network_lyapunov.relu_system as relu_system
import neural_network_lyapunov.lyapunov as lyapunov
import neural_network_lyapunov.pybullet_data_generation as\
pybullet_data_generation
import neural_network_lyapunov.utils as utils
z_dim = 2
dtype = torch.float64
opt_default = dict(
dtype=dtype,
# data
world_cb=worlds.get_load_urdf_callback(worlds.urdf_path("pendulum.urdf")),
joint_space=True,
camera_eye_position=[0, -3, 0],
camera_target_position=[0, 0, 0],
camera_up_vector=[0, 0, 1],
grayscale=True,
image_width=64,
image_height=64,
x_dim=2,
x_equilibrium=torch.tensor([np.pi, 0], dtype=dtype),
x_lo_stable=torch.tensor([np.pi / 4, -.5], dtype=dtype),
x_up_stable=torch.tensor([np.pi + 3 * np.pi / 4, .5], dtype=dtype),
dataset_x_lo=torch.tensor([0., -5.], dtype=dtype),
dataset_x_up=torch.tensor([2. * np.pi, 5.], dtype=dtype),
dataset_noise=torch.tensor([.1, .1]),
dataset_dt=.1,
dataset_N=1,
dataset_num_rollouts=25,
dataset_num_val_rollouts=10,
batch_size=50,
long_horizon_N=10,
long_horizon_num_val_rollouts=10,
V_lambda=0.,
V_eps_pos=.01,
V_eps_der_lo=0.1,
V_eps_der_up=0.001,
R=None,
# dynamics nn
dyn_nn_width=(z_dim, z_dim * 5, z_dim * 3, z_dim),
# lyapunov nn
lyap_nn_width=(z_dim, z_dim * 5, z_dim * 3, 1),
# encoder (image-space learning)
encoder_class=encoders.CNNEncoder2,
decoder_class=encoders.CNNDecoder2,
use_bce=True,
use_variational=False,
kl_weight_lo=1.,
kl_weight_up=1.,
kl_weight_center_step=0,
kl_weight_steps_lo_to_up=1,
decoded_equilibrium_loss_weight=1e-3,
z_dim=z_dim,
z_equilibrium=torch.zeros(z_dim, dtype=dtype),
z_lo_stable=-1. * torch.ones(z_dim, dtype=dtype),
z_up_stable=torch.ones(z_dim, dtype=dtype),
)
opt_variants = dict(
unstable=dict(
lyap_loss_optimal=True,
lyap_loss_warmstart=False,
lyap_loss_freq=0,
lyap_pos_loss_at_samples_weight=0.,
lyap_der_lo_loss_at_samples_weight=0.,
lyap_der_up_loss_at_samples_weight=0.,
lyap_pos_loss_weight=0.,
lyap_der_lo_loss_weight=0.,
lyap_der_up_loss_weight=0.,
),
stable=dict(
lyap_loss_optimal=True,
lyap_loss_warmstart=False,
lyap_loss_freq=2,
lyap_pos_loss_at_samples_weight=1.,
lyap_der_lo_loss_at_samples_weight=1.,
lyap_der_up_loss_at_samples_weight=1.,
lyap_pos_loss_weight=1.,
lyap_der_lo_loss_weight=1.,
lyap_der_up_loss_weight=1.,
),
)
class TestDynamicsLearning(unittest.TestCase):
def setUp(self):
torch.manual_seed(123)
opt = dynamics_learning.DynamicsLearningOptions(opt_default)
opt.set_options(opt_variants["stable"])
self.opt = opt
self.x_data = torch.rand((2 * opt.batch_size, opt.x_dim),
dtype=opt.dtype)
self.x_next_data = torch.rand((2 * opt.batch_size, opt.x_dim),
dtype=opt.dtype)
if opt.grayscale:
num_channels = 1
else:
num_channels = 3
self.X_data = torch.rand((2 * opt.batch_size, 2 * num_channels,
opt.image_width, opt.image_height),
dtype=opt.dtype)
self.X_next_data = torch.rand((2 * opt.batch_size, num_channels,
opt.image_width, opt.image_height),
dtype=opt.dtype)
self.x_train_dataloader = pybullet_data_generation.get_dataloader(
self.x_data, self.x_next_data, opt.batch_size)
self.x_validation_dataloader = pybullet_data_generation.get_dataloader(
self.x_data, self.x_next_data, opt.batch_size)
self.X_train_dataloader = pybullet_data_generation.get_dataloader(
self.X_data, self.X_next_data, opt.batch_size)
self.X_validation_dataloader = pybullet_data_generation.get_dataloader(
self.X_data, self.X_next_data, opt.batch_size)
self.X_equilibrium = torch.rand(
(2 * num_channels, opt.image_width, opt.image_height),
dtype=opt.dtype)
self.dyn_nn_model = utils.setup_relu(opt.dyn_nn_width,
negative_slope=0.,
dtype=opt.dtype)
self.lyap_nn_model = utils.setup_relu(opt.lyap_nn_width,
negative_slope=0.,
dtype=opt.dtype)
self.relu_sys = relu_system.AutonomousReLUSystemGivenEquilibrium(
opt.dtype, opt.dataset_x_lo, opt.dataset_x_up, self.dyn_nn_model,
opt.x_equilibrium)
self.lyap = lyapunov.LyapunovDiscreteTimeHybridSystem(
self.relu_sys, self.lyap_nn_model)
self.ss_dyn_learner = dynamics_learning.StateSpaceDynamicsLearning(
self.x_train_dataloader, self.x_validation_dataloader, self.lyap,
opt)
self.encoder = opt.encoder_class(opt.z_dim, opt.image_width,
opt.image_height, opt.grayscale)
self.decoder = opt.decoder_class(opt.z_dim, opt.image_width,
opt.image_height, opt.grayscale)
self.latent_dyn_learner = dynamics_learning.\
LatentSpaceDynamicsLearning(
self.X_train_dataloader, self.X_validation_dataloader,
self.lyap, opt,
self.encoder, self.decoder,
decoded_equilibrium=self.X_equilibrium)
def test_lyapunov_loss(self):
for dyn_learner, data in [(self.ss_dyn_learner, self.x_data),
(self.latent_dyn_learner, self.X_data)]:
self.opt.set_option("lyap_loss_optimal", True)
lyap_pos_loss, lyap_der_lo_loss, lyap_der_up_loss =\
dyn_learner.lyapunov_loss()
self.opt.set_option("lyap_loss_optimal", False)
lyap_pos_loss_sub, lyap_der_lo_loss_sub, lyap_der_up_loss_sub =\
dyn_learner.lyapunov_loss()
lyap_pos_loss_samp, lyap_der_lo_loss_samp, lyap_der_up_loss_samp =\
dyn_learner.lyapunov_loss_at_samples(data)
self.assertLessEqual(lyap_pos_loss_samp.item(),
lyap_pos_loss.item())
self.assertLessEqual(lyap_pos_loss_sub.item(),
lyap_pos_loss.item())
self.assertGreaterEqual(lyap_pos_loss.item(), 0.)
self.assertGreaterEqual(lyap_pos_loss_sub.item(), 0.)
self.assertGreaterEqual(lyap_pos_loss_samp.item(), 0.)
self.assertGreaterEqual(lyap_der_lo_loss.item(), 0.)
self.assertGreaterEqual(lyap_der_lo_loss_sub.item(), 0.)
self.assertGreaterEqual(lyap_der_lo_loss_samp.item(), 0.)
self.assertGreaterEqual(lyap_der_up_loss.item(), 0.)
self.assertGreaterEqual(lyap_der_up_loss_sub.item(), 0.)
self.assertGreaterEqual(lyap_der_up_loss_samp.item(), 0.)
def test_lyapunov_sample_boundaries(self):
lyap_pos_loss_samp, lyap_der_lo_loss_samp, lyap_der_up_loss_samp =\
self.ss_dyn_learner.lyapunov_loss_at_samples(
self.opt.x_up_stable.unsqueeze(0) + 10.)
self.assertEqual(lyap_pos_loss_samp, 0.)
self.assertEqual(lyap_der_lo_loss_samp, 0.)
self.assertEqual(lyap_der_up_loss_samp, 0.)
lyap_pos_loss_samp, lyap_der_lo_loss_samp, lyap_der_up_loss_samp =\
self.ss_dyn_learner.lyapunov_loss_at_samples(
self.opt.x_lo_stable.unsqueeze(0) - 10.)
self.assertEqual(lyap_pos_loss_samp, 0.)
self.assertEqual(lyap_der_lo_loss_samp, 0.)
self.assertEqual(lyap_der_up_loss_samp, 0.)
self.opt.set_option("use_vae", False)
z = self.latent_dyn_learner.encoder.forward(self.X_data[0:1, :] +
10000.)[0]
self.assertTrue(torch.all(z > self.opt.z_up_stable))
lyap_pos_loss_samp, lyap_der_lo_loss_samp, lyap_der_up_loss_samp =\
self.latent_dyn_learner.lyapunov_loss_at_samples(
self.X_data[0:1, :] + 10000.)
self.assertEqual(lyap_pos_loss_samp, 0.)
self.assertEqual(lyap_der_lo_loss_samp, 0.)
self.assertEqual(lyap_der_up_loss_samp, 0.)
def test_adversarial_samples(self):
for dyn_learner in [self.ss_dyn_learner, self.latent_dyn_learner]:
for optimality in [True, False]:
self.opt.set_option("lyap_loss_optimal", optimality)
z_adv_pos, z_adv_der_lo, z_adv_der_up = dyn_learner.\
adversarial_samples()
self.assertGreaterEqual(z_adv_pos.shape[0], 1)
self.assertGreaterEqual(z_adv_der_lo.shape[0], 1)
self.assertGreaterEqual(z_adv_der_up.shape[0], 1)
for k in range(z_adv_pos.shape[0]):
V = self.lyap.lyapunov_value(
z_adv_pos[k, :],
self.lyap.system.x_equilibrium,
self.opt.V_lambda,
R=self.opt.R)
self.assertLessEqual(
V.item() - self.opt.V_eps_pos * torch.norm(
z_adv_pos[k, :] - self.lyap.system.x_equilibrium,
p=1), 0.)
for k in range(z_adv_der_lo.shape[0]):
dV = self.lyap.lyapunov_derivative(
z_adv_der_lo[k, :],
self.lyap.system.x_equilibrium,
self.opt.V_lambda,
self.opt.V_eps_der_lo,
R=self.opt.R)
[self.assertGreaterEqual(dv.item(), 0.) for dv in dV]
for k in range(z_adv_der_up.shape[0]):
dV = self.lyap.lyapunov_derivative(
z_adv_der_up[k, :],
self.lyap.system.x_equilibrium,
self.opt.V_lambda,
self.opt.V_eps_der_up,
R=self.opt.R)
[self.assertLessEqual(dv.item(), 0.) for dv in dV]
def test_dynamics_loss(self):
loss = self.ss_dyn_learner.dynamics_loss(self.x_data, self.x_next_data)
self.assertGreaterEqual(loss, 0.)
loss = self.ss_dyn_learner.dynamics_loss(
self.x_data, self.lyap.system.step_forward(self.x_data))
self.assertEqual(loss, 0.)
for variational in [True, False]:
self.opt.set_option("use_variational", variational)
loss = self.latent_dyn_learner.dynamics_loss(
self.X_data, self.X_next_data)
self.assertGreaterEqual(loss, 0.)
def test_kl_div(self):
z_mu = torch.zeros((5, self.opt.z_dim), dtype=self.opt.dtype)
z_log_var = torch.log(
torch.ones((5, self.opt.z_dim), dtype=self.opt.dtype))
kl = self.latent_dyn_learner.kl_loss(z_mu, z_log_var)
self.assertEqual(kl.item(), 0.)
z_mu += (torch.rand(z_mu.shape, dtype=self.opt.dtype) - .5) * 2
kl1 = self.latent_dyn_learner.kl_loss(z_mu, z_log_var)
self.assertGreater(kl1, kl)
z_log_var -= (torch.rand(z_log_var.shape, dtype=self.opt.dtype) -
.5) * 2
kl2 = self.latent_dyn_learner.kl_loss(z_mu, z_log_var)
self.assertGreater(kl2, kl1)
def test_save_load(self):
self.ss_dyn_learner.save(".")
dl = dynamics_learning.StateSpaceDynamicsLearning.load(
".", self.x_train_dataloader, self.x_validation_dataloader,
self.opt)
x1 = self.ss_dyn_learner.lyap.lyapunov_relu(self.x_data)
x2 = dl.lyap.lyapunov_relu(self.x_data)
self.assertTrue(torch.all(x1 == x2))
os.remove("./dyn_learner.pkl")
self.opt.set_option("use_variational", False)
self.latent_dyn_learner.save(".")
dl = dynamics_learning.LatentSpaceDynamicsLearning.load(
".", self.x_train_dataloader, self.x_validation_dataloader,
self.opt)
x1, _, _, _, _ = self.latent_dyn_learner.vae_forward(self.X_data)
x2, _, _, _, _ = dl.vae_forward(self.X_data)
self.assertTrue(torch.all(x1 == x2))
os.remove("./dyn_learner.pkl")
def test_rollout(self):
x0 = self.x_data[0, :]
roll, V_roll = self.ss_dyn_learner.rollout(x0, 10)
self.assertEqual(roll.shape, (10 + 1, self.opt.x_dim))
self.assertEqual(V_roll.shape, (10 + 1, ))
self.assertTrue(torch.all(x0 == roll[0, :]))
for decode in [True, False]:
X0 = self.X_data[0, :]
roll, V_roll, z_roll = self.latent_dyn_learner.rollout(
X0, 10, decode_intermediate=decode)
num_channels = int(X0.shape[0] / 2)
self.assertEqual(roll.shape,
(10 + 2, num_channels, self.opt.image_width,
self.opt.image_height))
self.assertEqual(V_roll.shape, (10 + 1, ))
self.assertEqual(z_roll.shape, (10 + 1, self.opt.z_dim))
self.assertTrue(torch.all(X0[:num_channels, :] == roll[0, :]))
self.assertTrue(torch.all(X0[num_channels:, :] == roll[1, :]))
def test_rollout_loss(self):
x0 = self.x_data[0, :]
roll, V_roll = self.ss_dyn_learner.rollout(x0, 10)
loss = self.ss_dyn_learner.rollout_loss(roll)
self.assertEqual(loss.shape, (11, ))
self.assertEqual(loss[0].item(), 0.)
self.assertTrue(torch.all(loss >= 0))
for decode in [True, False]:
self.opt.set_option("use_bce", False)
X0 = self.X_data[0, :]
roll, _, _ = self.latent_dyn_learner.rollout(X0, 10, decode)
loss = self.latent_dyn_learner.rollout_loss(roll, decode)
self.assertEqual(loss.shape, (12, ))
self.assertEqual(loss[0].item(), 0.)
self.assertEqual(loss[1].item(), 0.)
self.assertTrue(torch.all(loss >= 0))
self.opt.set_option("use_bce", True)
X0 = self.X_data[0, :]
roll, _, _ = self.latent_dyn_learner.rollout(X0, 10, decode)
loss = self.latent_dyn_learner.rollout_loss(roll, decode)
self.assertEqual(loss.shape, (12, ))
self.assertTrue(torch.all(loss > 0))
self.assertTrue(torch.all(loss[0] < loss[2:]))
self.assertTrue(torch.all(loss[1] < loss[2:]))
def test_rollout_validation_loss(self):
rollouts = [
torch.rand((10, self.opt.x_dim), dtype=self.opt.dtype)
for i in range(4)
]
loss = self.ss_dyn_learner.rollout_validation(rollouts)
self.assertEqual(loss.shape, (10, ))
rollouts = [
torch.rand((10, self.X_next_data.shape[1], self.opt.image_width,
self.opt.image_height),
dtype=self.opt.dtype) for i in range(4)
]
loss = self.latent_dyn_learner.rollout_validation(rollouts)
self.assertEqual(loss.shape, (10, ))
if torch.cuda.is_available():
loss_cuda = self.latent_dyn_learner.rollout_validation(
rollouts, device='cuda')
self.assertEqual(loss_cuda.shape, (10, ))
self.assertTrue(torch.all(torch.abs(loss - loss_cuda) < 1e-6))
def test_train(self):
if torch.cuda.is_available():
devices = ['cpu', 'cuda']
else:
devices = ['cpu']
x_test = torch.ones(self.opt.x_dim, dtype=self.opt.dtype)
z_test = torch.ones(self.opt.z_dim, dtype=self.opt.dtype)
for device in devices:
for var_name in opt_variants.keys():
self.opt.set_options(opt_variants[var_name])
x_pre = self.ss_dyn_learner.lyap.system.step_forward(x_test)
v_pre = self.ss_dyn_learner.lyap.lyapunov_relu.forward(x_test)
self.ss_dyn_learner.reset_optimizer()
self.ss_dyn_learner.train(2, validate=True, device=device)
x_post = self.ss_dyn_learner.lyap.system.step_forward(x_test)
v_post = self.ss_dyn_learner.lyap.lyapunov_relu.forward(x_test)
self.assertFalse(torch.all(x_pre == x_post))
if var_name == "unstable":
self.assertTrue(torch.all(v_pre == v_post))
elif var_name == "stable":
self.assertFalse(torch.all(v_pre == v_post))
self.ss_dyn_learner.reset_optimizer(lyapunov_only=True)
self.ss_dyn_learner.train(2, validate=True, device=device)
# testing lyapunov training only
x_post2 = self.ss_dyn_learner.lyap.system.step_forward(x_test)
v_post2 = self.ss_dyn_learner.lyap.lyapunov_relu.forward(
x_test)
self.assertTrue(torch.all(x_post == x_post2))
if var_name == "unstable":
self.assertTrue(torch.all(v_post == v_post2))
elif var_name == "stable":
self.assertFalse(torch.all(v_post == v_post2))
z_pre = self.latent_dyn_learner.lyap.system.step_forward(
z_test)
v_pre = self.latent_dyn_learner.lyap.lyapunov_relu.forward(
z_test)
self.latent_dyn_learner.reset_optimizer()
self.latent_dyn_learner.train(2, validate=True, device=device)
z_post = self.latent_dyn_learner.lyap.system.step_forward(
z_test)
v_post = self.latent_dyn_learner.lyap.lyapunov_relu.forward(
z_test)
self.assertFalse(torch.all(z_pre == z_post))
if var_name == "unstable":
self.assertTrue(torch.all(v_pre == v_post))
elif var_name == "stable":
self.assertFalse(torch.all(v_pre == v_post))
self.latent_dyn_learner.reset_optimizer(lyapunov_only=True)
self.latent_dyn_learner.train(2, validate=True, device=device)
z_post2 = self.latent_dyn_learner.lyap.system.step_forward(
z_test)
v_post2 = self.latent_dyn_learner.lyap.lyapunov_relu.forward(
z_test)
self.assertTrue(torch.all(z_post == z_post2))
if var_name == "unstable":
self.assertTrue(torch.all(v_post == v_post2))
elif var_name == "stable":
self.assertFalse(torch.all(v_post == v_post2))
shutil.rmtree("runs")
def test_early_term(self):
for dyn_learner, data in [(self.ss_dyn_learner, self.x_data),
(self.latent_dyn_learner, self.X_data)]:
self.opt.set_option("lyap_loss_optimal", True)
lyap_pos_loss1, lyap_der_lo_loss1, lyap_der_up_loss1 =\
dyn_learner.lyapunov_loss()
self.opt.set_option("lyap_loss_optimal", False)
lyap_pos_loss2, lyap_der_lo_loss2, lyap_der_up_loss2 =\
dyn_learner.lyapunov_loss(
lyap_pos_threshold=lyap_pos_loss1,
lyap_der_lo_threshold=lyap_der_lo_loss1,
lyap_der_up_threshold=lyap_der_up_loss1)
lyap_pos_loss3, lyap_der_lo_loss3, lyap_der_up_loss3 =\
dyn_learner.lyapunov_loss()
self.assertEqual(lyap_pos_loss1, lyap_pos_loss2)
self.assertEqual(lyap_der_lo_loss1, lyap_der_lo_loss2)
self.assertEqual(lyap_der_up_loss1, lyap_der_up_loss2)
self.assertLessEqual(lyap_pos_loss3, lyap_pos_loss1)
self.assertLessEqual(lyap_der_lo_loss3, lyap_der_lo_loss1)
self.assertLessEqual(lyap_der_up_loss3, lyap_der_up_loss1)
if __name__ == "__main__":
unittest.main()
|
405903
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
rpcDCSSummary = DQMEDHarvester("RPCDCSSummary",
NumberOfEndcapDisks = cms.untracked.int32(4),
)
|
405929
|
from tomviz import utils
import numpy as np
import tomviz.operators
class CenterOfMassAlignmentOperator(tomviz.operators.CancelableOperator):
def transform_scalars(self, dataset):
"""Automatically align tilt images by center of mass method"""
self.progress.maximum = 1
tiltSeries = utils.get_array(dataset).astype(float)
self.progress.maximum = tiltSeries.shape[2]
step = 1
offsets = np.zeros((tiltSeries.shape[2], 2))
for i in range(tiltSeries.shape[2]):
if self.canceled:
return
self.progress.message = 'Processing tilt image No.%d/%d' % (
i + 1, tiltSeries.shape[2])
offsets[i, :], tiltSeries[:, :, i] = centerOfMassAlign(
tiltSeries[:, :, i]
)
step += 1
self.progress.value = step
utils.set_array(dataset, tiltSeries)
# Create a spreadsheet data set from table data
column_names = ["X Offset", "Y Offset"]
offsetsTable = utils.make_spreadsheet(column_names, offsets)
# Set up dictionary to return operator results
returnValues = {}
returnValues["alignments"] = offsetsTable
return returnValues
def centerOfMassAlign(image):
    """Shift image so that its center of mass lands at the image center"""
(Nx, Ny) = image.shape
# set up coordinate
y = np.linspace(0, Ny - 1, Ny)
x = np.linspace(0, Nx - 1, Nx)
[X, Y] = np.meshgrid(x, y, indexing="ij")
imageCOM_x = int(np.sum(image * X) / np.sum(image))
imageCOM_y = int(np.sum(image * Y) / np.sum(image))
sx = -(imageCOM_x - Nx // 2)
sy = -(imageCOM_y - Ny // 2)
output = np.roll(image, sx, axis=0)
output = np.roll(output, sy, axis=1)
return (sx, sy), output
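if __name__ == "__main__":
    # Illustrative check (not part of the tomviz operator): a bright pixel that
    # sits off-center gets rolled back to the image center.
    img = np.zeros((5, 5))
    img[1, 1] = 1.0
    (sx, sy), aligned = centerOfMassAlign(img)
    assert (sx, sy) == (1, 1) and aligned[2, 2] == 1.0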
|
405939
|
from aws_cdk import core
from aws_cdk.aws_ec2 import Vpc, NatProvider, SubnetConfiguration, SubnetType
class NetworkStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# Subnet configurations for a public and private tier
subnet1 = SubnetConfiguration(
name="Public",
subnet_type=SubnetType.PUBLIC,
cidr_mask=24)
subnet2 = SubnetConfiguration(
name="Private",
subnet_type=SubnetType.PRIVATE,
cidr_mask=24)
vpc = Vpc(self,
"TheVPC",
cidr="10.0.0.0/16",
enable_dns_hostnames=True,
enable_dns_support=True,
max_azs=2,
nat_gateway_provider=NatProvider.gateway(),
nat_gateways=1,
subnet_configuration=[subnet1, subnet2]
)
# This will export the VPC's ID in CloudFormation under the key
# 'vpcid'
core.CfnOutput(self, "vpcid", value=vpc.vpc_id)
# Prepares output attributes to be passed into other stacks
# In this case, it is our VPC and subnets.
self.output_props = props.copy()
self.output_props['vpc'] = vpc
self.output_props['subnets'] = vpc.public_subnets
@property
def outputs(self):
return self.output_props
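# Illustrative wiring (hypothetical, not from the original source): the stack is
# typically instantiated in a CDK app and its outputs handed to downstream stacks.
#
#     app = core.App()
#     network = NetworkStack(app, "network", props={})
#     # network.outputs now carries the original props plus 'vpc' and 'subnets',
#     # ready to be passed into e.g. an application or database stack.
#     app.synth()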
|
405968
|
from __future__ import print_function
import pandas as pd
df = pd.read_csv("validation-metadata.csv").fillna(value='')
df.loc[df.column_name == 'id', 'column_name'] = 'name'
df.loc[df.foreign_column == 'id', 'foreign_column'] = 'name'
# Remove "_id" from column names
for idx, row in df.iterrows():
col_name = row.column_name
if col_name.endswith('_id'):
df.loc[idx, 'column_name'] = col_name[:-3]
col_name = row.foreign_column
if col_name.endswith('_id'):
df.loc[idx, 'foreign_column'] = col_name[:-3]
# Create and populate "not_null" and "cascade_delete" columns
df['not_null'] = 'FALSE'
df.loc[df.nullable == 'NO', 'not_null'] = 'TRUE'
df['cascade_delete'] = 'FALSE'
df.loc[df.on_del == 'CASCADE', 'cascade_delete'] = 'TRUE'
# Eliminate duplicate rows for which there is a table-independent column specification
manualDF = pd.read_csv("manual-validation-info.csv").fillna(value='')
genericCols = manualDF.loc[manualDF.table_name == ''].column_name.unique()
# Drop table-specific entries for columns with table-independent entries only
# if all the "nullable" values are the same, and set the "not_null" value in
# the manualDF to the value found in the postgres metadata.
dropable = []
for col in genericCols:
values = df.query("column_name == @col").not_null.unique()
if len(values) == 1:
dropable.append(col)
manualDF.loc[manualDF.column_name == col, 'not_null'] = values[0]
df = df.query("column_name not in @dropable").copy()
# Add other columns as empty placeholders
for col in ['linked_column', 'dtype', 'folder', 'additional_valid_inputs']:
df[col] = ''
df.rename(columns={'foreign_table': 'ref_tbl', 'foreign_column' : 'ref_col'}, inplace=True)
# Re-order columns and drop unneeded ones
cols = ['table_name', 'column_name', 'not_null', 'linked_column', 'dtype', 'folder', 'ref_tbl',
'ref_col', 'cascade_delete', 'additional_valid_inputs']
df = df[cols]
# Add as many "Unnamed: X" columns as occur in the manualDF
for col in manualDF.columns:
if col.startswith('Unnamed:'):
df[col] = ''
df = manualDF.append(df)
prefix = 'Unnamed: '
count = len(prefix)
df.rename(columns=lambda col: '_c_' + col[count:] if col.startswith(prefix) else col, inplace=True)
df.to_csv('merged-validation-data.csv', index=None)
|
405971
|
from typing import Tuple
import itertools
import re
import numpy as np
import GPy
from emukit.model_wrappers.gpy_model_wrappers import GPyModelWrapper
from ..parameters.cfg_parameter import CFGParameter
from ..parameters.string_parameter import StringParameter
from ..parameters.protein_base_parameter import ProteinBaseParameter
from ..parameters.protein_codon_parameter import ProteinCodonParameter
class linear_model(GPyModelWrapper):
"""
    This is a thin wrapper around GPy models that one-hot encodes each character and applies a linear kernel
"""
    def __init__(self, space, X_init, Y_init, n_restarts: int = 1, observation_noise: bool = False):
"""
:param space: string space
:param X_init: starting strings
:param Y_init: evals of starting strings
:param n_restarts: Number of restarts during hyper-parameter optimization
:param observation_noise: have noise term in likelihood
"""
        # check space is a StringParameter or protein parameter (CFG parameters are not
        # supported for the linear model because they produce variable-length strings)
        if not isinstance(space.parameters[0], (StringParameter, ProteinCodonParameter, ProteinBaseParameter)):
raise ValueError("Not a valid string space (needs to be fixed length for linear model)")
# generate encoding for characters
encoding = {}
for i in range(len(space.parameters[0].alphabet)):
encoding[space.parameters[0].alphabet[i]]=[0]*len(space.parameters[0].alphabet)
encoding[space.parameters[0].alphabet[i]][i]=1
self.encoding = encoding
# transform initialization
X_feature_init = map_to_feature_space(X_init,self.encoding)
# first fit of model
kernel = GPy.kern.Linear(X_feature_init.shape[1], ARD=False)
gpy_model= GPy.models.GPRegression(X_feature_init, Y_init,kernel,normalizer=True)
if not observation_noise:
# no observation noise here but keep a little to help
# with matrix inversions (i.e jitter)
gpy_model.Gaussian_noise.variance.constrain_fixed(1e-6)
gpy_model.optimize_restarts(n_restarts)
self.model = gpy_model
        # store inputs as strings separately
self.X_strings = X_init
self.n_restarts = n_restarts
def predict(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
:param X: (n_points x n_dimensions) array containing locations at which to get predictions
:return: (mean, variance) Arrays of size n_points x 1 of the predictive distribution at each input location
"""
X = map_to_feature_space(X,self.encoding)
return self.model.predict(X)
def predict_with_full_covariance(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
:param X: (n_points x n_dimensions) array containing locations at which to get predictions
:return: (mean, variance) Arrays of size n_points x 1 and n_points x n_points of the predictive
mean and variance at each input location
"""
X = map_to_feature_space(X,self.encoding)
return self.model.predict(X, full_cov=True)
def get_prediction_gradients(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
:param X: (n_points x n_dimensions) array containing locations at which to get gradient of the predictions
:return: (mean gradient, variance gradient) n_points x n_dimensions arrays of the gradients of the predictive
distribution at each input location
"""
X = map_to_feature_space(X,self.encoding)
d_mean_dx, d_variance_dx = self.model.predictive_gradients(X)
return d_mean_dx[:, :, 0], d_variance_dx
def get_joint_prediction_gradients(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Computes and returns model gradients of mean and full covariance matrix at given points
:param X: points to compute gradients at, nd array of shape (q, d)
:return: Tuple with first item being gradient of the mean of shape (q) at X with respect to X (return shape is (q, q, d)).
The second item is the gradient of the full covariance matrix of shape (q, q) at X with respect to X
(return shape is (q, q, q, d)).
"""
X = map_to_feature_space(X,self.encoding)
dmean_dx = dmean(X, self.model.X, self.model.kern, self.model.posterior.woodbury_vector[:, 0])
dvariance_dx = dSigma(X, self.model.X, self.model.kern, self.model.posterior.woodbury_inv)
return dmean_dx, dvariance_dx
def set_data(self, X: np.ndarray, Y: np.ndarray) -> None:
"""
Sets training data in model
:param X: New training features
:param Y: New training outputs
"""
# keep track of strings
self.X_strings = X
X = map_to_feature_space(X,self.encoding)
self.model.set_XY(X, Y)
def optimize(self):
"""
Optimizes model hyper-parameters
"""
self.model.optimize_restarts(self.n_restarts, robust=True)
def calculate_variance_reduction(self, x_train_new: np.ndarray, x_test: np.ndarray) -> np.ndarray:
raise ValueError("not implemented for this model")
def predict_covariance(self, X: np.ndarray, with_noise: bool=True) -> np.ndarray:
"""
Calculates posterior covariance between points in X
:param X: Array of size n_points x n_dimensions containing input locations to compute posterior covariance at
:param with_noise: Whether to include likelihood noise in the covariance matrix
:return: Posterior covariance matrix of size n_points x n_points
"""
X = map_to_feature_space(X,self.encoding)
_, v = self.model.predict(X, full_cov=True, include_likelihood=with_noise)
v = np.clip(v, 1e-10, np.inf)
return v
def get_covariance_between_points(self, X1: np.ndarray, X2: np.ndarray) -> np.ndarray:
"""
Calculate posterior covariance between two points
:param X1: An array of shape 1 x n_dimensions that contains a data single point. It is the first argument of the
posterior covariance function
:param X2: An array of shape n_points x n_dimensions that may contain multiple data points. This is the second
argument to the posterior covariance function.
:return: An array of shape n_points x 1 of posterior covariances between X1 and X2
"""
X1 = map_to_feature_space(X1,self.encoding)
X2 = map_to_feature_space(X2,self.encoding)
return self.model.posterior_covariance_between_points(X1, X2)
@property
def X(self) -> np.ndarray:
"""
:return: An array of shape n_points x n_dimensions containing training inputs
"""
return self.X_strings
@property
def Y(self) -> np.ndarray:
"""
:return: An array of shape n_points x 1 containing training outputs
"""
return self.model.Y
# make a map from string to one-hot encoded feature space
# each char --> one-hot of length len(alphabet)
def map_to_feature_space(x,encoding):
x_mapped = []
for row in x:
row_chars = row[0].split(" ")
row_temp=[]
for char in row_chars:
row_temp.extend(encoding[char])
x_mapped.append(row_temp)
return np.array(x_mapped)
def dSigma(x_predict: np.ndarray, x_train: np.ndarray, kern: GPy.kern, w_inv: np.ndarray) -> np.ndarray:
"""
Compute the derivative of the posterior covariance with respect to the prediction input
:param x_predict: Prediction inputs of shape (q, d)
:param x_train: Training inputs of shape (n, d)
:param kern: Covariance of the GP model
:param w_inv: Woodbury inverse of the posterior fit of the GP
:return: Gradient of the posterior covariance of shape (q, q, q, d)
"""
q, d, n = x_predict.shape[0], x_predict.shape[1], x_train.shape[0]
dkxX_dx = np.empty((q, n, d))
dkxx_dx = np.empty((q, q, d))
for i in range(d):
dkxX_dx[:, :, i] = kern.dK_dX(x_predict, x_train, i)
dkxx_dx[:, :, i] = kern.dK_dX(x_predict, x_predict, i)
K = kern.K(x_predict, x_train)
dsigma = np.zeros((q, q, q, d))
for i in range(q):
for j in range(d):
Ks = np.zeros((q, n))
Ks[i, :] = dkxX_dx[i, :, j]
dKss_dxi = np.zeros((q, q))
dKss_dxi[i, :] = dkxx_dx[i, :, j]
dKss_dxi[:, i] = dkxx_dx[i, :, j].T
dKss_dxi[i, i] = 0
dsigma[:, :, i, j] = dKss_dxi - Ks @ w_inv @ K.T - K @ w_inv @ Ks.T
return dsigma
def dmean(x_predict: np.ndarray, x_train: np.ndarray, kern: GPy.kern, w_vec: np.ndarray) -> np.ndarray:
"""
Compute the derivative of the posterior mean with respect to prediction input
    :param x_predict: Prediction inputs of shape (q, d)
    :param x_train: Training inputs of shape (n, d)
    :param kern: Covariance of the GP model
    :param w_vec: Woodbury vector of the posterior fit of the GP
:return: Gradient of the posterior mean of shape (q, q, d)
"""
q, d, n = x_predict.shape[0], x_predict.shape[1], x_train.shape[0]
dkxX_dx = np.empty((q, n, d))
dmu = np.zeros((q, q, d))
for i in range(d):
dkxX_dx[:, :, i] = kern.dK_dX(x_predict, x_train, i)
for j in range(q):
dmu[j, j, i] = (dkxX_dx[j, :, i][None, :] @ w_vec[:, None]).flatten()
return dmu
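# Hedged sketch of the one-hot feature mapping on its own (no GPy fitting). The
# four-letter alphabet below is a hypothetical stand-in; inputs are expected as
# single-element rows holding space-separated characters, as map_to_feature_space assumes.
# Because of the relative imports above, this only runs when invoked as part of the package.
if __name__ == "__main__":
    alphabet = ["A", "C", "G", "T"]
    enc = {c: [1 if j == i else 0 for j in range(len(alphabet))]
           for i, c in enumerate(alphabet)}
    X_demo = np.array([["A C G"], ["T T A"]])
    print(map_to_feature_space(X_demo, enc).shape)  # -> (2, 12)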
|
405973
|
import unittest
import os
from hamlpy.ext import has_any_extension
class ExtTest(unittest.TestCase):
"""
Tests for methods found in ../ext.py
"""
def test_has_any_extension(self):
extensions = [
'hamlpy',
'haml',
'.txt'
]
# no directory
self.assertTrue(has_any_extension('dir.hamlpy', extensions))
self.assertTrue(has_any_extension('dir.haml', extensions))
self.assertTrue(has_any_extension('dir.txt', extensions))
self.assertFalse(has_any_extension('dir.html', extensions))
# with dot in filename
self.assertTrue(has_any_extension('dir.dot.hamlpy', extensions))
self.assertTrue(has_any_extension('dir.dot.haml', extensions))
self.assertTrue(has_any_extension('dir.dot.txt', extensions))
self.assertFalse(has_any_extension('dir.dot.html', extensions))
# relative path
self.assertTrue(has_any_extension('../dir.hamlpy', extensions))
self.assertTrue(has_any_extension('../dir.haml', extensions))
self.assertTrue(has_any_extension('../dir.txt', extensions))
self.assertFalse(has_any_extension('../dir.html', extensions))
# with dot in filename
self.assertTrue(has_any_extension('../dir.dot.hamlpy', extensions))
self.assertTrue(has_any_extension('../dir.dot.haml', extensions))
self.assertTrue(has_any_extension('../dir.dot.txt', extensions))
self.assertFalse(has_any_extension('../dir.dot.html', extensions))
# absolute paths
self.assertTrue(has_any_extension('/home/user/dir.hamlpy', extensions))
self.assertTrue(has_any_extension('/home/user/dir.haml', extensions))
self.assertTrue(has_any_extension('/home/user/dir.txt', extensions))
self.assertFalse(has_any_extension('/home/user/dir.html', extensions))
# with dot in filename
self.assertTrue(has_any_extension('/home/user/dir.dot.hamlpy', extensions))
self.assertTrue(has_any_extension('/home/user/dir.dot.haml', extensions))
self.assertTrue(has_any_extension('/home/user/dir.dot.txt', extensions))
self.assertFalse(has_any_extension('/home/user/dir.dot.html', extensions))
|
405983
|
import re
import logging
# Console colors
W = '\033[0m' # white (normal)
R = '\033[31m' # red
G = '\033[32m' # green
# regex for timestamp patched masscan grepable output
reg1 = re.compile(r'^Timestamp: (?P<Timestamp>\d+)\tHost: (?P<IP>\d+\.\d+\.\d+\.\d+) \(\)\tPort: (?P<Port>\d+)\tService: title\tBanner: (?P<Info>.*)$')
reg2 = re.compile(r'^Timestamp: (?P<Timestamp>\d+)\tHost: (?P<IP>\d+\.\d+\.\d+\.\d+) \(\)\tPort: (?P<Port>\d+)\tService: vuln\tBanner: (?P<Info>.*)$')
reg3 = re.compile(r'^Timestamp: (?P<Timestamp>\d+)\tHost: (?P<IP>\d+\.\d+\.\d+\.\d+) \(\)\tPorts: (?P<Port>\d+)/open/tcp////$')
reg4 = re.compile(r'^Timestamp: (?P<Timestamp>\d+)\tHost: (?P<IP>\d+\.\d+\.\d+\.\d+) \(\)\tPort: (?P<Port>\d+)\tService: (?P<Service>\w+)\tBanner: (?P<Banner>.*)$')
# regex for default masscan grepable output
reg5 = re.compile(r'^Host: (?P<IP>\d+\.\d+\.\d+\.\d+) \(\)\tPort: (?P<Port>\d+)\tService: title\tBanner: (?P<Info>.*)$')
reg6 = re.compile(r'^Host: (?P<IP>\d+\.\d+\.\d+\.\d+) \(\)\tPort: (?P<Port>\d+)\tService: vuln\tBanner: (?P<Info>.*)$')
reg7 = re.compile(r'^Host: (?P<IP>\d+\.\d+\.\d+\.\d+) \(\)\tPorts: (?P<Port>\d+)/open/tcp////$')
reg8 = re.compile(r'^Host: (?P<IP>\d+\.\d+\.\d+\.\d+) \(\)\tPort: (?P<Port>\d+)\tService: (?P<Service>\w+)\tBanner: (?P<Banner>.*)$')
class Masscan_Parser(object):
def __init__(self, file):
try:
self.log = open(file,'r')
except:
logging.warning(R+'Error opening scan-result file, exiting...'+W)
exit(1)
self.insertDB = []
def parse(self):
index = 0
error = 0
if self.log != None:
logging.info('Starting output parsing...')
for line in self.log.readlines():
index += 1
if re.match(reg1, line):
# handle reg1 result
res = reg1.search(line)
timestamp = res.group(1)
ip = res.group(2)
port = res.group(3)
info = res.group(4).strip('\t')
self.insertDB.append((index, timestamp, ip, port, ' ', ' ', info, ' '))
elif re.match(reg2, line):
# handle reg2 result
res = reg2.search(line)
timestamp = res.group(1)
ip = res.group(2)
port = res.group(3)
info = res.group(4).strip('\t')
self.insertDB.append((index, timestamp, ip, port, ' ', ' ', info, ' '))
elif re.match(reg3, line):
# handle reg3 result
res = reg3.search(line)
timestamp = res.group(1)
ip = res.group(2)
port = res.group(3)
self.insertDB.append((index, timestamp, ip, port, ' ', ' ', ' ', ' '))
elif re.match(reg4, line):
# handle reg4 result
res = reg4.search(line)
timestamp = res.group(1)
ip = res.group(2)
port = res.group(3)
service = res.group(4).strip('\t')
banner = res.group(5).strip('\t')
self.insertDB.append((index, timestamp, ip, port, service, banner, ' ', ' '))
elif re.match(reg5, line):
# handle reg5 result
res = reg5.search(line)
ip = res.group(1)
port = res.group(2)
info = res.group(3).strip('\t')
self.insertDB.append((index, ' ', ip, port, ' ', ' ', info, ' '))
elif re.match(reg6, line):
# handle reg6 result
res = reg6.search(line)
ip = res.group(1)
port = res.group(2)
info = res.group(3).strip('\t')
self.insertDB.append((index, ' ', ip, port, ' ', ' ', info, ' '))
elif re.match(reg7, line):
# handle reg7 result
res = reg7.search(line)
ip = res.group(1)
port = res.group(2)
self.insertDB.append((index, ' ', ip, port, ' ', ' ', ' ', ' '))
elif re.match(reg8, line):
# handle reg8 result
res = reg8.search(line)
ip = res.group(1)
port = res.group(2)
service = res.group(3).strip('\t')
banner = res.group(4).strip('\t')
self.insertDB.append((index, ' ', ip, port, service, banner, ' ', ' '))
else:
error +=1
logging.debug('Error line: '+line)
parsed = index - error
logging.debug('Total line: ' + str(index) + ' Parsed line: ' + str(parsed) + ' Error line: ' + str(error))
if len(self.insertDB) > 0:
logging.info(G+'Parsing process completed!'+W)
return self.insertDB
else:
logging.warning(R+'No Host found in scanning logfile, exiting...'+W)
exit(1)
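# Hedged usage sketch: 'scan.gnmap' is a placeholder path to masscan grepable output;
# parse() returns a list of tuples ready for database insertion.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    for record in Masscan_Parser('scan.gnmap').parse():
        print(record)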
|
405986
|
import rdtest
import renderdoc as rd
class GL_Buffer_Truncation(rdtest.Buffer_Truncation):
demos_test_name = 'GL_Buffer_Truncation'
internal = False
|
405988
|
import logging
import os
from dotenv import load_dotenv
from objdict import ObjDict
import json
import requests
from impl.timer import timefunc
from impl.parser import Parser
load_dotenv()
def set_log_level(debug):
"""
:param debug: Boolean value
:return: None
"""
if bool(debug):
logging.basicConfig(level=logging.DEBUG)
set_log_level(os.environ['DEBUG'].lower() in ('1', 'true', 'yes'))
def build_parse_output_response(inputs, outputs, errors=None):
"""
    :param inputs: The inputs gathered from the extraction process
    :param outputs: The outputs object - power skill output
    :param errors: Per-record error messages collected while parsing
    :return: The json response object
"""
values = ObjDict()
values.values = []
for input, output, error in zip(inputs['values'], outputs['data'], errors):
values.values.append({'recordId': input['recordId'],
"errors": error,
"data": output,
})
return values
@timefunc
def parse(inputs):
"""
Read the video indexer insights file.
Split into intervals
Augment with classifications from an external service
    :param inputs: Power skill request payload containing a 'values' list
    :return: The json response object
"""
outputs = {'data': []}
errors = []
try:
parser = Parser()
values = inputs['values']
for insight_file_data in values:
video_insights_json = insight_file_data['data']
# convert insight to searchable docs
acs_json = parser.parse_vi_json(video_insights_json)
# convert to a scene list from dictionary keyed on scene id
scenes = list(acs_json.values())
outputs['data'].append({
"scenes": scenes
})
errors.append('')
except Exception as ProcessingError:
logging.exception(ProcessingError)
errors.append(str(ProcessingError))
output_response = build_parse_output_response(inputs, outputs, errors)
logging.debug(output_response)
return output_response
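# Hedged sketch of the expected request shape; the insight payload below is a
# hypothetical stand-in for a real Video Indexer insights document, so a real run
# would supply the full insights JSON produced by Video Indexer.
if __name__ == '__main__':
    sample_request = {'values': [{'recordId': 'r1', 'data': {'videos': []}}]}
    print(parse(sample_request))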
|
405992
|
import asyncio
import os
import subprocess
import tempfile
from pathlib import Path
from .version import VERSION
__all__ = [
'AsyncPydf',
'generate_pdf',
'get_version',
'get_help',
'get_extended_help',
]
THIS_DIR = Path(__file__).parent.resolve()
WK_PATH = os.getenv('WKHTMLTOPDF_PATH', str(THIS_DIR / 'bin' / 'wkhtmltopdf'))
DFT_CACHE_DIR = Path(tempfile.gettempdir()) / 'pydf_cache'
def _execute_wk(*args, input=None):
"""
Generate path for the wkhtmltopdf binary and execute command.
:param args: args to pass straight to subprocess.Popen
:return: stdout, stderr
"""
wk_args = (WK_PATH,) + args
return subprocess.run(wk_args, input=input, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def _convert_args(**py_args):
cmd_args = []
for name, value in py_args.items():
if value in {None, False}:
continue
arg_name = '--' + name.replace('_', '-')
if value is True:
cmd_args.append(arg_name)
else:
cmd_args.extend([arg_name, str(value)])
# read from stdin and write to stdout
cmd_args.extend(['-', '-'])
return cmd_args
class AsyncPydf:
def __init__(self, *, max_processes=20, loop=None, cache_dir=DFT_CACHE_DIR):
self.semaphore = asyncio.Semaphore(value=max_processes, loop=loop)
self.loop = loop
if not cache_dir.exists():
Path.mkdir(cache_dir)
self.cache_dir = cache_dir
async def generate_pdf(self, html, **cmd_args):
cmd_args = [WK_PATH] + _convert_args(cache_dir=self.cache_dir, **cmd_args)
async with self.semaphore:
p = await asyncio.create_subprocess_exec(
*cmd_args,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
loop=self.loop,
)
p.stdin.write(html.encode())
p.stdin.close()
await p.wait()
pdf_content = await p.stdout.read()
if p.returncode != 0 and pdf_content[:4] != b'%PDF':
stderr = await p.stderr.read()
raise RuntimeError(
'error running wkhtmltopdf, command: {!r}\n'
'response: "{}"'.format(cmd_args, stderr.decode().strip())
)
return pdf_content
def generate_pdf(
html,
*,
cache_dir: Path = DFT_CACHE_DIR,
grayscale: bool = False,
lowquality: bool = False,
margin_bottom: str = None,
margin_left: str = None,
margin_right: str = None,
margin_top: str = None,
orientation: str = None,
page_height: str = None,
page_width: str = None,
page_size: str = None,
image_dpi: str = None,
image_quality: str = None,
**extra_kwargs,
):
"""
    Generate a pdf from an html string.
    After the html argument all other arguments are
    passed straight to wkhtmltopdf
For details on extra arguments see the output of get_help()
and get_extended_help()
All arguments whether specified or caught with extra_kwargs are converted
to command line args with "'--' + original_name.replace('_', '-')"
    Arguments which are True are passed with no value eg. just --quiet; False
    and None arguments are omitted, everything else is passed with str(value).
:param html: html string to generate pdf from
:param grayscale: bool
:param lowquality: bool
:param margin_bottom: string eg. 10mm
:param margin_left: string eg. 10mm
:param margin_right: string eg. 10mm
:param margin_top: string eg. 10mm
:param orientation: Portrait or Landscape
:param page_height: string eg. 10mm
:param page_width: string eg. 10mm
:param page_size: string: A4, Letter, etc.
:param image_dpi: int default 600
:param image_quality: int default 94
:param extra_kwargs: any exotic extra options for wkhtmltopdf
:return: string representing pdf
"""
if not cache_dir.exists():
Path.mkdir(cache_dir)
py_args = dict(
cache_dir=cache_dir,
grayscale=grayscale,
lowquality=lowquality,
margin_bottom=margin_bottom,
margin_left=margin_left,
margin_right=margin_right,
margin_top=margin_top,
orientation=orientation,
page_height=page_height,
page_width=page_width,
page_size=page_size,
image_dpi=image_dpi,
image_quality=image_quality,
)
py_args.update(extra_kwargs)
cmd_args = _convert_args(**py_args)
p = _execute_wk(*cmd_args, input=html.encode())
pdf_content = p.stdout
# it seems wkhtmltopdf's error codes can be false, we'll ignore them if we
# seem to have generated a pdf
if p.returncode != 0 and pdf_content[:4] != b'%PDF':
raise RuntimeError(
'error running wkhtmltopdf, command: {!r}\n' 'response: "{}"'.format(cmd_args, p.stderr.decode().strip())
)
return pdf_content
def _string_execute(*args):
return _execute_wk(*args).stdout.decode().strip(' \n')
def get_version():
"""
Get version of pydf and wkhtmltopdf binary
:return: version string
"""
try:
wk_version = _string_execute('-V')
except Exception as e:
# we catch all errors here to make sure we get a version no matter what
wk_version = '%s: %s' % (e.__class__.__name__, e)
return 'pydf version: %s\nwkhtmltopdf version: %s' % (VERSION, wk_version)
def get_help():
"""
get help string from wkhtmltopdf binary
uses -h command line option
:return: help string
"""
return _string_execute('-h')
def get_extended_help():
"""
get extended help string from wkhtmltopdf binary
uses -H command line option
:return: extended help string
"""
return _string_execute('-H')
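# Hedged usage sketch: assumes a usable wkhtmltopdf binary is reachable via WK_PATH
# (or the WKHTMLTOPDF_PATH environment variable); 'hello.pdf' is a placeholder output path.
if __name__ == '__main__':
    pdf = generate_pdf('<h1>Hello</h1>', page_size='A4', margin_top='10mm')
    with open('hello.pdf', 'wb') as f:
        f.write(pdf)
    print(get_version())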
|
406007
|
import urllib
import os
import time
import requests
import uuid
import json
import zipfile
import base64
from azure.storage.table import TableService, Entity, TablePermissions
STORAGE_ACCOUNT_NAME = os.environ['STORAGE_ACCOUNT_NAME']
STORAGE_ACCOUNT_KEY = os.environ['STORAGE_ACCOUNT_KEY']
DATABRICKS_API_BASE_URL = os.environ['DATABRICKS_WORKSPACE_URL'] + '/api/'
FEATURIZER_JAR_URL = os.environ['FEATURIZER_JAR_URL']
DATABRICKS_TOKEN = os.environ['DATABRICKS_TOKEN']
IOT_HUB_NAME = os.environ['IOT_HUB_NAME']
EVENT_HUB_ENDPOINT = os.environ['EVENT_HUB_ENDPOINT']
TMP = os.environ['TMP']
NOTEBOOKS_URL = os.environ['NOTEBOOKS_URL']
STORAGE_ACCOUNT_CONNECTION_STRING = "DefaultEndpointsProtocol=https;AccountName=" + STORAGE_ACCOUNT_NAME + ";AccountKey=" + STORAGE_ACCOUNT_KEY + ";EndpointSuffix=core.windows.net"
def call_api(uri, method=requests.get, json=None, data=None, files=None):
headers = { 'Authorization': 'Bearer ' + DATABRICKS_TOKEN }
#TODO: add retries
response = method(DATABRICKS_API_BASE_URL + uri, headers=headers, json=json, data=data, files=files)
if response.status_code != 200:
raise Exception('Error when calling Databricks API {0}. Response:\n{1}'.format(uri, response.text))
return response
def get_last_run_id():
table_service = TableService(account_name=STORAGE_ACCOUNT_NAME, account_key=STORAGE_ACCOUNT_KEY)
databricks_cluster_details_entries = table_service.query_entities('databricks', filter="PartitionKey eq 'pdm'")
databricks_cluster_details = list(databricks_cluster_details_entries)
if databricks_cluster_details:
return databricks_cluster_details[0]['run_id']
return None
def set_last_run_id(run_id):
table_service = TableService(account_name=STORAGE_ACCOUNT_NAME, account_key=STORAGE_ACCOUNT_KEY)
databricks_details = {'PartitionKey': 'pdm', 'RowKey': 'pdm', 'run_id' : str(run_id)}
table_service.insert_or_replace_entity('databricks', databricks_details)
def get_run(run_id):
run_state = 'PENDING'
while run_state in ['PENDING', 'RESIZING']:
run_details = call_api('2.0/jobs/runs/get?run_id=' + str(run_id)).json()
run_state = run_details['state']['life_cycle_state']
time.sleep(10)
return run_details
def is_job_active(run_details):
run_state = run_details['state']['life_cycle_state']
return run_state == 'RUNNING'
def upload_notebooks_databricks():
    #download the notebooks archive and extract it locally
notebooks_zip_local_path = os.path.join(TMP, 'Notebooks.zip')
urllib.request.urlretrieve(NOTEBOOKS_URL, notebooks_zip_local_path)
zip_ref = zipfile.ZipFile(notebooks_zip_local_path, 'r')
notebooks_local_path = os.path.join(TMP, 'Notebooks')
zip_ref.extractall(notebooks_local_path)
#upload feature engineering notebook to databricks workspace
featureEngineering_local_path = os.path.join(notebooks_local_path, 'FeatureEngineering.ipynb')
files = {'file': open(featureEngineering_local_path, 'rb')}
bdfs = "/FeatureEngineering"
put_payload = { 'path' : bdfs, 'overwrite' : 'true', 'language':'PYTHON', 'format':'JUPYTER' }
resp = call_api('2.0/workspace/import', method=requests.post, data=put_payload, files = files).json()
#upload data ingestion notebook to databricks workspace
dataIngestion_local_path = os.path.join(notebooks_local_path, 'DataIngestion.ipynb')
files = {'file': open(dataIngestion_local_path, 'rb')}
bdfs = "/DataIngestion"
put_payload = { 'path' : bdfs, 'overwrite' : 'true', 'language':'PYTHON', 'format':'JUPYTER' }
resp = call_api('2.0/workspace/import', method=requests.post, data=put_payload, files = files).json()
upload_notebooks_databricks()
data = '{"DataIngestion" : { "STORAGE_ACCOUNT_NAME" :"' + STORAGE_ACCOUNT_NAME + '", "STORAGE_ACCOUNT_KEY" :"' + STORAGE_ACCOUNT_KEY +'", "TELEMETRY_CONTAINER_NAME" : "telemetry", "LOG_TABLE_NAME" : "Logs", "DATA_ROOT_FOLDER" : "/root"}}'
file = open('D:/home/site/NotebookEnvironmentVariablesConfig.json','w')
file.write(data)
file.close()
config_path = '/root/NotebookEnvironmentVariablesConfig.json'
files = {'file': open('D:/home/site/NotebookEnvironmentVariablesConfig.json', 'rb')}
put_payload = { 'path' : config_path, 'overwrite' : 'true' }
call_api('2.0/dbfs/put', method=requests.post, data=put_payload, files=files)
last_run_id = get_last_run_id()
if last_run_id is not None and is_job_active(get_run(last_run_id)):
exit(0)
jar_local_path = os.path.join(TMP, 'featurizer_2.11-1.0.jar')
dbfs_path = '/predictive-maintenance/jars/'
jar_dbfs_path = dbfs_path + 'featurizer_2.11-1.0.jar'
urllib.request.urlretrieve(FEATURIZER_JAR_URL, jar_local_path)
mkdirs_payload = { 'path': dbfs_path }
call_api('2.0/dbfs/mkdirs', method=requests.post, json=mkdirs_payload)
files = {'file': open(jar_local_path, 'rb')}
put_payload = { 'path' : jar_dbfs_path, 'overwrite' : 'true' }
call_api('2.0/dbfs/put', method=requests.post, data=put_payload, files=files)
sparkSpec= {
'spark.speculation' : 'true'
}
payload = {
'spark_version' : '4.2.x-scala2.11',
'node_type_id' : 'Standard_D3_v2',
'spark_conf' : sparkSpec,
'num_workers' : 1
}
#run job
jar_path = "dbfs:" + jar_dbfs_path
jar = {
'jar' : jar_path
}
maven_coordinates = {
'coordinates' : 'com.microsoft.azure:azure-eventhubs-spark_2.11:2.3.1'
}
maven = {
'maven' : maven_coordinates
}
libraries = [jar, maven]
jar_params = [EVENT_HUB_ENDPOINT, IOT_HUB_NAME, STORAGE_ACCOUNT_CONNECTION_STRING]
spark_jar_task= {
'main_class_name' : 'com.microsoft.ciqs.predictivemaintenance.Featurizer',
'parameters' : jar_params
}
payload = {
"run_name": "featurization_task",
"new_cluster" : payload,
'libraries' : libraries,
'max_retries' : 1,
'spark_jar_task' : spark_jar_task
}
run_job = True
i = 0
while run_job and i < 5:
run_details = call_api('2.0/jobs/runs/submit', method=requests.post, json=payload).json()
run_id = run_details['run_id']
set_last_run_id(run_id)
run_details = get_run(run_id)
    i += 1
if not is_job_active(run_details):
run_job = True
errorMessage = 'Unable to create Spark job. Run ID: {0}. Failure Details: {1}'.format(run_id, run_details['state']['state_message'])
print(errorMessage)
else:
run_job = False
|
406008
|
import unittest
from unittest.mock import patch, MagicMock
from datetime import timedelta
import json
import os
import threading
import signal
import subprocess
import tempfile
import sys
from osgar.record import Recorder
class Sleeper:
def __init__(self, cfg, bus):
self.e = threading.Event()
def start(self):
self.t = threading.Thread(target=self.e.wait, args=(5,))
self.t.start()
def join(self, timeout=None):
self.t.join(timeout)
def request_stop(self):
self.e.set()
class RecorderTest(unittest.TestCase):
def test_dummy_usage(self):
empty_config = {'modules': {}, 'links':[]}
with Recorder(config=empty_config, logger=MagicMock()) as recorder:
pass
def test_missing_init(self):
# init section for modules is now optional
mini_config = {'modules': {
"dummy": {
"driver": "osgar.test_record:Sleeper"
},
}, 'links':[]}
with Recorder(config=mini_config, logger=MagicMock()) as recorder:
pass
def test_config(self):
with patch('osgar.drivers.logserial.serial.Serial') as mock:
instance = mock.return_value
instance.read = MagicMock(return_value=b'$GNGGA,182433.10,5007.71882,N,01422.50467,E,1,05,6.09,305.1,M,44.3,M,,*41')
config = {
'modules': {
'gps': {
'driver': 'gps',
'out':['position'],
'init':{}
},
'serial_gps': {
'driver': 'serial',
'out':['raw'],
'init': {'port': 'COM51', 'speed': 4800}
}
},
'links': [('serial_gps.raw', 'gps.raw')]
}
logger = MagicMock(write = MagicMock(return_value=timedelta(seconds=135)))
with Recorder(config=config, logger=logger) as recorder:
self.assertEqual(len(recorder.modules), 2)
self.assertEqual(sum([sum([len(q) for q in module.bus.out.values()])
for module in recorder.modules.values()]), 1)
def test_spider_config(self):
# first example with loop spider <-> serial
with open(os.path.dirname(__file__) + '/../config/test-spider.json') as f:
config = json.loads(f.read())
with patch('osgar.drivers.logserial.serial.Serial') as mock:
logger = MagicMock()
recorder = Recorder(config=config['robot'], logger=logger)
def test_all_supported_config_files(self):
supported = ['test-spider.json', 'test-gps-imu.json',
'test-spider-gps-imu.json', 'test-windows-gps.json']
with patch('osgar.drivers.logserial.serial.Serial') as mock:
logger = MagicMock()
for filename in supported:
with open(os.path.join(os.path.dirname(__file__), '..', 'config',
filename)) as f:
config = json.loads(f.read())
recorder = Recorder(config=config['robot'], logger=logger)
@unittest.skipIf(os.name != "posix", "requires posix shell")
def test_sigint_shell(self):
config = {
'version': 2,
'robot': {
'modules': {
"app": {
"driver": "osgar.test_record:Sleeper",
"init": {}
},
}, 'links':[]
}
}
with tempfile.NamedTemporaryFile() as cfg:
cfg.write(json.dumps(config).encode('ascii'))
cfg.flush()
env = os.environ.copy()
env['OSGAR_LOGS'] = '.'
with subprocess.Popen(
f"echo starting; {sys.executable} -m osgar.record {cfg.name}; echo should not get here",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
start_new_session=True,
env=env,
) as proc:
grp_id = os.getpgid(proc.pid)
self.assertEqual(proc.stdout.readline().strip(), b"starting")
log_line = proc.stderr.readline().strip().split()
log_filename = log_line[-1]
self.assertTrue(log_filename.endswith(b".log"), log_line)
self.assertIn(b"SIGINT handler installed", proc.stderr.readline())
os.killpg(grp_id, signal.SIGINT)
stdout, stderr = proc.communicate()
self.assertIn(b"committing suicide by SIGINT", stderr)
self.assertEqual(len(stdout), 0, stdout)
self.assertEqual(len(stderr.splitlines()), 1, stderr)
os.unlink(log_filename)
# vim: expandtab sw=4 ts=4
|
406010
|
from quick_orm.core import Database
from sqlalchemy import Column, String
class DefaultModel:
name = Column(String(70))
__metaclass__ = Database.MetaBuilder(DefaultModel)
class User:
pass
class Group:
pass
Database.register()
if __name__ == '__main__':
db = Database('sqlite://')
db.create_tables()
user = User(name = 'tylerlong')
db.session.add(user)
group = Group(name = 'python')
db.session.add_then_commit(group)
    print(user.name)
    print(group.name)
|
406013
|
from dataclasses import dataclass
from typing import Dict, Any, Optional
from lighttree.interactive import Obj
from elasticsearch import Elasticsearch
from pandagg import Mappings, MappingsDict
from pandagg.interactive.mappings import IMappings
from pandagg.search import Search
@dataclass
class Index:
name: str
settings: Dict[str, Any]
mappings: MappingsDict
aliases: Any
client: Optional[Elasticsearch] = None
@property
def imappings(self) -> IMappings:
# TODO- create mypy issue
mappings: Mappings = Mappings(**self.mappings) # type: ignore
return IMappings(mappings=mappings, client=self.client, index=[self.name])
def search(
self, nested_autocorrect: bool = True, repr_auto_execute: bool = True
) -> Search:
return Search(
using=self.client,
mappings=self.mappings,
index=self.name,
nested_autocorrect=nested_autocorrect,
repr_auto_execute=repr_auto_execute,
)
def __str__(self) -> str:
return self.__repr__()
def __repr__(self) -> str:
return "<Index '%s'>" % self.name
class Indices(Obj):
_COERCE_ATTR = True
def discover(using: Elasticsearch, index: str = "*") -> Indices:
"""
:param using: Elasticsearch client
:param index: Comma-separated list or wildcard expression of index names used to limit the request.
"""
indices = Indices()
for index_name, index_detail in using.indices.get(index=index).items():
indices[index_name] = Index(
client=using,
name=index_name,
mappings=index_detail["mappings"],
settings=index_detail["settings"],
aliases=index_detail["aliases"],
)
return indices
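# Hedged usage sketch: the cluster address and index pattern below are placeholders.
if __name__ == "__main__":
    es = Elasticsearch(hosts=["http://localhost:9200"])
    indices = discover(es, index="my-index-*")
    print(indices)  # interactive object; individual indices are exposed as attributes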
|
406028
|
import os
import numpy as np
from string import ascii_uppercase
from io_spatial import do_rotation, rotate_check
from io_helpers import make_grid, are_you_numpy
from collections import OrderedDict
import gzip
### Written by <NAME>, AG Klebe Marburg University
### 08/2016
class field (object):
"""
    This is a class for operations on generic
    scalar fields. Like GIST objects, they
    must be described in cartesian as well as
    in fractional space. That means that we need an
    origin, a frac2real/real2frac matrix and/or
    grid spacing vectors.
<NAME>, AG Klebe, 08/2016
"""
def __init__(self, Bins, Frac2Real=None, Delta=None, Origin=None, Center=None):
if type(Frac2Real) == type(None) and type(Delta) == type(None):
raise ValueError("Must provide Frac2Real or Delta.")
if type(Frac2Real) != type(None) and type(Delta) != type(None):
raise ValueError("Must provide either Frac2Real or Delta.")
if type(Frac2Real) == type(None):
self.delta = Delta
self.frac2real = np.eye(3,3) * self.delta
else:
self.frac2real = Frac2Real
self.delta = np.linalg.norm(self.frac2real, axis=0)
self.real2frac = np.linalg.inv(self.frac2real)
self.bins = Bins
self.rotation_matrix = np.eye(3,3)
self.translation_vector = np.zeros(3)
if type(Origin) == type(None) and type(Center) == type(None):
raise ValueError("Must provide origin or Center.")
if type(Origin) != type(None) and type(Center) != type(None):
raise ValueError("Must provide either origin or center.")
if type(Center) == type(None):
self.origin = Origin
self.center = self.get_real(self.bins/2)
else:
self.center = Center
#First we need an auxiliary origin at (0,0,0)
self.origin = np.zeros(3)
#Second translate origin according center displacement
self.origin = self.center - self.get_real(self.bins/2)
self.dim = np.array([ np.linalg.norm(self.get_real([self.bins[0], 0., 0.])-self.origin),
np.linalg.norm(self.get_real([0., self.bins[1], 0.])-self.origin),
np.linalg.norm(self.get_real([0., 0., self.bins[2]])-self.origin)
])
def translate(self, vector=np.zeros(3)):
"""
        Translate the unit cell origin by vector.
"""
self.translation_vector += vector
def rotate(self, matrix=np.eye(3,3)):
"""
Rotate the unit cell vectors.
"""
rotate_check(matrix)
self.rotation_matrix = matrix.dot(self.rotation_matrix)
def translate_global(self, vector=np.zeros(3)):
"""
Translate global coordinate system
along vector.
"""
self.origin += vector
def rotate_global(self, reference_point=np.zeros(3), matrix=np.eye(3,3)):
"""
Rotate global coordinate system around
reference point.
"""
rotate_check(matrix)
self.origin = do_rotation(self.origin, reference_point, matrix)
self.rotate(matrix)
self.translation_vector = do_rotation(self.translation_vector, np.zeros(3), matrix)
def get_nice_frac2real(self):
return self.rotation_matrix.dot(self.frac2real)
def get_nice_real2frac(self):
return np.linalg.inv(self.get_nice_frac2real())
def get_voxel_volume(self):
"""
Returns the volume per grid voxel.
"""
return np.absolute(np.cross(self.frac2real[:,0], self.frac2real[:,1]).dot(self.frac2real[:,2]))
def get_frac(self, real_array):
#Convert to initial real space by inverse translation and rotation
initial_reals = do_rotation(real_array, self.origin + self.translation_vector, np.linalg.inv(self.rotation_matrix))
#Remove origin
initial_reals -= (self.origin + self.translation_vector)
#Convert to initial fractional space
return initial_reals.dot(self.real2frac)
def get_real(self, frac_array):
#Convert to real space
reals = np.array(frac_array).dot(self.frac2real)
#Perform rotation translation
return do_rotation(reals, np.zeros(3), self.rotation_matrix) + self.origin + self.translation_vector
def get_centers(self):
return self.get_real(make_grid((np.arange(self.bins[0]),\
np.arange(self.bins[1]),\
np.arange(self.bins[2]))))
def get_centers_real(self):
return self.get_centers()
def get_centers_frac(self):
return make_grid((np.arange(self.bins[0]),\
np.arange(self.bins[1]),\
np.arange(self.bins[2])))
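# Hedged usage sketch for the field class (illustrative comment only; exact results
# depend on the do_rotation helper from io_spatial):
#
#   >>> f = field(Bins=np.array([10, 10, 10]), Delta=np.array([0.5, 0.5, 0.5]),
#   ...           Origin=np.zeros(3))
#   >>> f.get_real([2, 0, 0])              # fractional -> cartesian: array([1., 0., 0.])
#   >>> f.get_frac(f.get_real([2, 0, 0]))  # round trip back to [2, 0, 0]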
class gist (field):
def __init__(self, Bins, Frac2Real=None, Delta=None, Origin=None, Center=None, gist17=False):
field.__init__(self, Bins, Frac2Real, Delta, Origin, Center)
if type(gist17) != bool:
raise IOError("gist17 must be of type bool but is of type %s" %type(gist17))
### Compatibility with cpptraj v.17
self.gist17 = gist17
        ### Number of nearest water neighbours
        ### within 3.5 Ang in bulk.
        ### This value is calculated with TIP4PEw
        ### and should be used only in conjunction
        ### with this water model.
self.bulk_NN = 5.098076
        ### Reference interaction energy in kcal/mol.
### This value is calculated with TIP4PEW
self.ref_ene = 11.063656
### Reference density in 1/Ang^3
### This value is calculated with TIP4PEW as well.
self.ref_rho = 0.0332
### Numpy arrays for storing all quantities
self.Pop = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self.gO = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self.gH = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self.dTStrans_dens = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self.dTStrans_norm = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self.dTSorient_dens = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self.dTSorient_norm = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self.dTSsix_dens = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self.dTSsix_norm = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self.Esw_dens = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self.Esw_norm = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self.Eww_dens = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self.Eww_norm_unref = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self.Dipole_x_dens = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self.Dipole_y_dens = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self.Dipole_z_dens = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self.Dipole_dens = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self.Neighbor_dens = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self.Neighbor_norm = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self.Order_norm = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._tmp_Pop = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._tmp_gO = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._tmp_gH = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._tmp_dTStrans_dens = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._tmp_dTStrans_norm = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._tmp_dTSorient_dens = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._tmp_dTSorient_norm = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._tmp_dTSsix_dens = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._tmp_dTSsix_norm = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._tmp_Esw_norm = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._tmp_Esw_dens = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._tmp_Eww_dens = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._tmp_Eww_norm_unref = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._tmp_Dipole_x_dens = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._tmp_Dipole_y_dens = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._tmp_Dipole_z_dens = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._tmp_Dipole_dens = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._tmp_Neighbor_dens = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._tmp_Neighbor_norm = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._tmp_Order_norm = np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ], dtype=float)
self._update()
def _update(self):
self.Pop = self._tmp_Pop
self.gO = self._tmp_gO
self.gH = self._tmp_gH
self.dTStrans_dens = self._tmp_dTStrans_dens
self.dTStrans_norm = self._tmp_dTStrans_norm
self.dTSorient_dens = self._tmp_dTSorient_dens
self.dTSorient_norm = self._tmp_dTSorient_norm
self.dTSsix_dens = self._tmp_dTSsix_dens
self.dTSsix_norm = self._tmp_dTSsix_norm
self.Esw_dens = self._tmp_Esw_dens
self.Esw_norm = self._tmp_Esw_norm
self.Eww_dens = self._tmp_Eww_dens
self.Eww_norm_unref = self._tmp_Eww_norm_unref
self.Dipole_x_dens = self._tmp_Dipole_x_dens
self.Dipole_y_dens = self._tmp_Dipole_y_dens
self.Dipole_z_dens = self._tmp_Dipole_z_dens
self.Dipole_dens = self._tmp_Dipole_dens
self.Neighbor_dens = self._tmp_Neighbor_dens
self.Neighbor_norm = self._tmp_Neighbor_norm
self.Order_norm = self._tmp_Order_norm
def cut_round_center(self, bins):
__doc__="""
bins is the new self.bins attribute of this class.
All other attributes (i.e. all scalar fields containing Gist data)
will be reshaped and transformed such that the position of the
grid center is preserved. Thereby all edges are cut symmetrically around
the center.
Example:
        bins=[20,20,20] with self.bins=[50,50,50]
        Here, the original self.bins will be reduced to bins. Thereby, the xedge,
        yedge and zedge will be truncated such that bins [0:15] and [35:50] are cut off.
"""
        if bins.shape[0] != 3:
            print("Target bins array must have shape (3,)")
        elif self.bins[0] < bins[0] or self.bins[1] < bins[1] or self.bins[2] < bins[2]:
            print("Target bins array must be smaller than the original one.")
else:
cut = (self.bins - bins)/2
cut_bins = np.array([[cut[0], cut[0]],
[cut[1], cut[1]],
[cut[2], cut[2]]])
self.cut(cut_bins)
def cut(self, cut_bins):
__doc__="""
cut_bins must be an array of shape (3,2), with
[[lower_cut_x, upper_cut_x],
[lower_cut_y, upper_cut_y],
[lower_cut_z, upper_cut_z]]
marking the number of bins cut out at lower and upper
boundaries of each dimension.
"""
self.bins[0] = self.bins[0] - cut_bins[0,0] - cut_bins[0,1]
self.bins[1] = self.bins[1] - cut_bins[1,0] - cut_bins[1,1]
self.bins[2] = self.bins[2] - cut_bins[2,0] - cut_bins[2,1]
self.dim = self.bins * self.delta
self.n = np.copy(self.bins)
self.origin = np.zeros(3)
#Second translate origin according to center displacement
self.origin = self.center - self.get_real(self.bins/2)
self._tmp_Pop = np.array(np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ] ), dtype=float)
self._tmp_gO = np.array(np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ] ), dtype=float)
self._tmp_gH = np.array(np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ] ), dtype=float)
self._tmp_dTStrans_dens = np.array(np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ] ), dtype=float)
self._tmp_dTStrans_norm = np.array(np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ] ), dtype=float)
self._tmp_dTSorient_dens = np.array(np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ] ), dtype=float)
self._tmp_dTSorient_norm = np.array(np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ] ), dtype=float)
self._tmp_dTSsix_dens = np.array(np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ] ), dtype=float)
self._tmp_dTSsix_norm = np.array(np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ] ), dtype=float)
self._tmp_Esw_norm = np.array(np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ] ), dtype=float)
self._tmp_Esw_dens = np.array(np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ] ), dtype=float)
self._tmp_Eww_dens = np.array(np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ] ), dtype=float)
self._tmp_Eww_norm_unref = np.array(np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ] ), dtype=float)
self._tmp_Dipole_x_dens = np.array(np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ] ), dtype=float)
self._tmp_Dipole_y_dens = np.array(np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ] ), dtype=float)
self._tmp_Dipole_z_dens = np.array(np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ] ), dtype=float)
self._tmp_Dipole_dens = np.array(np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ] ), dtype=float)
self._tmp_Neighbor_dens = np.array(np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ] ), dtype=float)
self._tmp_Neighbor_norm = np.array(np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ] ), dtype=float)
self._tmp_Order_norm = np.array(np.zeros( [ self.bins[0], self.bins[1], self.bins[2] ] ), dtype=float)
for x_i, x in enumerate(range(cut_bins[0,0], cut_bins[0,0]+self.bins[0])):
for y_i, y in enumerate(range(cut_bins[1,0], cut_bins[1,0]+self.bins[1])):
for z_i, z in enumerate(range(cut_bins[2,0], cut_bins[2,0]+self.bins[2])):
self._tmp_Pop [x_i][y_i][z_i] = self.Pop [x,y,z]
self._tmp_gO [x_i][y_i][z_i] = self.gO [x,y,z]
self._tmp_gH [x_i][y_i][z_i] = self.gH [x,y,z]
self._tmp_dTStrans_dens [x_i][y_i][z_i] = self.dTStrans_dens [x,y,z]
self._tmp_dTStrans_norm [x_i][y_i][z_i] = self.dTStrans_norm [x,y,z]
self._tmp_dTSorient_dens [x_i][y_i][z_i] = self.dTSorient_dens [x,y,z]
self._tmp_dTSorient_norm [x_i][y_i][z_i] = self.dTSorient_norm [x,y,z]
                    ### In newer versions of GIST, six-dimensional translational entropy is reported after
                    ### dTSorient_norm. For now, we want to skip that.
if self.gist17:
self._tmp_dTSsix_dens [x_i][y_i][z_i] = self.dTSsix_dens [x,y,z]
self._tmp_dTSsix_norm [x_i][y_i][z_i] = self.dTSsix_norm [x,y,z]
self._tmp_Esw_dens [x_i][y_i][z_i] = self.Esw_dens [x,y,z]
self._tmp_Esw_norm [x_i][y_i][z_i] = self.Esw_norm [x,y,z]
self._tmp_Eww_dens [x_i][y_i][z_i] = self.Eww_dens [x,y,z]
self._tmp_Eww_norm_unref [x_i][y_i][z_i] = self.Eww_norm_unref [x,y,z]
self._tmp_Dipole_x_dens [x_i][y_i][z_i] = self.Dipole_x_dens [x,y,z]
self._tmp_Dipole_y_dens [x_i][y_i][z_i] = self.Dipole_y_dens [x,y,z]
self._tmp_Dipole_z_dens [x_i][y_i][z_i] = self.Dipole_z_dens [x,y,z]
self._tmp_Dipole_dens [x_i][y_i][z_i] = self.Dipole_dens [x,y,z]
self._tmp_Neighbor_dens [x_i][y_i][z_i] = self.Neighbor_dens [x,y,z]
self._tmp_Neighbor_norm [x_i][y_i][z_i] = self.Neighbor_norm [x,y,z]
self._tmp_Order_norm [x_i][y_i][z_i] = self.Order_norm [x,y,z]
self._update()
def get_nan(self):
__doc__="""
        Return array that contains True wherever the Population array
        is np.nan and False elsewhere.
"""
tmp = np.zeros(self.bins, dtype=bool)
tmp[np.isnan(self.Pop)] = True
return tmp
def get_pop(self):
__doc__="""
        Return array that contains True wherever the Population array
        is greater than zero.
"""
tmp = np.zeros(self.bins, dtype=bool)
tmp[np.where(self.Pop > 0)] = True
return tmp
def write_maps(self, prefix="gist", pymol=True):
data_dict = OrderedDict()
data_dict["_Pop.dx"] = [ self.Pop , 1.0 ]
data_dict["_gO.dx"] = [ self.gO , 4.0 ]
data_dict["_gH.dx"] = [ self.gH , 4.0 ]
data_dict["_dTStrans_dens.dx"] = [ self.dTStrans_dens , 0.2 ]
data_dict["_dTStrans_norm.dx"] = [ self.dTStrans_norm , 1.0 ]
data_dict["_dTSorient_dens.dx"] = [ self.dTSorient_dens , 0.2 ]
data_dict["_dTSorient_norm.dx"] = [ self.dTSorient_norm , 1.0 ]
if self.gist17:
data_dict["_dTSsix_dens.dx"] = [ self.dTSsix_dens , 0.2 ]
data_dict["_dTSsix_norm.dx"] = [ self.dTSsix_norm , 1.0 ]
data_dict["_Esw_dens.dx"] = [ self.Esw_dens , 0.2 ]
data_dict["_Esw_norm.dx"] = [ self.Esw_norm , 1.0 ]
data_dict["_Eww_dens.dx"] = [ self.Eww_dens , 0.2 ]
data_dict["_Eww_norm_unref.dx"] = [ self.Eww_norm_unref , 1.0 ]
data_dict["_Eww_norm_ref.dx"] = [ self.ref_ene-self.Eww_norm_unref , 1.0 ]
data_dict["_Eww_norm_ref_dens.dx"] = [ (self.ref_ene-self.Eww_norm_unref)*\
self.gO*self.ref_rho , 1.0 ]
data_dict["_Dipole_x_dens.dx"] = [ self.Dipole_x_dens , 1.0 ]
data_dict["_Dipole_y_dens.dx"] = [ self.Dipole_y_dens , 1.0 ]
data_dict["_Dipole_z_dens.dx"] = [ self.Dipole_z_dens , 1.0 ]
data_dict["_Dipole_dens.dx"] = [ self.Dipole_dens , 1.0 ]
data_dict["_Neighbor_dens.dx"] = [ self.Neighbor_dens , 0.5 ]
data_dict["_Neighbor_norm.dx"] = [ self.Neighbor_norm , 1.0 ]
data_dict["_Order_norm.dx"] = [ self.Order_norm , 5.5 ]
data_dict["_dTS_dens.dx"] = [ self.dTStrans_dens+self.dTSorient_dens , 0.2 ]
data_dict["_dTS_norm.dx"] = [ self.dTStrans_norm+self.dTSorient_norm , 1.0 ]
data_dict["_E_dens.dx"] = [ self.Esw_dens+self.Eww_dens , 0.2 ]
data_dict["_E_norm.dx"] = [ self.Esw_norm+self.Eww_norm_unref , 1.0 ]
data_dict["_Neighbor_loss_norm.dx"] = [ self.Neighbor_norm - self.bulk_NN , 0.5 ]
if pymol:
pymol_string = ""
pymol_string += "from pymol import cmd\n"
pymol_string += "from collections import OrderedDict\n"
pymol_string += "\n"
for name, data in data_dict.items():
write_files(Frac2Real=self.get_nice_frac2real(), Bins=self.bins, Origin=self.origin, Value=data[0], Format="DX", Filename=prefix+name, Nan_fill=-999)
if pymol:
new_name = str(prefix+name).replace(".dx", "")
pymol_string += "### %s ###\n" %new_name
pymol_string += "cmd.load(\"./%s\")\n" %(prefix+name)
pymol_string += "cmd.isomesh(\"%s\", \"%s\", level=%s)\n" %(new_name+"_map", new_name, data[1])
pymol_string += "cmd.map_double(\"%s\")\n" %(new_name)
pymol_string += "\n"
if pymol:
pymol_string += "cmd.disable(\"*_map\")\n"
pymol_string += "cmd.do(\"color blue, *_map\")\n"
pymol_string += "cmd.do(\"set mesh_negative_color, red\")\n"
pymol_string += "cmd.do(\"set mesh_negative_visible\")\n"
with open(prefix+"_pymol.py", "w") as f:
f.write(pymol_string)
class loadgist(gist):
def __init__(self, Path, gist17=False):
gist.__init__(self, Bins=np.array([50,50,50]), Origin=np.array([0,0,0]), Delta=np.array([0.5,0.5,0.5]), gist17=gist17)
if not os.path.exists(Path):
raise IOError("File %s not found." %Path)
self.path = Path
if Path.endswith(".gz"):
map_file_ref = gzip.open(Path,"r")
else:
map_file_ref = open(Path,"r")
map_file = map_file_ref.readlines()
map_file_ref.close()
        ##### Read in the gist-out file produced by
        ##### the gist functionality of cpptraj
start_row = -1
for i, item in enumerate(map_file):
if len(item.rstrip().split()) > 1 and item.rstrip().split()[0] == 'voxel' \
and item.rstrip().split()[1] == 'xcoord':
start_row = i + 1
break
z_start = map_file[start_row].rstrip().split()[3]
y_start = map_file[start_row].rstrip().split()[2]
x_start = map_file[start_row].rstrip().split()[1]
found_bins_z = False
found_bins_y = False
found_bins_x = False
# Find out grid dimensions and bin spacing
# We assume that the cell is completely rectangular
#
# The coordinate data is ordered in opendx like fashion:
# The z coordinate is running fastest, then y coordinate,
# then x coordinate. E.g.:
# (x_0, y_0, z_0), (x_0, y_0, z_1), (x_0, y_0, z_2), ...
for i, line in enumerate(map_file[start_row+1:]):
if found_bins_z and not found_bins_y and line.rstrip().split()[2] == y_start:
self.bins[1] = (i+1) / self.bins[2]
break
if not found_bins_z and line.rstrip().split()[3] == z_start:
self.bins[2] = i + 1
found_bins_z = True
        ### Infer the number of x bins from the total number of data rows
self.bins[0] = (len(map_file) - start_row) / (self.bins[2] * self.bins[1])
# self.n is only here for historical reasons.
# ... and for compatibility with xplor maps!
self.n = np.copy(self.bins)
self.delta = np.array( [ float(map_file[ start_row + 1 + self.bins[0] * self.bins[1]].rstrip().split()[1] )\
- float(map_file[ start_row ].rstrip().split()[1] ),
float(map_file[ start_row + 1 + self.bins[0] ].rstrip().split()[2] )\
- float(map_file[ start_row ].rstrip().split()[2] ),
float(map_file[ start_row + 1 ].rstrip().split()[3] )\
- float(map_file[ start_row ].rstrip().split()[3] ) ] )
self.dim = self.bins * self.delta
self.origin = -self.delta * 0.5 + np.array( [ float(map_file[ start_row ].rstrip().split()[1] ),
float(map_file[ start_row ].rstrip().split()[2] ),
float(map_file[ start_row ].rstrip().split()[3] ) ] )
### We assume that all cell angles are 90 deg.
### Cell edges can be of different length
self.frac2real = np.array( [ [ self.dim[0] / self.n[0], 0.0, 0.0 ],
[ 0.0, self.dim[1] / self.n[1], 0.0 ],
[ 0.0, 0.0, self.dim[2] / self.n[2] ]
]
)
self.real2frac = np.linalg.inv(self.frac2real)
self.rotation_matrix = np.eye(3,3)
self.translation_vector = np.zeros(3)
self.center = self.get_real(self.bins/2)
###### now we read all the gist-specific data...
i = 0
if self.gist17:
skip_col=2
else:
skip_col=0
for x in range(0,self.bins[0]):
for y in range(0,self.bins[1]):
for z in range(0,self.bins[2]):
self._tmp_Pop [x][y][z] = float(map_file[start_row + i].rstrip().split()[4])
self._tmp_gO [x][y][z] = float(map_file[start_row + i].rstrip().split()[5])
self._tmp_gH [x][y][z] = float(map_file[start_row + i].rstrip().split()[6])
self._tmp_dTStrans_dens [x][y][z] = float(map_file[start_row + i].rstrip().split()[7])
self._tmp_dTStrans_norm [x][y][z] = float(map_file[start_row + i].rstrip().split()[8])
self._tmp_dTSorient_dens [x][y][z] = float(map_file[start_row + i].rstrip().split()[9])
self._tmp_dTSorient_norm [x][y][z] = float(map_file[start_row + i].rstrip().split()[10])
                    ### In newer versions of GIST, six-dimensional translational entropy is reported after
                    ### dTSorient_norm. For now, we want to skip that.
if self.gist17:
self._tmp_dTSsix_dens [x][y][z] = float(map_file[start_row + i].rstrip().split()[11])
self._tmp_dTSsix_norm [x][y][z] = float(map_file[start_row + i].rstrip().split()[12])
self._tmp_Esw_dens [x][y][z] = float(map_file[start_row + i].rstrip().split()[11+skip_col])
self._tmp_Esw_norm [x][y][z] = float(map_file[start_row + i].rstrip().split()[12+skip_col])
self._tmp_Eww_dens [x][y][z] = float(map_file[start_row + i].rstrip().split()[13+skip_col])
self._tmp_Eww_norm_unref [x][y][z] = float(map_file[start_row + i].rstrip().split()[14+skip_col])
self._tmp_Dipole_x_dens [x][y][z] = float(map_file[start_row + i].rstrip().split()[15+skip_col])
self._tmp_Dipole_y_dens [x][y][z] = float(map_file[start_row + i].rstrip().split()[16+skip_col])
self._tmp_Dipole_z_dens [x][y][z] = float(map_file[start_row + i].rstrip().split()[17+skip_col])
self._tmp_Dipole_dens [x][y][z] = float(map_file[start_row + i].rstrip().split()[18+skip_col])
self._tmp_Neighbor_dens [x][y][z] = float(map_file[start_row + i].rstrip().split()[19+skip_col])
self._tmp_Neighbor_norm [x][y][z] = float(map_file[start_row + i].rstrip().split()[20+skip_col])
self._tmp_Order_norm [x][y][z] = float(map_file[start_row + i].rstrip().split()[21+skip_col])
i += 1
self._update()
class write_files (object):
def __init__(self, Delta=None, Frac2Real=None, Bins=None, Origin=None, \
Value=None, XYZ=None, X=None, Y=None, Z=None, Format='PDB', \
Filename=None, Nan_fill=-1.0):
"""
This class can write different file types.
        Currently only DX and PDB are supported.
"""
self._delta = Delta
self._frac2real = Frac2Real
self._bins = Bins
self._origin = Origin
self._value = Value
self._x = X
self._y = Y
self._z = Z
self._format = Format
self._filename = Filename
self._xyz = XYZ
self._nan_fill = Nan_fill
if type(self._filename) != str:
self._filename = 'output.'
self._filename += self._format
self._writers = {
'PDB' : self._write_PDB,
'DX' : self._write_DX,
'GIST' : self._write_GIST
}
data = self._writers[self._format]()
o = open(self._filename, "w")
o.write(data)
o.close()
def _merge_x_y_z(self):
return np.stack( ( self._x, self._y, self._z ), axis=1 )
def _write_PDB(self):
"""
Write a PDB file.
This is intended for debugging. It writes all atoms
as HETATM of element X with resname MAP.
"""
if are_you_numpy(self._xyz):
if self._xyz.shape[-1] != 3:
raise TypeError(
"XYZ array has wrong shape.")
else:
            if not ( are_you_numpy(self._x) and are_you_numpy(self._y) and are_you_numpy(self._z) ):
                raise TypeError(
                    "If XYZ is not given, x, y and z coordinates "
                    "must be given in separate arrays.")
else:
self._xyz = self._merge_x_y_z()
        if self._value is None:
self._value = np.zeros( len(self._xyz), dtype=float )
data = 'REMARK File written by write_files.py\n'
for xyz_i, xyz in enumerate(self._xyz):
#iterate over uppercase letters
            chain_id = ascii_uppercase[( len(str(xyz_i+1)) // 5 )]
            atom_counts = xyz_i - ( len(str(xyz_i+1)) // 6 ) * 100000
            resi_counts = xyz_i - ( len(str(xyz_i+1)) // 5 ) * 10000
data += \
'%-6s%5d %4s%1s%3s %1s%4d%1s %8.3f%8.3f%8.3f%6.2f%6.2f \n' \
%('HETATM',atom_counts+1,'X','', 'MAP', chain_id, resi_counts+1, '', xyz[0], xyz[1], xyz[2], 0.00, float( self._value[xyz_i] ) )
data += 'END\n'
return data
def _write_DX(self):
"""
Writes DX files according to openDX standard.
"""
        if not ( are_you_numpy(self._origin) and are_you_numpy(self._bins) ):
            raise TypeError(
                "Origin and bins must be given.")
        # Raise if both or neither of delta and frac2real are given (exactly one is required)
if are_you_numpy(self._delta) == are_you_numpy(self._frac2real) :
raise TypeError(
"Either delta or frac2real must be given.")
if are_you_numpy(self._delta):
self._frac2real = np.zeros((3,3), dtype=float)
np.fill_diagonal(self._frac2real, self._delta)
data = '''object 1 class gridpositions counts %d %d %d
origin %8.4f %8.4f %8.4f
delta %8.4f %8.4f %8.4f
delta %8.4f %8.4f %8.4f
delta %8.4f %8.4f %8.4f
object 2 class gridconnections counts %d %d %d
object 3 class array type float rank 0 items %d data follows
''' %(self._bins[0], self._bins[1], self._bins[2],\
self._origin[0], self._origin[1], self._origin[2],\
self._frac2real[0][0], self._frac2real[0][1], self._frac2real[0][2],\
self._frac2real[1][0], self._frac2real[1][1], self._frac2real[1][2],\
self._frac2real[2][0], self._frac2real[2][1], self._frac2real[2][2],\
self._bins[0], self._bins[1], self._bins[2],\
self._bins[2] * self._bins[1] * self._bins[0])
i = 0
for x_i in range(0, self._bins[0]):
for y_i in range(0, self._bins[1]):
for z_i in range(0, self._bins[2]):
### writing an integer instead of float
### saves us some disk space
if np.isnan(self._value[x_i][y_i][z_i]):
data += str(self._nan_fill) + " "
else:
if self._value[x_i][y_i][z_i] == 0.0:
data += "0 "
else:
data += str(self._value[x_i][y_i][z_i]) + ' '
i += 1
if i == 3:
data += '\n'
i = 0
return data
def _write_GIST(self):
"""
To be implemented...
"""
pass
class PDB(object):
"""
Class that reads a pdb file and provides pdb type data structure.
"""
def __init__(self, Path):
self.path = Path
self.crd = list()
self.B = list()
with open(self.path, "r") as PDB_file:
for i, line in enumerate(PDB_file):
if not (line[0:6].rstrip() == 'ATOM' or line[0:6].rstrip() == 'HETATM'):
continue
if i <= 9999:
#Coordinates
self.crd.append(list())
self.crd[-1].append(float(line.rstrip()[30:38]))
self.crd[-1].append(float(line.rstrip()[38:46]))
self.crd[-1].append(float(line.rstrip()[46:54]))
#B-Factors
self.B.append(line.rstrip()[54:59])
if 9999 < i <= 99999:
#Coordinates
self.crd.append(list())
self.crd[-1].append(float(line.rstrip()[31:39]))
self.crd[-1].append(float(line.rstrip()[39:47]))
self.crd[-1].append(float(line.rstrip()[47:55]))
#B-Factors
self.B.append(line.rstrip()[55:60])
if i > 99999:
#Coordinates
self.crd.append(list())
self.crd[-1].append(float(line.rstrip()[33:41]))
self.crd[-1].append(float(line.rstrip()[41:49]))
self.crd[-1].append(float(line.rstrip()[49:57]))
#B-Factors
self.B.append(line.rstrip()[57:62])
self.crd = np.array(self.crd)
self.B = np.array(self.B)
def guess_field(crds, delta=np.array([0.5,0.5,0.5])):
_c = np.mean(crds, axis=0)
_min = np.min((_c - crds), axis=0)
_max = np.max((_c - crds), axis=0)
_b = np.rint(np.abs(_max - _min)/delta + (5.0 / delta) )
del _min, _max
return field(Bins=_b, Delta=delta, Center=_c)
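### Illustrative usage sketch (added for clarity, not part of the original module):
### write a small, made-up 3x3x3 grid of zeros to an openDX file with the
### write_files class above. All numbers here are arbitrary example values.
if __name__ == "__main__":
    _example_bins = np.array([3, 3, 3])
    _example_origin = np.array([0.0, 0.0, 0.0])
    _example_delta = np.array([0.5, 0.5, 0.5])
    _example_values = np.zeros((3, 3, 3), dtype=float)
    write_files(Delta=_example_delta, Bins=_example_bins, Origin=_example_origin,
                Value=_example_values, Format='DX', Filename='example.dx')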
|
406034
|
import pandas as pd
import os
from typing import Union
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib as mpl
def series_to_colors(s: pd.Series, color_palette_map: str = 'husl', cdict: dict = None):
"""
Convert a pandas series to a color_map.
-----------------------
Args:
* s: pd.Series
* color_palette_map: str for color map
* cdict: dict of custom value - color mappings
Ex. cdict = {'Low': 'Green', 'Intermediate':'Yellow', 'High': 'Red'}
Returns:
* pd.Series: series mapped to RGB values
* color_map: dictionary mapping unique values in series to color
"""
if cdict is None:
color_labels = s.unique()
rgb_values = sns.color_palette(color_palette_map, len(color_labels))
color_map = dict(zip(color_labels, rgb_values))
else:
color_map = cdict
for key in color_map:
if isinstance(key, type(None)) or (isinstance(key, float) and np.isnan(key)):
color_map[key] = (1.0,1.0,1.0,1.0)
return s.map(color_map), color_map
def color_list_to_matrix_and_cmap(colors: list, order_dict: dict = None):
"""
Stripped from Seaborn.
-----------------------
Turns a list of colors into a numpy matrix and matplotlib colormap
These arguments can now be plotted using heatmap(matrix, cmap)
and the provided colors will be plotted.
Args:
* colors : list of matplotlib colors
Colors to label the rows or columns of a dataframe.
* order_dict: explicit color ordering to provide
Ex. order_dict = {'Green': 0, 'Yellow': 1, 'Red': 2}
Returns:
* matrix : numpy.array
A numpy array of integer values, where each corresponds to a color
from the originally provided list of colors
* cmap : matplotlib.colors.ListedColormap
"""
all_colors = set(colors)
m = len(colors)
colors = [colors]
if order_dict is None:
color_to_value = dict((col, i) for i, col in enumerate(all_colors))
else:
color_to_value = order_dict
matrix = np.array([color_to_value[c] for color in colors for c in color])
matrix = matrix.reshape((1,m))
    return matrix, mpl.colors.ListedColormap(list(color_to_value.keys()))
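### Illustrative usage sketch (added for clarity, not part of the original module):
### map a small categorical Series to colors and draw it as a one-row heatmap.
### The example values below are arbitrary.
if __name__ == "__main__":
    example_series = pd.Series(["Low", "High", "Low", "Intermediate"])
    colored_series, mapping = series_to_colors(example_series)
    matrix, cmap = color_list_to_matrix_and_cmap(list(colored_series))
    sns.heatmap(matrix, cmap=cmap, cbar=False, xticklabels=False, yticklabels=False)
    plt.show()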
|
406097
|
import json
import sys
import os
from typing import List
import semver
ROOT_DIR = os.path.dirname(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir))
APACHE_AIRFLOW_ARCHIVE = os.path.join(ROOT_DIR, "docs-archive")
def get_all_versions(directory: str) -> List[str]:
return sorted(
(f for f in os.listdir(directory) if os.path.isdir(os.path.join(directory, f))),
key=lambda d: semver.VersionInfo.parse(d),
)
def get_stable_version(directory: str):
with open(os.path.join(directory, "stable.txt")) as f:
return f.read().strip()
def dump_docs_package_metadata():
all_packages_infos = [
{
"package-name": package_name,
"stable-version": get_stable_version(os.path.join(APACHE_AIRFLOW_ARCHIVE, package_name)),
"all-versions": get_all_versions(os.path.join(APACHE_AIRFLOW_ARCHIVE, package_name)),
}
for package_name in os.listdir(APACHE_AIRFLOW_ARCHIVE)
if (
not package_name.startswith(".") and # Exclude .DS_Store/
            os.path.isfile(os.path.join(APACHE_AIRFLOW_ARCHIVE, package_name, 'stable.txt'))
)
]
json.dump(all_packages_infos, sys.stdout, indent=2)
dump_docs_package_metadata()
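### Illustrative note (added for clarity, not part of the original script): the
### metadata is printed to stdout as a JSON list with one entry per package,
### shaped roughly like the following (package name and versions are made-up
### examples):
###
### [
###   {
###     "package-name": "apache-airflow",
###     "stable-version": "2.2.0",
###     "all-versions": ["2.1.0", "2.2.0"]
###   }
### ]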
|
406107
|
import os
import pytest
import yaml
from dbt.tests.util import run_dbt
from tests.functional.sources.fixtures import (
models__schema_yml,
models__view_model_sql,
models__ephemeral_model_sql,
models__descendant_model_sql,
models__multi_source_model_sql,
models__nonsource_descendant_sql,
seeds__source_csv,
seeds__other_table_csv,
seeds__expected_multi_source_csv,
seeds__other_source_table_csv,
)
class BaseSourcesTest:
@pytest.fixture(scope="class", autouse=True)
def setEnvVars(self):
os.environ["DBT_TEST_SCHEMA_NAME_VARIABLE"] = "test_run_schema"
yield
del os.environ["DBT_TEST_SCHEMA_NAME_VARIABLE"]
@pytest.fixture(scope="class")
def models(self):
return {
"schema.yml": models__schema_yml,
"view_model.sql": models__view_model_sql,
"ephemeral_model.sql": models__ephemeral_model_sql,
"descendant_model.sql": models__descendant_model_sql,
"multi_source_model.sql": models__multi_source_model_sql,
"nonsource_descendant.sql": models__nonsource_descendant_sql,
}
@pytest.fixture(scope="class")
def seeds(self):
return {
"source.csv": seeds__source_csv,
"other_table.csv": seeds__other_table_csv,
"expected_multi_source.csv": seeds__expected_multi_source_csv,
"other_source_table.csv": seeds__other_source_table_csv,
}
@pytest.fixture(scope="class")
def project_config_update(self):
return {
"config-version": 2,
"seed-paths": ["seeds"],
"quoting": {"database": True, "schema": True, "identifier": True},
"seeds": {
"quote_columns": True,
},
}
def run_dbt_with_vars(self, project, cmd, *args, **kwargs):
vars_dict = {
"test_run_schema": project.test_schema,
"test_loaded_at": project.adapter.quote("updated_at"),
}
cmd.extend(["--vars", yaml.safe_dump(vars_dict)])
return run_dbt(cmd, *args, **kwargs)
|
406148
|
import numpy as np
def net_2_deeper_net(bias, noise_std=0.01):
"""
This is a similar idea to net 2 deeper net from http://arxiv.org/pdf/1511.05641.pdf
Assumes that this is a linear layer that is being extended and also adds some noise
Args:
bias (numpy.array): The bias for the layer we are adding after
noise_std (Optional float): The amount of normal noise to add to the layer.
If None then no noise is added
Default is 0.01
Returns:
(numpy.matrix, numpy.array)
The first item is the weights for the new layer
Second item is the bias for the new layer
"""
new_weights = np.matrix(np.eye(bias.shape[0], dtype=bias.dtype))
new_bias = np.zeros(bias.shape, dtype=bias.dtype)
if noise_std:
new_weights = new_weights + np.random.normal(scale=noise_std, size=new_weights.shape)
new_bias = new_bias + np.random.normal(scale=noise_std, size=new_bias.shape)
return new_weights, new_bias
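### Illustrative usage sketch (added for clarity, not part of the original module):
### grow a network by one layer after an existing layer with three output units.
### The bias values below are arbitrary examples.
if __name__ == "__main__":
    existing_bias = np.array([0.1, -0.2, 0.05])
    new_weights, new_bias = net_2_deeper_net(existing_bias, noise_std=0.01)
    print(new_weights.shape)  # (3, 3): identity matrix plus small Gaussian noise
    print(new_bias.shape)     # (3,): near-zero bias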
|
406170
|
import os
from typing import Union, Tuple, List, NamedTuple
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from backbone.base import Base as BackboneBase
from bbox import BBox
from nms.nms import NMS
from roi.wrapper import Wrapper as ROIWrapper
from rpn.region_proposal_network import RegionProposalNetwork
class Model(nn.Module):
class ForwardInput(object):
class Train(NamedTuple):
image: Tensor
gt_classes: Tensor
gt_bboxes: Tensor
class Eval(NamedTuple):
image: Tensor
class ForwardOutput(object):
class Train(NamedTuple):
anchor_objectness_loss: Tensor
anchor_transformer_loss: Tensor
proposal_class_loss: Tensor
proposal_transformer_loss: Tensor
class Eval(NamedTuple):
detection_bboxes: Tensor
detection_classes: Tensor
detection_probs: Tensor
def __init__(self, backbone: BackboneBase, num_classes: int, pooling_mode: ROIWrapper.Mode,
anchor_ratios: List[Tuple[int, int]], anchor_scales: List[int], rpn_pre_nms_top_n: int, rpn_post_nms_top_n: int):
super().__init__()
conv_layers, lateral_layers, dealiasing_layers, num_features_out = backbone.features()
self.conv1, self.conv2, self.conv3, self.conv4, self.conv5 = conv_layers
self.lateral_c2, self.lateral_c3, self.lateral_c4, self.lateral_c5 = lateral_layers
self.dealiasing_p2, self.dealiasing_p3, self.dealiasing_p4 = dealiasing_layers
self._bn_modules = [it for it in self.conv1.modules() if isinstance(it, nn.BatchNorm2d)] + \
[it for it in self.conv2.modules() if isinstance(it, nn.BatchNorm2d)] + \
[it for it in self.conv3.modules() if isinstance(it, nn.BatchNorm2d)] + \
[it for it in self.conv4.modules() if isinstance(it, nn.BatchNorm2d)] + \
[it for it in self.conv5.modules() if isinstance(it, nn.BatchNorm2d)] + \
[it for it in self.lateral_c2.modules() if isinstance(it, nn.BatchNorm2d)] + \
[it for it in self.lateral_c3.modules() if isinstance(it, nn.BatchNorm2d)] + \
[it for it in self.lateral_c4.modules() if isinstance(it, nn.BatchNorm2d)] + \
[it for it in self.lateral_c5.modules() if isinstance(it, nn.BatchNorm2d)] + \
[it for it in self.dealiasing_p2.modules() if isinstance(it, nn.BatchNorm2d)] + \
[it for it in self.dealiasing_p3.modules() if isinstance(it, nn.BatchNorm2d)] + \
[it for it in self.dealiasing_p4.modules() if isinstance(it, nn.BatchNorm2d)]
self.num_classes = num_classes
self.rpn = RegionProposalNetwork(num_features_out, anchor_ratios, anchor_scales, rpn_pre_nms_top_n, rpn_post_nms_top_n)
self.detection = Model.Detection(pooling_mode, self.num_classes)
def forward(self, forward_input: Union[ForwardInput.Train, ForwardInput.Eval]) -> Union[ForwardOutput.Train, ForwardOutput.Eval]:
# freeze batch normalization modules for each forwarding process just in case model was switched to `train` at any time
for bn_module in self._bn_modules:
bn_module.eval()
for parameter in bn_module.parameters():
parameter.requires_grad = False
image = forward_input.image.unsqueeze(dim=0)
image_height, image_width = image.shape[2], image.shape[3]
# Bottom-up pathway
c1 = self.conv1(image)
c2 = self.conv2(c1)
c3 = self.conv3(c2)
c4 = self.conv4(c3)
c5 = self.conv5(c4)
# Top-down pathway and lateral connections
p5 = self.lateral_c5(c5)
p4 = self.lateral_c4(c4) + F.interpolate(input=p5, size=(c4.shape[2], c4.shape[3]), mode='nearest')
p3 = self.lateral_c3(c3) + F.interpolate(input=p4, size=(c3.shape[2], c3.shape[3]), mode='nearest')
p2 = self.lateral_c2(c2) + F.interpolate(input=p3, size=(c2.shape[2], c2.shape[3]), mode='nearest')
# Reduce the aliasing effect
p4 = self.dealiasing_p4(p4)
p3 = self.dealiasing_p3(p3)
p2 = self.dealiasing_p2(p2)
p6 = F.max_pool2d(input=p5, kernel_size=1, stride=2)
# NOTE: We define the anchors to have areas of {32^2, 64^2, 128^2, 256^2, 512^2} pixels on {P2, P3, P4, P5, P6} respectively
anchor_objectnesses = []
anchor_transformers = []
anchor_bboxes = []
proposal_bboxes = []
for p, anchor_size in zip([p2, p3, p4, p5, p6], [32, 64, 128, 256, 512]):
p_anchor_objectnesses, p_anchor_transformers = self.rpn.forward(features=p, image_width=image_width, image_height=image_height)
p_anchor_bboxes = self.rpn.generate_anchors(image_width, image_height,
num_x_anchors=p.shape[3], num_y_anchors=p.shape[2],
anchor_size=anchor_size).cuda()
p_proposal_bboxes = self.rpn.generate_proposals(p_anchor_bboxes, p_anchor_objectnesses, p_anchor_transformers,
image_width, image_height)
anchor_objectnesses.append(p_anchor_objectnesses)
anchor_transformers.append(p_anchor_transformers)
anchor_bboxes.append(p_anchor_bboxes)
proposal_bboxes.append(p_proposal_bboxes)
anchor_objectnesses = torch.cat(anchor_objectnesses, dim=0)
anchor_transformers = torch.cat(anchor_transformers, dim=0)
anchor_bboxes = torch.cat(anchor_bboxes, dim=0)
proposal_bboxes = torch.cat(proposal_bboxes, dim=0)
if self.training:
forward_input: Model.ForwardInput.Train
anchor_sample_fg_indices, anchor_sample_selected_indices, gt_anchor_objectnesses, gt_anchor_transformers = self.rpn.sample(anchor_bboxes, forward_input.gt_bboxes, image_width, image_height)
anchor_objectnesses = anchor_objectnesses[anchor_sample_selected_indices]
anchor_transformers = anchor_transformers[anchor_sample_fg_indices]
anchor_objectness_loss, anchor_transformer_loss = self.rpn.loss(anchor_objectnesses, anchor_transformers, gt_anchor_objectnesses, gt_anchor_transformers)
proposal_sample_fg_indices, proposal_sample_selected_indices, gt_proposal_classes, gt_proposal_transformers = self.detection.sample(proposal_bboxes, forward_input.gt_classes, forward_input.gt_bboxes)
proposal_bboxes = proposal_bboxes[proposal_sample_selected_indices]
proposal_classes, proposal_transformers = self.detection.forward(p2, p3, p4, p5, proposal_bboxes, image_width, image_height)
proposal_class_loss, proposal_transformer_loss = self.detection.loss(proposal_classes, proposal_transformers, gt_proposal_classes, gt_proposal_transformers)
forward_output = Model.ForwardOutput.Train(anchor_objectness_loss, anchor_transformer_loss, proposal_class_loss, proposal_transformer_loss)
else:
proposal_classes, proposal_transformers = self.detection.forward(p2, p3, p4, p5, proposal_bboxes, image_width, image_height)
detection_bboxes, detection_classes, detection_probs = self.detection.generate_detections(proposal_bboxes, proposal_classes, proposal_transformers, image_width, image_height)
forward_output = Model.ForwardOutput.Eval(detection_bboxes, detection_classes, detection_probs)
return forward_output
def save(self, path_to_checkpoints_dir: str, step: int, optimizer: Optimizer, scheduler: _LRScheduler) -> str:
path_to_checkpoint = os.path.join(path_to_checkpoints_dir, f'model-{step}.pth')
checkpoint = {
'state_dict': self.state_dict(),
'step': step,
'optimizer_state_dict': optimizer.state_dict(),
'scheduler_state_dict': scheduler.state_dict()
}
torch.save(checkpoint, path_to_checkpoint)
return path_to_checkpoint
def load(self, path_to_checkpoint: str, optimizer: Optimizer = None, scheduler: _LRScheduler = None) -> 'Model':
checkpoint = torch.load(path_to_checkpoint)
self.load_state_dict(checkpoint['state_dict'])
step = checkpoint['step']
if optimizer is not None:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if scheduler is not None:
scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
return step
class Detection(nn.Module):
def __init__(self, pooling_mode: ROIWrapper.Mode, num_classes: int):
super().__init__()
self._pooling_mode = pooling_mode
self._hidden = nn.Sequential(
nn.Linear(256 * 7 * 7, 1024),
nn.ReLU(),
nn.Linear(1024, 1024),
nn.ReLU()
)
self.num_classes = num_classes
self._class = nn.Linear(1024, num_classes)
self._transformer = nn.Linear(1024, num_classes * 4)
self._transformer_normalize_mean = torch.tensor([0., 0., 0., 0.], dtype=torch.float).cuda()
self._transformer_normalize_std = torch.tensor([.1, .1, .2, .2], dtype=torch.float).cuda()
def forward(self, p2: Tensor, p3: Tensor, p4: Tensor, p5: Tensor, proposal_bboxes: Tensor, image_width: int, image_height: int) -> Tuple[Tensor, Tensor]:
w = proposal_bboxes[:, 2] - proposal_bboxes[:, 0]
h = proposal_bboxes[:, 3] - proposal_bboxes[:, 1]
k0 = 4
k = torch.floor(k0 + torch.log2(torch.sqrt(w * h) / 224)).long()
k = torch.clamp(k, min=2, max=5)
k_to_p_dict = {2: p2, 3: p3, 4: p4, 5: p5}
unique_k = torch.unique(k)
# NOTE: `picked_indices` is for recording the order of selection from `proposal_bboxes`
# so that `pools` can be then restored to make it have a consistent correspondence
# with `proposal_bboxes`. For example:
#
# proposal_bboxes => B0 B1 B2
# picked_indices => 1 2 0
# pools => BP1 BP2 BP0
# sorted_indices => 2 0 1
# pools => BP0 BP1 BP2
pools = []
picked_indices = []
for uk in unique_k:
uk = uk.item()
p = k_to_p_dict[uk]
uk_indices = (k == uk).nonzero().view(-1)
uk_proposal_bboxes = proposal_bboxes[uk_indices]
pool = ROIWrapper.apply(p, uk_proposal_bboxes, mode=self._pooling_mode, image_width=image_width, image_height=image_height)
pools.append(pool)
picked_indices.append(uk_indices)
pools = torch.cat(pools, dim=0)
picked_indices = torch.cat(picked_indices, dim=0)
_, sorted_indices = torch.sort(picked_indices)
pools = pools[sorted_indices]
pools = pools.view(pools.shape[0], -1)
hidden = self._hidden(pools)
classes = self._class(hidden)
transformers = self._transformer(hidden)
return classes, transformers
def sample(self, proposal_bboxes: Tensor, gt_classes: Tensor, gt_bboxes: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
sample_fg_indices = torch.arange(end=len(proposal_bboxes), dtype=torch.long)
sample_selected_indices = torch.arange(end=len(proposal_bboxes), dtype=torch.long)
# find labels for each `proposal_bboxes`
labels = torch.ones(len(proposal_bboxes), dtype=torch.long).cuda() * -1
ious = BBox.iou(proposal_bboxes, gt_bboxes)
proposal_max_ious, proposal_assignments = ious.max(dim=1)
labels[proposal_max_ious < 0.5] = 0
labels[proposal_max_ious >= 0.5] = gt_classes[proposal_assignments[proposal_max_ious >= 0.5]]
# select 128 samples
fg_indices = (labels > 0).nonzero().view(-1)
bg_indices = (labels == 0).nonzero().view(-1)
fg_indices = fg_indices[torch.randperm(len(fg_indices))[:min(len(fg_indices), 32)]]
bg_indices = bg_indices[torch.randperm(len(bg_indices))[:128 - len(fg_indices)]]
selected_indices = torch.cat([fg_indices, bg_indices])
selected_indices = selected_indices[torch.randperm(len(selected_indices))]
proposal_bboxes = proposal_bboxes[selected_indices]
gt_proposal_transformers = BBox.calc_transformer(proposal_bboxes, gt_bboxes[proposal_assignments[selected_indices]])
gt_proposal_classes = labels[selected_indices]
gt_proposal_transformers = (gt_proposal_transformers - self._transformer_normalize_mean) / self._transformer_normalize_std
gt_proposal_transformers = gt_proposal_transformers.cuda()
gt_proposal_classes = gt_proposal_classes.cuda()
sample_fg_indices = sample_fg_indices[fg_indices]
sample_selected_indices = sample_selected_indices[selected_indices]
return sample_fg_indices, sample_selected_indices, gt_proposal_classes, gt_proposal_transformers
def loss(self, proposal_classes: Tensor, proposal_transformers: Tensor, gt_proposal_classes: Tensor, gt_proposal_transformers: Tensor) -> Tuple[Tensor, Tensor]:
cross_entropy = F.cross_entropy(input=proposal_classes, target=gt_proposal_classes)
proposal_transformers = proposal_transformers.view(-1, self.num_classes, 4)
proposal_transformers = proposal_transformers[torch.arange(end=len(proposal_transformers), dtype=torch.long).cuda(), gt_proposal_classes]
fg_indices = gt_proposal_classes.nonzero().view(-1)
# NOTE: The default of `reduction` is `elementwise_mean`, which is divided by N x 4 (number of all elements), here we replaced by N for better performance
smooth_l1_loss = F.smooth_l1_loss(input=proposal_transformers[fg_indices], target=gt_proposal_transformers[fg_indices], reduction='sum')
smooth_l1_loss /= len(gt_proposal_transformers)
return cross_entropy, smooth_l1_loss
def generate_detections(self, proposal_bboxes: Tensor, proposal_classes: Tensor, proposal_transformers: Tensor, image_width: int, image_height: int) -> Tuple[Tensor, Tensor, Tensor]:
proposal_transformers = proposal_transformers.view(-1, self.num_classes, 4)
mean = self._transformer_normalize_mean.repeat(1, self.num_classes, 1)
std = self._transformer_normalize_std.repeat(1, self.num_classes, 1)
proposal_transformers = proposal_transformers * std - mean
proposal_bboxes = proposal_bboxes.view(-1, 1, 4).repeat(1, self.num_classes, 1)
detection_bboxes = BBox.apply_transformer(proposal_bboxes.view(-1, 4), proposal_transformers.view(-1, 4))
detection_bboxes = detection_bboxes.view(-1, self.num_classes, 4)
detection_bboxes[:, :, [0, 2]] = detection_bboxes[:, :, [0, 2]].clamp(min=0, max=image_width)
detection_bboxes[:, :, [1, 3]] = detection_bboxes[:, :, [1, 3]].clamp(min=0, max=image_height)
proposal_probs = F.softmax(proposal_classes, dim=1)
detection_bboxes = detection_bboxes.cpu()
proposal_probs = proposal_probs.cpu()
generated_bboxes = []
generated_classes = []
generated_probs = []
for c in range(1, self.num_classes):
detection_class_bboxes = detection_bboxes[:, c, :]
proposal_class_probs = proposal_probs[:, c]
_, sorted_indices = proposal_class_probs.sort(descending=True)
detection_class_bboxes = detection_class_bboxes[sorted_indices]
proposal_class_probs = proposal_class_probs[sorted_indices]
kept_indices = NMS.suppress(detection_class_bboxes.cuda(), threshold=0.3)
detection_class_bboxes = detection_class_bboxes[kept_indices]
proposal_class_probs = proposal_class_probs[kept_indices]
generated_bboxes.append(detection_class_bboxes)
generated_classes.append(torch.ones(len(kept_indices), dtype=torch.int) * c)
generated_probs.append(proposal_class_probs)
generated_bboxes = torch.cat(generated_bboxes, dim=0)
generated_classes = torch.cat(generated_classes, dim=0)
generated_probs = torch.cat(generated_probs, dim=0)
return generated_bboxes, generated_classes, generated_probs
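### Illustrative sketch (added for clarity, not part of the original module): the
### RoI-to-pyramid-level assignment in Detection.forward follows Eq. (1) of the
### FPN paper, k = floor(k0 + log2(sqrt(w*h) / 224)) with k0 = 4, clamped to the
### available levels P2..P5. The example box sizes below are arbitrary.
if __name__ == "__main__":
    example_bboxes = torch.tensor([[0.0, 0.0, 32.0, 32.0],      # small box  -> P2
                                   [0.0, 0.0, 224.0, 224.0],    # medium box -> P4
                                   [0.0, 0.0, 640.0, 640.0]])   # large box  -> P5
    w = example_bboxes[:, 2] - example_bboxes[:, 0]
    h = example_bboxes[:, 3] - example_bboxes[:, 1]
    k = torch.clamp(torch.floor(4 + torch.log2(torch.sqrt(w * h) / 224)).long(), min=2, max=5)
    print(k)  # tensor([2, 4, 5])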
|
406198
|
import pytest
from configmanager import Config, Item, RequiredValueMissing, Types, NotFound
def test_simple_config():
# Initialisation of a config manager
config = Config({
'greeting': 'Hello, world!',
'threads': 1,
'throttling_enabled': False,
})
# Attribute-based and key-based access to config items
assert config.greeting is config['greeting']
# Every config item is an instance of Item
assert isinstance(config.greeting, Item)
# Value and other attribute access on Item
assert config.greeting.value == 'Hello, world!'
assert config.threads.value == 1
assert config.threads.type == Types.int
assert config.throttling_enabled.value is False
assert config.throttling_enabled.type == Types.bool
# If you are working with items which don't have default values, you can use .get() method
# which accepts fallback value:
assert config.greeting.get() == 'Hello, world!'
assert config.greeting.get('Hey!') == 'Hello, world!'
# Can check if a config item is managed by the manager
assert 'greeting' in config
assert 'warning' not in config
# Can change values
config.greeting.value = 'Good evening!'
assert config.greeting.value == 'Good evening!'
# Can inspect default value
assert config.greeting.default == 'Hello, world!'
# Can export all values to a dictionary
assert config.dump_values() == {
'greeting': 'Good evening!',
'threads': 1,
'throttling_enabled': False,
}
# Can iterate over all items
items = dict(config.iter_items(recursive=True))
assert len(items) == 3
assert items[('greeting',)] is config.greeting
assert items[('threads',)] is config.threads
assert items[('throttling_enabled',)] is config.throttling_enabled
# Requesting unknown config raises NotFound
with pytest.raises(NotFound):
assert not config.other_things
# Cannot change item value incorrectly
with pytest.raises(TypeError):
config.greeting = 'Bye!'
with pytest.raises(TypeError):
config['greeting'] = 'Bye!'
def test_nested_config():
"""
This demonstrates how an application config can be created from multiple
sections (which in turn can be created from others).
"""
# Declaration of a config section may be a plain dictionary
db_config = {
'host': 'localhost',
'user': 'admin',
'password': '<PASSWORD>',
}
# Or, it may be an already functional instance of Config
server_config = Config({
'port': 8080,
})
#
# All these sections can be combined into one config:
#
config = Config({
'db': db_config,
'server': server_config,
'greeting': 'Hello', # and you can have plain config items next to sections
})
# You can load values
assert config.greeting.value == 'Hello'
# Your original schemas are safe -- db_config dictionary won't be changed
config.db.user.value = 'root'
assert config.db.user.value == 'root'
assert db_config['user'] == 'admin'
# You can also change values by reading them from a dictionary.
# Unknown names will be ignored unless you pass as_defaults=True
# but in that case you will overwrite any previously existing items.
config.load_values({'greeting': 'Good morning!', 'comments': {'enabled': False}})
assert config.greeting.value == 'Good morning!'
assert 'comments' not in config
# You can check if config value is the default value
assert not config.db.user.is_default
assert config.server.port.is_default
# Or if it has any value at all
assert config.server.port.has_value
# Iterate over all items (recursively)
all = dict(config.iter_items(recursive=True))
assert all[('db', 'host')] is config.db.host
assert all[('server', 'port')] is config.server.port
# Export all values
config_dict = config.dump_values()
assert config_dict['db'] == {'host': 'localhost', 'user': 'root', 'password': '<PASSWORD>'}
# Each section is a Config instance too, so you can export those separately too:
assert config.server.dump_values() == config_dict['server']
# You can reset individual items to their default values
assert config.db.user.value == 'root'
config.db.user.reset()
assert config.db.user.value == 'admin'
# Or sections
config.db.user.value = 'root_again'
assert config.db.user.value == 'root_again'
config.db.reset()
assert config.db.user.value == 'admin'
# Or you can reset all configuration and you can make sure all values match defaults
assert not config.is_default
config.reset()
assert config.is_default
def test_exceptions():
# Items marked as required raise ConfigValueMissing when their value is accessed
password = Item('password', required=True)
with pytest.raises(RequiredValueMissing):
assert not password.value
def test_configparser_integration(tmpdir):
defaults_ini = tmpdir.join('defaults.ini')
defaults_ini.write('')
defaults_ini_path = defaults_ini.strpath
custom_ini = tmpdir.join('custom.ini')
custom_ini.write('')
custom_ini_path = custom_ini.strpath
# Config sections expose ConfigParser adapter as configparser property:
config = Config()
# assuming that defaults.ini exists, this would initialise Config
# with all values mentioned in defaults.ini set as defaults.
# Just like with ConfigParser, this won't fail if the file does not exist.
config.configparser.load(defaults_ini_path, as_defaults=True)
# if you have already declared defaults, you can load custom
# configuration without specifying as_defaults=True:
config.configparser.load(custom_ini_path)
# when you are done setting config values, you can write them to a file.
config.configparser.dump(custom_ini_path)
# Note that default values won't be written unless you explicitly request it
# by passing with_defaults=True
config.configparser.dump(custom_ini_path, with_defaults=True)
|
406237
|
from django.http.response import HttpResponse
from django.shortcuts import render, get_object_or_404
from .models import *
# Create your views here.
def index(request):
warehouses = Warehouse.objects.all()
orders = Order.objects.filter(tracking="Pending")
context = {
"warehouse": warehouses,
"orders": orders
}
return render(request, 'app/index.html', context=context)
def process_order(request, pk):
if request.GET.get('get'):
house = request.GET.get('house')
warehouse = Warehouse.objects.get(pk=house)
# item = Item.objects.get(request)
# print(pk)
# print(item.pick_up_from())
# print(item.warehouse.all())
# if house not in item.warehouse.values('pk'):
# item.warehouse.add(warehouse)
# item.save()
order = get_object_or_404(Order, pk=pk)
order_dict = {}
for item in order.items.all():
order_dict[item.product.sku] = Warehouse.objects.filter(products__sku=item.product.sku)
context = {
"order": order,
"order_dict": order_dict
}
return render(request, 'app/order.html', context)
|
406265
|
deleteObject = True
editObject = True
getObject = {'id': 1234,
'fingerprint': 'aa:bb:cc:dd',
'label': 'label',
'notes': 'notes',
'key': 'ssh-rsa AAAAB3N...pa67 <EMAIL>'}
createObject = getObject
getAllObjects = [getObject]
|
406306
|
import sys
from optparse import OptionParser
from qrencode import encode_scaled, QR_ECLEVEL_H
from prettyqr.blobgrid import BlobGrid
from prettyqr.logos import clear_logo_space, get_svg_logo
from prettyqr.svg import svg_start, svg_end
def main():
parser = OptionParser(
usage="usage: %prog [options] text")
parser.add_option("-o", "--output", dest="output_filename",
help="write output to FILE", metavar="FLIE", default="output.svg")
parser.add_option("-l", "--logo", dest="logo_svg",
help="load logo (partial svg file) from FILE", metavar="FILE")
parser.add_option("-L", "--logo-raster", dest="logo_png",
help="load rasterized logo (png) from FILE", metavar="FILE")
parser.add_option("-c", "--color", dest="colour", default="#a54024",
help="use COLOR as secondary color")
parser.add_option("-m", "--min-size", type="int", dest="min_size",
default="40", help="pad output to minimum size for final QR image")
(options, args) = parser.parse_args()
if not args:
parser.print_help()
sys.exit()
qr = encode_scaled(args[0], options.min_size, level=QR_ECLEVEL_H)
image = qr[-1]
array = image.load()
# assume squares
size = image.size[0]
    class BlobGridQR(BlobGrid):
def get_value(self, x, y):
if not (0 <= x < size) or not (0 <= y < size):
return 0
return 1 - array[x, y] / 255
clear_logo_space(array, size, options.logo_png)
    blob_grid = BlobGridQR(size)
output = svg_start(size, options.colour)
output += blob_grid.draw_blobs()
output += get_svg_logo(options.logo_svg)
output += svg_end()
output_file = open(options.output_filename, 'w')
output_file.write(output)
output_file.close()
if __name__ == '__main__':
main()
|
406327
|
import medic
from maya import OpenMaya
class NonUniqueName(medic.PyTester):
def __init__(self):
super(NonUniqueName, self).__init__()
def Name(self):
return "NonUniqueName"
def Description(self):
return "Non unique name(s) exists"
def Match(self, node):
return node.object().hasFn(OpenMaya.MFn.kDagNode)
def IsFixable(self):
return True
def GetParameters(self):
con = medic.ParamContainer.Create()
p = medic.Parameter.Create("pattern", "Rename Pattern", medic.Types.String, "")
con.append(p)
return con
def test(self, node):
if node.dg().hasUniqueName():
return None
return medic.PyReport(node)
def __setNewName(self, node, base):
i = 1
sel = OpenMaya.MSelectionList()
while (True):
try:
sel.add("%s%d" % (base, i))
except:
break
i += 1
node.dg().setName("%s%d" % (base, i))
def fix(self, report, params):
node = report.node()
if node.dg().isFromReferencedFile():
return False
rename_target = node
if not node.dg().hasUniqueName():
if node.object().hasFn(OpenMaya.MFn.kShape) and node.isDag():
parents = node.parents()
if parents:
parent = parents[0]
if not parent.dg().hasUniqueName():
rename_target = parent
pattern = params.get("pattern")
base = ""
if pattern:
base = pattern
else:
base = rename_target.name().split("|")[-1] + "_"
self.__setNewName(rename_target, base)
return True
def Create():
return NonUniqueName()
|
406365
|
import os
import subprocess
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--profiling', type=str, default='NO', help='Run with profiler? - (YES/NO)')
parser.add_argument('--case_start', type=str, default='0', help='Testing range starting case # - (0-83)')
parser.add_argument('--case_end', type=str, default='83', help='Testing range ending case # - (0-83)')
args = parser.parse_args()
profilingOption = args.profiling
caseStart = args.case_start
caseEnd = args.case_end
if int(caseEnd) < int(caseStart):
    print("Ending case# must be greater than starting case#. Aborting!")
    exit(0)
if int(caseStart) < 0 or int(caseStart) > 83:
    print("Starting case# must be in the 0-83 range. Aborting!")
    exit(0)
if int(caseEnd) < 0 or int(caseEnd) > 83:
    print("Ending case# must be in the 0-83 range. Aborting!")
    exit(0)
if profilingOption == "NO":
subprocess.call(["./rawLogsGenScript.sh", "0", caseStart, caseEnd])
log_file_list = [
"../OUTPUT_PERFORMANCE_LOGS_HIP_NEW/BatchPD_hip_pkd3_hip_raw_performance_log.txt",
"../OUTPUT_PERFORMANCE_LOGS_HIP_NEW/BatchPD_hip_pln3_hip_raw_performance_log.txt",
"../OUTPUT_PERFORMANCE_LOGS_HIP_NEW/BatchPD_hip_pln1_hip_raw_performance_log.txt",
"../OUTPUT_PERFORMANCE_LOGS_HIP_NEW/Tensor_hip_pkd3_hip_raw_performance_log.txt",
"../OUTPUT_PERFORMANCE_LOGS_HIP_NEW/Tensor_hip_pln3_hip_raw_performance_log.txt",
"../OUTPUT_PERFORMANCE_LOGS_HIP_NEW/Tensor_hip_pln1_hip_raw_performance_log.txt"
]
functionality_group_list = [
"image_augmentations",
"statistical_functions",
"geometry_transforms",
"advanced_augmentations",
"fused_functions",
"morphological_transforms",
"color_model_conversions",
"filter_operations",
"arithmetic_operations",
"logical_operations",
"computer_vision"
]
for log_file in log_file_list:
# Open log file
try:
f = open(log_file,"r")
print("\n\n\nOpened log file -> ", log_file)
except IOError:
print("Skipping file -> ", log_file)
continue
stats = []
maxVals = []
minVals = []
avgVals = []
functions = []
frames = []
prevLine = ""
funcCount = 0
# Loop over each line
for line in f:
for functionality_group in functionality_group_list:
if functionality_group in line:
functions.extend([" ", functionality_group, " "])
frames.extend([" ", " ", " "])
maxVals.extend([" ", " ", " "])
minVals.extend([" ", " ", " "])
avgVals.extend([" ", " ", " "])
if "max,min,avg" in line:
split_word_start = "Running "
split_word_end = " 100"
prevLine = prevLine.partition(split_word_start)[2].partition(split_word_end)[0]
if prevLine not in functions:
functions.append(prevLine)
frames.append("100")
split_word_start = "max,min,avg = "
split_word_end = "\n"
stats = line.partition(split_word_start)[2].partition(split_word_end)[0].split(",")
maxVals.append(stats[0])
minVals.append(stats[1])
avgVals.append(stats[2])
funcCount += 1
if line != "\n":
prevLine = line
# Print log lengths
print("Functionalities - ", funcCount)
# Print summary of log
print("\n\nFunctionality\t\t\t\t\t\tFrames Count\tmax(s)\t\tmin(s)\t\tavg(s)\n")
if len(functions) != 0:
maxCharLength = len(max(functions, key=len))
functions = [x + (' ' * (maxCharLength - len(x))) for x in functions]
for i, func in enumerate(functions):
print(func, "\t", frames[i], "\t\t", maxVals[i], "\t", minVals[i], "\t", avgVals[i])
else:
print("No variants under this category")
# Close log file
f.close()
elif profilingOption == "YES":
NEW_FUNC_GROUP_LIST = [0, 15, 20, 29, 36, 40, 42, 49, 56, 65, 69]
# Functionality group finder
def func_group_finder(case_number):
if case_number == 0:
return "image_augmentations"
elif case_number == 15:
return "statistical_functions"
elif case_number == 20:
return "geometry_transforms"
elif case_number == 29:
return "advanced_augmentations"
elif case_number == 36:
return "fused_functions"
elif case_number == 40:
return "morphological_transforms"
elif case_number == 42:
return "color_model_conversions"
elif case_number == 49:
return "filter_operations"
elif case_number == 56:
return "arithmetic_operations"
elif case_number == 65:
return "logical_operations"
elif case_number == 69:
return "computer_vision"
subprocess.call(["./rawLogsGenScript.sh", "1", caseStart, caseEnd])
RESULTS_DIR = "../OUTPUT_PERFORMANCE_LOGS_HIP_NEW"
print("RESULTS_DIR = " + RESULTS_DIR)
CONSOLIDATED_FILE_BATCHPD_PKD3 = RESULTS_DIR + "/consolidated_results_BatchPD_PKD3.stats.csv"
CONSOLIDATED_FILE_BATCHPD_PLN1 = RESULTS_DIR + "/consolidated_results_BatchPD_PLN1.stats.csv"
CONSOLIDATED_FILE_BATCHPD_PLN3 = RESULTS_DIR + "/consolidated_results_BatchPD_PLN3.stats.csv"
CONSOLIDATED_FILE_TENSOR_PKD3 = RESULTS_DIR + "/consolidated_results_Tensor_PKD3.stats.csv"
CONSOLIDATED_FILE_TENSOR_PLN1 = RESULTS_DIR + "/consolidated_results_Tensor_PLN1.stats.csv"
CONSOLIDATED_FILE_TENSOR_PLN3 = RESULTS_DIR + "/consolidated_results_Tensor_PLN3.stats.csv"
TYPE_LIST = ["BatchPD_PKD3", "BatchPD_PLN1", "BatchPD_PLN3", "Tensor_PKD3", "Tensor_PLN1", "Tensor_PLN3"]
BATCHPD_TYPE_LIST = ["BatchPD_PKD3", "BatchPD_PLN1", "BatchPD_PLN3"]
TENSOR_TYPE_LIST = ["Tensor_PKD3", "Tensor_PLN1", "Tensor_PLN3"]
CASE_NUM_LIST = range(int(caseStart), int(caseEnd) + 1, 1)
BIT_DEPTH_LIST = range(0, 7, 1)
OFT_LIST = range(0, 2, 1)
d_counter = {"BatchPD_PKD3":0, "BatchPD_PLN1":0, "BatchPD_PLN3":0, "Tensor_PKD3":0, "Tensor_PLN1":0, "Tensor_PLN3":0}
for TYPE in TYPE_LIST:
# Open csv file
new_file = open(RESULTS_DIR + "/consolidated_results_" + TYPE + ".stats.csv",'w')
new_file.write('"HIP Kernel Name","Calls","TotalDurationNs","AverageNs","Percentage"\n')
prev=""
# Loop through cases
for CASE_NUM in CASE_NUM_LIST:
# Add functionality group header
if CASE_NUM in NEW_FUNC_GROUP_LIST:
FUNC_GROUP = func_group_finder(CASE_NUM)
new_file.write("0,0,0,0,0\n")
new_file.write(FUNC_GROUP + ",0,0,0,0\n")
new_file.write("0,0,0,0,0\n")
# Set results directory
CASE_RESULTS_DIR = RESULTS_DIR + "/" + TYPE + "/case_" + str(CASE_NUM)
print("CASE_RESULTS_DIR = " + CASE_RESULTS_DIR)
# Loop through bit depths
for BIT_DEPTH in BIT_DEPTH_LIST:
# Loop through output format toggle cases
for OFT in OFT_LIST:
if (CASE_NUM == 40 or CASE_NUM == 41 or CASE_NUM == 49) and TYPE.startswith("Tensor"):
KSIZE_LIST = [3, 5, 7, 9]
# Loop through extra param kSize for box_filter
for KSIZE in KSIZE_LIST:
# Write into csv file
CASE_FILE_PATH = CASE_RESULTS_DIR + "/output_case" + str(CASE_NUM) + "_bitDepth" + str(BIT_DEPTH) + "_oft" + str(OFT) + "_kSize" + str(KSIZE) + ".stats.csv"
print("CASE_FILE_PATH = " + CASE_FILE_PATH)
try:
case_file = open(CASE_FILE_PATH,'r')
for line in case_file:
print(line)
if not(line.startswith('"Name"')):
if TYPE in TENSOR_TYPE_LIST:
new_file.write(line)
d_counter[TYPE] = d_counter[TYPE] + 1
elif TYPE in BATCHPD_TYPE_LIST:
if prev != line.split(",")[0]:
new_file.write(line)
prev = line.split(",")[0]
d_counter[TYPE] = d_counter[TYPE] + 1
case_file.close()
except IOError:
print("Unable to open case results")
continue
else:
# Write into csv file
CASE_FILE_PATH = CASE_RESULTS_DIR + "/output_case" + str(CASE_NUM) + "_bitDepth" + str(BIT_DEPTH) + "_oft" + str(OFT) + ".stats.csv"
print("CASE_FILE_PATH = " + CASE_FILE_PATH)
try:
case_file = open(CASE_FILE_PATH,'r')
for line in case_file:
print(line)
if not(line.startswith('"Name"')):
if TYPE in TENSOR_TYPE_LIST:
new_file.write(line)
d_counter[TYPE] = d_counter[TYPE] + 1
elif TYPE in BATCHPD_TYPE_LIST:
if prev != line.split(",")[0]:
new_file.write(line)
prev = line.split(",")[0]
d_counter[TYPE] = d_counter[TYPE] + 1
case_file.close()
except IOError:
print("Unable to open case results")
continue
new_file.close()
os.system('chown $USER:$USER ' + RESULTS_DIR + "/consolidated_results_" + TYPE + ".stats.csv")
try:
import pandas as pd
pd.options.display.max_rows = None
# Generate performance report
for TYPE in TYPE_LIST:
print("\n\n\nKernels tested - ", d_counter[TYPE], "\n\n")
df = pd.read_csv(RESULTS_DIR + "/consolidated_results_" + TYPE + ".stats.csv")
df["AverageMs"] = df["AverageNs"] / 1000000
dfPrint = df.drop(['Percentage'], axis=1)
dfPrint["HIP Kernel Name"] = dfPrint.iloc[:,0].str.lstrip("Hip_")
dfPrint_noIndices = dfPrint.astype(str)
dfPrint_noIndices.replace(['0', '0.0'], '', inplace=True)
dfPrint_noIndices = dfPrint_noIndices.to_string(index=False)
print(dfPrint_noIndices)
except ImportError:
print("\nPandas not available! Results of GPU profiling experiment are available in the following files:\n" + \
CONSOLIDATED_FILE_BATCHPD_PKD3 + "\n" + \
CONSOLIDATED_FILE_BATCHPD_PLN1 + "\n" + \
CONSOLIDATED_FILE_BATCHPD_PLN3 + "\n" + \
CONSOLIDATED_FILE_TENSOR_PKD3 + "\n" + \
CONSOLIDATED_FILE_TENSOR_PLN1 + "\n" + \
CONSOLIDATED_FILE_TENSOR_PLN3 + "\n")
except IOError:
print("Unable to open results in " + RESULTS_DIR + "/consolidated_results_" + TYPE + ".stats.csv")
|
406427
|
import os
import datetime
import numpy as np
import tensorflow as tf
from sacred import Experiment
from sacred.observers import FileStorageObserver, RunObserver
from deep_rlsp.model import LatentSpaceModel, InverseDynamicsMDN
from deep_rlsp.run import get_problem_parameters
# changes the run _id and thereby the path that the FileStorageObserver
# writes the results
# cf. https://github.com/IDSIA/sacred/issues/174
class SetID(RunObserver):
priority = 50 # very high priority to set id
def started_event(
self, ex_info, command, host_info, start_time, config, meta_info, _id
):
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
custom_id = "{}_learn_dynamics_{}_{}".format(
timestamp, config["env_name"], config["problem_spec"]
)
return custom_id # started_event returns the _run._id
ex = Experiment("learn_dynamics_model")
ex.observers = [SetID(), FileStorageObserver.create("results")]
def _get_log_folders(checkpoint_base, tensorboard_base, label):
checkpoint_folder = os.path.join(checkpoint_base, label)
tensorboard_folder = os.path.join(tensorboard_base, label)
os.makedirs(checkpoint_folder, exist_ok=True)
os.makedirs(tensorboard_folder, exist_ok=True)
return checkpoint_folder, tensorboard_folder
def train_latent_space_model(
env,
hidden_layer_size,
rnn_state_size,
n_rollouts,
n_epochs,
batch_size,
learning_rate,
tensorboard_folder,
checkpoint_folder,
):
model = LatentSpaceModel(
env,
tensorboard_log=tensorboard_folder,
checkpoint_folder=checkpoint_folder,
learning_rate=learning_rate,
hidden_layer_size=hidden_layer_size,
rnn_state_size=rnn_state_size,
)
loss = model.learn(
n_rollouts=n_rollouts,
n_epochs=n_epochs,
batch_size=batch_size,
print_evaluation=True,
)
return model, loss
def train_inverse_dynamics_model(
env,
latent_model,
hidden_layer_size,
n_hidden_layers,
n_rollouts,
n_epochs,
batch_size,
learning_rate,
tensorboard_folder,
checkpoint_folder,
):
model = InverseDynamicsMDN(
env,
hidden_layer_size=hidden_layer_size,
n_hidden_layers=n_hidden_layers,
learning_rate=learning_rate,
tensorboard_log=tensorboard_folder,
checkpoint_folder=checkpoint_folder,
latent_space=latent_model,
)
loss = model.learn(
n_rollouts=n_rollouts,
n_epochs=n_epochs,
batch_size=batch_size,
print_evaluation=True,
)
return model, loss
@ex.config
def config():
env_name = "room" # noqa:F841
problem_spec = "default" # noqa:F841
n_rollouts_latent = 100 # noqa:F841
n_epochs_latent = 1 # noqa:F841
batch_size_latent = 32 # noqa:F841
learning_rate_latent = 1e-4 # noqa:F841
hidden_layer_size_latent = 200 # noqa:F841
rnn_state_size_latent = 30 # noqa:F841
n_rollouts_backward = 100 # noqa:F841
n_epochs_backward = 1 # noqa:F841
batch_size_backward = 32 # noqa:F841
learning_rate_backward = 1e-4 # noqa:F841
hidden_layer_size_backward = 512 # noqa:F841
n_hidden_layers_backward = 3 # noqa:F841
checkpoint_folder = "tf_ckpt" # noqa:F841
tensorboard_folder = "tf_logs" # noqa:F841
label_latent = None # noqa:F841
label_backward = None # noqa:F841
@ex.automain
def main(
_run,
seed,
env_name,
problem_spec,
n_rollouts_latent,
n_epochs_latent,
batch_size_latent,
learning_rate_latent,
hidden_layer_size_latent,
rnn_state_size_latent,
n_rollouts_backward,
n_epochs_backward,
batch_size_backward,
learning_rate_backward,
hidden_layer_size_backward,
n_hidden_layers_backward,
checkpoint_folder,
tensorboard_folder,
label_latent,
label_backward,
):
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") # noqa:F841
if label_latent is None:
label_latent = "{}_{}_{}_latent".format(env_name, problem_spec, timestamp)
if label_backward is None:
label_backward = "{}_{}_{}_backward".format(env_name, problem_spec, timestamp)
np.random.seed(seed)
tf.random.set_random_seed(seed)
print("Loading environment:", env_name)
env, _, _, _ = get_problem_parameters(env_name, problem_spec)
g1, g2 = tf.Graph(), tf.Graph()
print("Learning latent model")
checkpoint_folder_latent, tensorboard_folder_latent = _get_log_folders(
checkpoint_folder, tensorboard_folder, label_latent
)
with g1.as_default():
latent_model, latent_loss = train_latent_space_model(
env,
hidden_layer_size_latent,
rnn_state_size_latent,
n_rollouts_latent,
n_epochs_latent,
batch_size_latent,
learning_rate_latent,
tensorboard_folder_latent,
checkpoint_folder_latent,
)
print("Learning inverse model")
checkpoint_folder_backward, tensorboard_folder_backward = _get_log_folders(
checkpoint_folder, tensorboard_folder, label_backward
)
with g2.as_default():
inverse_dynamics_model, inverse_dynamics_loss = train_inverse_dynamics_model(
env,
latent_model,
hidden_layer_size_backward,
n_hidden_layers_backward,
n_rollouts_backward,
n_epochs_backward,
batch_size_backward,
learning_rate_backward,
tensorboard_folder_backward,
checkpoint_folder_backward,
)
results = {
"checkpoint_folder_latent": checkpoint_folder_latent,
"tensorboard_folder_latent": tensorboard_folder_latent,
"latent_loss": latent_loss,
"checkpoint_folder_backward": checkpoint_folder_backward,
"tensorboard_folder_backward": tensorboard_folder_backward,
"inverse_dynamics_loss": inverse_dynamics_loss,
}
print()
print("-------------------------")
for key, val in results.items():
print("{}: {}".format(key, val))
print("-------------------------")
print()
return results
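### Illustrative usage note (added for clarity, not part of the original script):
### because the experiment uses sacred's @ex.automain, it can be launched from
### the command line and config entries can be overridden with "with", e.g.:
###
###     python learn_dynamics.py with env_name=room n_epochs_latent=2
###
### The script file name above is an assumed example.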
|
406445
|
import os
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(-1.0, 1.0, 10000, endpoint=True)
ws1 = np.zeros_like(x)
ws2 = np.zeros_like(x)
ws1[np.abs(x) <= 1e-4] = 1
ws2[x > 0.99] = 1
plt.subplot(211)
plt.title("ws1")
plt.grid()
plt.xlim(-1, 1)
plt.ylim(-1.2, +1.2)
plt.plot(x, ws1)
plt.subplot(212)
plt.title("ws2")
plt.grid()
plt.xlim(-1, 1)
plt.ylim(-1.2, +1.2)
plt.plot(x, ws2)
plt.savefig("%s/img/IsZeroNodeWaveShape.png" % os.path.dirname(__file__))
|
406463
|
import pandas as pd
import numpy as np
#########################################################################################################
'''
Feature Engineering
'''
def create_name_feat(train, test):
for i in [train, test]:
i['Name_Len'] = i['Name'].apply(lambda x: len(x))
i['Name_Title'] = i['Name'].apply(lambda x: x.split(',')[1]).apply(lambda x: x.split()[0])
del i['Name']
return train, test
# There are 177 null values for Age, and those ones have a 10% lower survival rate than the non-nulls.
# Before imputing values for the nulls, we are including an Age_null flag just to make
# sure we can account for this characteristic of the data.
def age_impute(train, test):
for i in [train, test]:
i['Age_Null_Flag'] = i['Age'].apply(lambda x: 1 if pd.isnull(x) else 0)
data = train.groupby(['Name_Title', 'Pclass'])['Age']
i['Age'] = data.transform(lambda x: x.fillna(x.mean()))
return train, test
# We combine the SibSp and Parch columns to compute family size and bucket it into three levels
def fam_size(train, test):
for i in [train, test]:
i['Fam_Size'] = np.where((i['SibSp']+i['Parch']) == 0, 'One',
np.where((i['SibSp']+i['Parch']) <= 3, 'Small', 'Big'))
del i['SibSp']
del i['Parch']
return train, test
# We are using the Ticket column to create Ticket_Letr, which indicates the first letter
# of each ticket # and Ticket_Len, which indicates the length of the Ticket field
def ticket_grouped(train, test):
for i in [train, test]:
i['Ticket_Letr'] = i['Ticket'].apply(lambda x: str(x)[0])
i['Ticket_Letr'] = i['Ticket_Letr'].apply(lambda x: str(x))
i['Ticket_Letr'] = np.where((i['Ticket_Letr']).isin(['1', '2', '3', 'S', 'P', 'C', 'A']),
i['Ticket_Letr'],
np.where((i['Ticket_Letr']).isin(['W', '4', '7', '6', 'L', '5', '8']),
'Low_ticket', 'Other_ticket'))
i['Ticket_Len'] = i['Ticket'].apply(lambda x: len(x))
del i['Ticket']
return train, test
# Extract the first letter of the Cabin column
def cabin(train, test):
for i in [train, test]:
i['Cabin_Letter'] = i['Cabin'].apply(lambda x: str(x)[0])
del i['Cabin']
return train, test
# We fill the null values in the Embarked column with the most commonly occurring value, which is 'S'
def embarked_impute(train, test):
for i in [train, test]:
i['Embarked'] = i['Embarked'].fillna('S')
return train, test
# Convert our categorical columns into dummy variables
def dummies(train, test,
columns = ['Pclass', 'Sex', 'Embarked', 'Ticket_Letr', 'Cabin_Letter', 'Name_Title', 'Fam_Size']):
for column in columns:
train[column] = train[column].apply(lambda x: str(x))
test[column] = test[column].apply(lambda x: str(x))
good_cols = [column+'_'+i for i in train[column].unique() if i in test[column].unique()]
train = pd.concat((train, pd.get_dummies(train[column], prefix=column)[good_cols]), axis=1)
test = pd.concat((test, pd.get_dummies(test[column], prefix=column)[good_cols]), axis=1)
del train[column]
del test[column]
return train, test
def PrepareTarget(data):
return np.array(data.Survived, dtype='int8').reshape(-1, 1)
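### Illustrative usage sketch (added for clarity, not part of the original module):
### the helpers above are meant to be chained in order; age_impute groups on
### Name_Title, so create_name_feat must run first. The CSV paths are assumed
### example locations of the Titanic data.
if __name__ == "__main__":
    train = pd.read_csv("train.csv")
    test = pd.read_csv("test.csv")
    train, test = create_name_feat(train, test)
    train, test = age_impute(train, test)
    train, test = fam_size(train, test)
    train, test = ticket_grouped(train, test)
    train, test = cabin(train, test)
    train, test = embarked_impute(train, test)
    train, test = dummies(train, test)
    y = PrepareTarget(train)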
|
406476
|
from torch import nn
class ARCNN(nn.Module):
def __init__(self, n_colors=3):
super(ARCNN, self).__init__()
self.base = nn.Sequential(
nn.Conv2d(n_colors, 64, kernel_size=9, padding=4),
nn.PReLU(),
nn.Conv2d(64, 32, kernel_size=7, padding=3),
nn.PReLU(),
nn.Conv2d(32, 16, kernel_size=1),
nn.PReLU()
)
self.last = nn.Conv2d(16, n_colors, kernel_size=5, padding=2)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
def forward(self, x):
x = self.base(x)
x = self.last(x)
return x
class FastARCNN(nn.Module):
def __init__(self, n_colors=3):
super(FastARCNN, self).__init__()
self.base = nn.Sequential(
nn.Conv2d(n_colors, 64, kernel_size=9, stride=2, padding=4),
nn.PReLU(),
nn.Conv2d(64, 32, kernel_size=1),
nn.PReLU(),
nn.Conv2d(32, 32, kernel_size=7, padding=3),
nn.PReLU(),
nn.Conv2d(32, 64, kernel_size=1),
nn.PReLU()
)
self.last = nn.ConvTranspose2d(64, n_colors, kernel_size=9, stride=2, padding=4, output_padding=1)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
def forward(self, x):
x = self.base(x)
x = self.last(x)
return x
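### Illustrative usage sketch (added for clarity, not part of the original module):
### run a forward pass on a random image-shaped tensor to check output shapes.
### ARCNN keeps the spatial size; FastARCNN halves it internally and restores it
### with its transposed convolution.
if __name__ == "__main__":
    import torch
    dummy = torch.randn(1, 3, 64, 64)
    print(ARCNN()(dummy).shape)      # torch.Size([1, 3, 64, 64])
    print(FastARCNN()(dummy).shape)  # torch.Size([1, 3, 64, 64])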
|
406492
|
import pendulum
def test_year():
d = pendulum.Date(1234, 5, 6)
assert d.year == 1234
def test_month():
d = pendulum.Date(1234, 5, 6)
assert d.month == 5
def test_day():
d = pendulum.Date(1234, 5, 6)
assert d.day == 6
def test_day_of_week():
d = pendulum.Date(2012, 5, 7)
assert d.day_of_week == pendulum.MONDAY
def test_day_of_year():
d = pendulum.Date(2015, 12, 31)
assert d.day_of_year == 365
d = pendulum.Date(2016, 12, 31)
assert d.day_of_year == 366
def test_days_in_month():
d = pendulum.Date(2012, 5, 7)
assert d.days_in_month == 31
def test_age():
d = pendulum.Date.today()
assert d.age == 0
assert d.add(years=1).age == -1
assert d.subtract(years=1).age == 1
def test_is_leap_year():
assert pendulum.Date(2012, 1, 1).is_leap_year()
assert not pendulum.Date(2011, 1, 1).is_leap_year()
def test_is_long_year():
assert pendulum.Date(2015, 1, 1).is_long_year()
assert not pendulum.Date(2016, 1, 1).is_long_year()
def test_week_of_month():
assert pendulum.Date(2012, 9, 30).week_of_month == 5
assert pendulum.Date(2012, 9, 28).week_of_month == 5
assert pendulum.Date(2012, 9, 20).week_of_month == 4
assert pendulum.Date(2012, 9, 8).week_of_month == 2
assert pendulum.Date(2012, 9, 1).week_of_month == 1
assert pendulum.date(2020, 1, 1).week_of_month == 1
assert pendulum.date(2020, 1, 7).week_of_month == 2
assert pendulum.date(2020, 1, 14).week_of_month == 3
def test_week_of_year_first_week():
assert pendulum.Date(2012, 1, 1).week_of_year == 52
assert pendulum.Date(2012, 1, 2).week_of_year == 1
def test_week_of_year_last_week():
assert pendulum.Date(2012, 12, 30).week_of_year == 52
assert pendulum.Date(2012, 12, 31).week_of_year == 1
def test_is_future():
d = pendulum.Date.today()
assert not d.is_future()
d = d.add(days=1)
assert d.is_future()
def test_is_past():
d = pendulum.Date.today()
assert not d.is_past()
d = d.subtract(days=1)
assert d.is_past()
|
406519
|
import unittest
from programy.extensions.scheduler.scheduler import SchedulerExtension
from programytest.client import TestClient
class SchedulerExtensionClient(TestClient):
def __init__(self, mock_scheduler=None):
self._mock_scheduler = mock_scheduler
TestClient.__init__(self)
def load_configuration(self, arguments):
super(SchedulerExtensionClient, self).load_configuration(arguments)
def load_scheduler(self):
if self._mock_scheduler is not None:
self._scheduler = self._mock_scheduler
else:
super(SchedulerExtensionClient, self).load_scheduler()
class MockJob:
def __init__(self, id, userid):
self.args = [id, userid]
@property
def id(self):
return self.args[0]
class MockScheduler:
def __init__(self):
self._jobs = ()
def add_jobs(self, jobs):
self._jobs = jobs
def list_jobs(self):
return self._jobs
def pause_job (self, id):
pass
def resume_job (self, id):
pass
def stop_job (self, id):
pass
def schedule_every_n_seconds(self, userid, clientid, action, text, seconds):
pass
def schedule_every_n_minutes(self, userid, clientid, action, text, minutes):
pass
def schedule_every_n_hours(self, userid, clientid, action, text, hours):
pass
def schedule_every_n_days(self, userid, clientid, action, text, days):
pass
def schedule_every_n_weeks(self, userid, clientid, action, text, weeks):
pass
def schedule_every_n(self, userid, clientid, action, text, weeks=0, days=0, hours=0, minutes=0, seconds=0):
pass
def schedule_in_n_weeks(self, userid, clientid, action, text, weeks):
pass
def schedule_in_n_days(self, userid, clientid, action, text, days):
pass
def schedule_in_n_hours(self, userid, clientid, action, text, hours):
pass
def schedule_in_n_minutes(self, userid, clientid, action, text, minutes):
pass
def schedule_in_n_seconds(self, userid, clientid, action, text, seconds):
pass
class SchedulerExtensionTests(unittest.TestCase):
# SCHEDULE IN|EVERY X SECS|MINS|HOURS|DAYS|WEEKS TEXT|SRAI TEXT ...........
# PAUSE ALL|JOBID
# RESUME ALL|JOBID
# STOP ALL|JOBID
# LIST
def test_schedule_invalid(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
self.assertEquals("ERR", extension.execute(client_context, "OTHER"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE OTHER"))
def test_schedule_in_invalid(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE IN"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE IN 10"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE IN 10 OTHER"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE IN 10 MINUTES OTHER"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE IN 10 MINUTES TEXT"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE IN 10 MINUTES SRAI"))
def test_schedule_every_invalid(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE EVERY"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE EVER 10"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE EVER 10 OTHER"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE EVER 10 MINUTES OTHER"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE EVER 10 MINUTES TEXT"))
self.assertEquals("ERR", extension.execute(client_context, "SCHEDULE EVER 10 MINUTES SRAI"))
# IN XXXX
def test_schedule_in_n_seconds(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE IN 10 SECONDS TEXT WAKEY WAKEY")
self.assertEqual("OK", response)
def test_schedule_in_n_minutes(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE IN 10 MINUTES TEXT WAKEY WAKEY")
self.assertEqual("OK", response)
def test_schedule_in_n_hours(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE IN 10 HOURS TEXT WAKEY WAKEY")
self.assertEqual("OK", response)
def test_schedule_in_n_days(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE IN 10 DAYS TEXT WAKEY WAKEY")
self.assertEqual("OK", response)
def test_schedule_in_n_weeks(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE IN 10 WEEKS TEXT WAKEY WAKEY")
self.assertEqual("OK", response)
# EVERY XXX
def test_schedule_every_n_seconds(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE EVERY 10 SECONDS TEXT WAKEY WAKEY")
self.assertEqual("OK", response)
def test_schedule_every_n_minutes(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE EVERY 10 MINUTES TEXT WAKEY WAKEY")
self.assertEqual("OK", response)
def test_schedule_every_n_hours(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE EVERY 10 HOURS TEXT WAKEY WAKEY")
self.assertEqual("OK", response)
def test_schedule_every_n_days(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE EVERY 10 DAYS TEXT WAKEY WAKEY")
self.assertEqual("OK", response)
def test_schedule_every_n_weeks(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE EVERY 10 WEEKS TEXT WAKEY WAKEY")
self.assertEqual("OK", response)
# Other commands
def test_pause_all(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE PAUSE ALL")
self.assertEquals("OK", response)
def test_pause_all_no_jobs(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE PAUSE ALL")
self.assertEquals("ERR", response)
def test_pause_job(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE PAUSE 1")
self.assertEquals("OK", response)
def test_pause_job_diff_id(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE PAUSE 2")
self.assertEquals("ERR", response)
def test_pause_job_no_userid(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid2")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE PAUSE 1")
self.assertEquals("ERR", response)
def test_pause_job_no_jobs(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE PAUSE 1")
self.assertEquals("ERR", response)
def test_resume_all(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE RESUME ALL")
self.assertEquals("OK", response)
def test_resume_all_no_jobs(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE RESUME ALL")
self.assertEquals("ERR", response)
def test_resume_job(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE RESUME 1")
self.assertEquals("OK", response)
def test_resume_job_diff_id(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE RESUME 2")
self.assertEquals("ERR", response)
def test_resume_job_no_userid(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid2")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE RESUME 1")
self.assertEquals("ERR", response)
def test_resume_job_no_jobs(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE RESUME 1")
self.assertEquals("ERR", response)
def test_stop_all(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE STOP ALL")
self.assertEquals("OK", response)
def test_stop_all_no_jobs(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE STOP ALL")
self.assertEquals("ERR", response)
def test_stop_job(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE STOP 1")
self.assertEquals("OK", response)
def test_stop_job_diff_id(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE STOP 2")
self.assertEquals("ERR", response)
def test_stop_job_no_userid(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid2")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE STOP 1")
self.assertEquals("ERR", response)
def test_stop_job_no_jobs(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE STOP 1")
self.assertEquals("ERR", response)
def test_list(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE LIST")
self.assertEquals("OK <olist><item>1</item></olist>", response)
def test_list_multi_userids(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid"), 2: MockJob(2, "testid2"), 3: MockJob(3, "testid")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE LIST")
self.assertEquals("OK <olist><item>1</item><item>3</item></olist>", response)
def test_list_no_userid_jobs(self):
client = SchedulerExtensionClient()
client_context = client.create_client_context("testid")
client_context.client._scheduler = MockScheduler()
client_context.client._scheduler.add_jobs({1: MockJob(1, "testid2")})
extension = SchedulerExtension()
response = extension.execute(client_context, "SCHEDULE LIST")
self.assertEquals("ERR", response)
|
406521
|
import os
import sys
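# Make the project root importable when the tests are run directly:
# this file -> test directory -> project root, which is prepended to sys.path.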
init = os.path.abspath(__file__)
test = os.path.dirname(init)
project = os.path.dirname(test)
sys.path.insert(0, project)
|
406536
|
import copy
import torch
import torchvision
from fcos_core.config import cfg
from fcos_core.augmentations.image_level_augs.img_level_augs import Img_augs
from fcos_core.augmentations.box_level_augs.box_level_augs import Box_augs
from fcos_core.augmentations.box_level_augs.color_augs import color_aug_func
from fcos_core.augmentations.box_level_augs.geometric_augs import geometric_aug_func
from fcos_core.utils.comm import get_world_size
class SA_Aug(object):
def __init__(self, cfg):
autoaug_list = cfg.AUTOAUG.LIST
num_policies = cfg.AUTOAUG.NUM_SUBPOLICIES
max_iters = cfg.SOLVER.MAX_ITER
scale_splits = cfg.AUTOAUG.SCALE_SPLITS
box_prob = cfg.AUTOAUG.BOX_PROB
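# Assumed layout of the flat AUTOAUG.LIST (inferred from the slicing below):
#   [0:4]                   image-level zoom_out/zoom_in (probability, level) pairs
#   [4:4 + 6*num_policies]  box-level sub-policies, 6 integers each (color + geometric)
#   next 6 entries          area and probability scale ratios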
img_aug_list = autoaug_list[:4]
img_augs_dict = {'zoom_out':{'prob':img_aug_list[0]*0.05, 'level':img_aug_list[1]},
'zoom_in':{'prob':img_aug_list[2]*0.05, 'level':img_aug_list[3]}}
self.img_augs = Img_augs(img_augs_dict=img_augs_dict)
box_aug_list = autoaug_list[4:]
color_aug_types = list(color_aug_func.keys())
geometric_aug_types = list(geometric_aug_func.keys())
policies = []
for i in range(num_policies):
_start_pos = i * 6
sub_policy = [(color_aug_types[box_aug_list[_start_pos+0]%len(color_aug_types)], box_aug_list[_start_pos+1]* 0.1, box_aug_list[_start_pos+2], ), # box_color policy
(geometric_aug_types[box_aug_list[_start_pos+3]%len(geometric_aug_types)], box_aug_list[_start_pos+4]* 0.1, box_aug_list[_start_pos+5])] # box_geometric policy
policies.append(sub_policy)
_start_pos = num_policies * 6
scale_ratios = {'area': [box_aug_list[_start_pos+0], box_aug_list[_start_pos+1], box_aug_list[_start_pos+2]],
'prob': [box_aug_list[_start_pos+3], box_aug_list[_start_pos+4], box_aug_list[_start_pos+5]]}
box_augs_dict = {'policies': policies, 'scale_ratios': scale_ratios}
self.box_augs = Box_augs(box_augs_dict=box_augs_dict, max_iters=max_iters, scale_splits=scale_splits, box_prob=box_prob)
self.max_iters = max_iters
self.count = 0
num_gpus = get_world_size()
self.batch_size = cfg.SOLVER.IMS_PER_BATCH // num_gpus
self.num_workers = cfg.DATALOADER.NUM_WORKERS
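# With an in-process dataloader (0 workers), presumably count it as a single
# worker so the iteration estimate in __call__ below is not always zero.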
if self.num_workers==0:
self.num_workers += 1
def __call__(self, tensor, target):
iteration = self.count // self.batch_size * self.num_workers
tensor, target = self.img_augs(tensor, target)
tensor_out, target_out = self.box_augs(tensor, target, iteration=iteration)
self.count += 1
return tensor_out, target_out
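# Illustrative usage (hypothetical cfg values; SA_Aug is meant to be called per
# image/target pair, e.g. from a dataset transform):
#   aug = SA_Aug(cfg)
#   image_tensor, target = aug(image_tensor, target)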
|
406554
|
class Rectangle:
def __init__(self, dx, dy): # initializer
self.dx = dx
self.dy = dy
def cal_area(self): # compute the area
self.area = self.dx * self.dy
return self.area
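# Example usage (illustrative):
#   r = Rectangle(3, 4)
#   r.cal_area()  # -> 12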
|
406579
|
import argparse
import os
import sys
import time
import numpy as np
import torch
from torch.autograd import Variable
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
import utils
from transformer_net import TransformerNet
from vgg16 import Vgg16
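# NOTE: this script targets the pre-0.4 PyTorch / old torchvision API
# (Variable(volatile=True), loss.data[0], transforms.Scale); later releases
# renamed or removed these calls.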
def train(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
kwargs = {'num_workers': 0, 'pin_memory': False}
else:
kwargs = {}
transform = transforms.Compose([transforms.Scale(args.image_size),
transforms.CenterCrop(args.image_size),
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255))])
train_dataset = datasets.ImageFolder(args.dataset, transform)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, **kwargs)
transformer = TransformerNet()
optimizer = Adam(transformer.parameters(), args.lr)
mse_loss = torch.nn.MSELoss()
vgg = Vgg16()
utils.init_vgg16(args.vgg_model_dir)
vgg.load_state_dict(torch.load(os.path.join(args.vgg_model_dir, "vgg16.weight")))
if args.cuda:
transformer.cuda()
vgg.cuda()
style = utils.tensor_load_rgbimage(args.style_image, size=args.style_size)
style = style.repeat(args.batch_size, 1, 1, 1)
style = utils.preprocess_batch(style)
if args.cuda:
style = style.cuda()
style_v = Variable(style, volatile=True)
style_v = utils.subtract_imagenet_mean_batch(style_v)
features_style = vgg(style_v)
gram_style = [utils.gram_matrix(y) for y in features_style]
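# Style targets: one Gram matrix per VGG feature level of the style image;
# the training loop below compares these against Gram matrices of the
# stylized output to form the style loss.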
for e in range(args.epochs):
transformer.train()
agg_content_loss = 0.
agg_style_loss = 0.
count = 0
for batch_id, (x, _) in enumerate(train_loader):
n_batch = len(x)
count += n_batch
optimizer.zero_grad()
x = Variable(utils.preprocess_batch(x))
if args.cuda:
x = x.cuda()
y = transformer(x)
xc = Variable(x.data.clone(), volatile=True)
y = utils.subtract_imagenet_mean_batch(y)
xc = utils.subtract_imagenet_mean_batch(xc)
features_y = vgg(y)
features_xc = vgg(xc)
f_xc_c = Variable(features_xc[1].data, requires_grad=False)
content_loss = args.content_weight * mse_loss(features_y[1], f_xc_c)
style_loss = 0.
for m in range(len(features_y)):
gram_s = Variable(gram_style[m].data, requires_grad=False)
gram_y = utils.gram_matrix(features_y[m])
style_loss += args.style_weight * mse_loss(gram_y, gram_s[:n_batch, :, :])
total_loss = content_loss + style_loss
total_loss.backward()
optimizer.step()
agg_content_loss += content_loss.data[0]
agg_style_loss += style_loss.data[0]
if (batch_id + 1) % args.log_interval == 0:
mesg = "{}\tEpoch {}:\t[{}/{}]\tcontent: {:.6f}\tstyle: {:.6f}\ttotal: {:.6f}".format(
time.ctime(), e + 1, count, len(train_dataset),
agg_content_loss / (batch_id + 1),
agg_style_loss / (batch_id + 1),
(agg_content_loss + agg_style_loss) / (batch_id + 1)
)
print(mesg)
# save model
transformer.eval()
transformer.cpu()
save_model_filename = "epoch_" + str(args.epochs) + "_" + str(time.ctime()).replace(' ', '_') + "_" + str(
args.content_weight) + "_" + str(args.style_weight) + ".model"
save_model_path = os.path.join(args.save_model_dir, save_model_filename)
torch.save(transformer.state_dict(), save_model_path)
print("\nDone, trained model saved at", save_model_path)
def check_paths(args):
try:
if not os.path.exists(args.vgg_model_dir):
os.makedirs(args.vgg_model_dir)
if not os.path.exists(args.save_model_dir):
os.makedirs(args.save_model_dir)
except OSError as e:
print(e)
sys.exit(1)
def stylize(args):
content_image = utils.tensor_load_rgbimage(args.content_image, scale=args.content_scale)
content_image = content_image.unsqueeze(0)
if args.cuda:
content_image = content_image.cuda()
content_image = Variable(utils.preprocess_batch(content_image), volatile=True)
style_model = TransformerNet()
style_model.load_state_dict(torch.load(args.model))
if args.cuda:
style_model.cuda()
output = style_model(content_image)
utils.tensor_save_bgrimage(output.data[0], args.output_image, args.cuda)
def main():
main_arg_parser = argparse.ArgumentParser(description="parser for fast-neural-style")
subparsers = main_arg_parser.add_subparsers(title="subcommands", dest="subcommand")
train_arg_parser = subparsers.add_parser("train",
help="parser for training arguments")
train_arg_parser.add_argument("--epochs", type=int, default=2,
help="number of training epochs, default is 2")
train_arg_parser.add_argument("--batch-size", type=int, default=4,
help="batch size for training, default is 4")
train_arg_parser.add_argument("--dataset", type=str, required=True,
help="path to training dataset, the path should point to a folder "
"containing another folder with all the training images")
train_arg_parser.add_argument("--style-image", type=str, default="images/style-images/mosaic.jpg",
help="path to style-image")
train_arg_parser.add_argument("--vgg-model-dir", type=str, required=True,
help="directory for vgg, if model is not present in the directory it is downloaded")
train_arg_parser.add_argument("--save-model-dir", type=str, required=True,
help="path to folder where trained model will be saved.")
train_arg_parser.add_argument("--image-size", type=int, default=256,
help="size of training images, default is 256 X 256")
train_arg_parser.add_argument("--style-size", type=int, default=None,
help="size of style-image, default is the original size of style image")
train_arg_parser.add_argument("--cuda", type=int, required=True, help="set it to 1 for running on GPU, 0 for CPU")
train_arg_parser.add_argument("--seed", type=int, default=42, help="random seed for training")
train_arg_parser.add_argument("--content-weight", type=float, default=1.0,
help="weight for content-loss, default is 1.0")
train_arg_parser.add_argument("--style-weight", type=float, default=5.0,
help="weight for style-loss, default is 5.0")
train_arg_parser.add_argument("--lr", type=float, default=1e-3,
help="learning rate, default is 0.001")
train_arg_parser.add_argument("--log-interval", type=int, default=500,
help="number of images after which the training loss is logged, default is 500")
eval_arg_parser = subparsers.add_parser("eval", help="parser for evaluation/stylizing arguments")
eval_arg_parser.add_argument("--content-image", type=str, required=True,
help="path to content image you want to stylize")
eval_arg_parser.add_argument("--content-scale", type=float, default=None,
help="factor for scaling down the content image")
eval_arg_parser.add_argument("--output-image", type=str, required=True,
help="path for saving the output image")
eval_arg_parser.add_argument("--model", type=str, required=True,
help="saved model to be used for stylizing the image")
eval_arg_parser.add_argument("--cuda", type=int, required=True,
help="set it to 1 for running on GPU, 0 for CPU")
args = main_arg_parser.parse_args()
if args.subcommand is None:
print("ERROR: specify either train or eval")
sys.exit(1)
if args.cuda and not torch.cuda.is_available():
print("ERROR: cuda is not available, try running on CPU")
sys.exit(1)
if args.subcommand == "train":
check_paths(args)
train(args)
else:
stylize(args)
if __name__ == "__main__":
main()
|
406588
|
import json
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_cytoscape as cyto
from demos import dash_reusable_components as drc
app = dash.Dash(__name__)
server = app.server
# ###################### DATA PREPROCESSING ######################
# Load data
with open('demos/data/sample_network.txt', 'r') as f:
network_data = f.read().split('\n')
# We select the first 750 edges and associated nodes for easier visualization
edges = network_data[:750]
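# Each remaining line is expected to be an edge "source target" (two
# whitespace-separated user ids); lines without a space are skipped below.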
nodes = set()
following_node_di = {} # user id -> list of users they are following
following_edges_di = {} # user id -> list of cy edges starting from user id
followers_node_di = {} # user id -> list of followers (cy_node format)
followers_edges_di = {} # user id -> list of cy edges ending at user id
cy_edges = []
cy_nodes = []
for edge in edges:
if " " not in edge:
continue
source, target = edge.split(" ")
cy_edge = {'data': {'id': source+target, 'source': source, 'target': target}}
cy_target = {"data": {"id": target, "label": "User #" + str(target[-5:])}}
cy_source = {"data": {"id": source, "label": "User #" + str(source[-5:])}}
if source not in nodes:
nodes.add(source)
cy_nodes.append(cy_source)
if target not in nodes:
nodes.add(target)
cy_nodes.append(cy_target)
# Process dictionary of following
if not following_node_di.get(source):
following_node_di[source] = []
if not following_edges_di.get(source):
following_edges_di[source] = []
following_node_di[source].append(cy_target)
following_edges_di[source].append(cy_edge)
# Process dictionary of followers
if not followers_node_di.get(target):
followers_node_di[target] = []
if not followers_edges_di.get(target):
followers_edges_di[target] = []
followers_node_di[target].append(cy_source)
followers_edges_di[target].append(cy_edge)
genesis_node = cy_nodes[0]
genesis_node['classes'] = "genesis"
default_elements = [genesis_node]
default_stylesheet = [
{
"selector": 'node',
'style': {
"opacity": 0.65,
'z-index': 9999
}
},
{
"selector": 'edge',
'style': {
"curve-style": "bezier",
"opacity": 0.45,
'z-index': 5000
}
},
{
'selector': '.followerNode',
'style': {
'background-color': '#0074D9'
}
},
{
'selector': '.followerEdge',
"style": {
"mid-target-arrow-color": "blue",
"mid-target-arrow-shape": "vee",
"line-color": "#0074D9"
}
},
{
'selector': '.followingNode',
'style': {
'background-color': '#FF4136'
}
},
{
'selector': '.followingEdge',
"style": {
"mid-target-arrow-color": "red",
"mid-target-arrow-shape": "vee",
"line-color": "#FF4136",
}
},
{
"selector": '.genesis',
"style": {
'background-color': '#B10DC9',
"border-width": 2,
"border-color": "purple",
"border-opacity": 1,
"opacity": 1,
"label": "data(label)",
"color": "#B10DC9",
"text-opacity": 1,
"font-size": 12,
'z-index': 9999
}
},
{
'selector': ':selected',
"style": {
"border-width": 2,
"border-color": "black",
"border-opacity": 1,
"opacity": 1,
"label": "data(label)",
"color": "black",
"font-size": 12,
'z-index': 9999
}
}
]
# ################################# APP LAYOUT ################################
styles = {
'json-output': {
'overflow-y': 'scroll',
'height': 'calc(50% - 25px)',
'border': 'thin lightgrey solid'
},
'tab': {'height': 'calc(98vh - 80px)'}
}
app.layout = html.Div([
html.Div(className='eight columns', children=[
cyto.Cytoscape(
id='cytoscape',
elements=default_elements,
stylesheet=default_stylesheet,
style={
'height': '95vh',
'width': '100%'
}
)
]),
html.Div(className='four columns', children=[
dcc.Tabs(id='tabs', children=[
dcc.Tab(label='Control Panel', children=[
drc.NamedDropdown(
name='Layout',
id='dropdown-layout',
options=drc.DropdownOptionsList(
'random',
'grid',
'circle',
'concentric',
'breadthfirst',
'cose'
),
value='grid',
clearable=False
),
drc.NamedRadioItems(
name='Expand',
id='radio-expand',
options=drc.DropdownOptionsList(
'followers',
'following'
),
value='followers'
)
]),
dcc.Tab(label='JSON', children=[
html.Div(style=styles['tab'], children=[
html.P('Node Object JSON:'),
html.Pre(
id='tap-node-json-output',
style=styles['json-output']
),
html.P('Edge Object JSON:'),
html.Pre(
id='tap-edge-json-output',
style=styles['json-output']
)
])
])
]),
])
])
# ############################## CALLBACKS ####################################
@app.callback(Output('tap-node-json-output', 'children'),
[Input('cytoscape', 'tapNode')])
def display_tap_node(data):
return json.dumps(data, indent=2)
@app.callback(Output('tap-edge-json-output', 'children'),
[Input('cytoscape', 'tapEdge')])
def display_tap_edge(data):
return json.dumps(data, indent=2)
@app.callback(Output('cytoscape', 'layout'),
[Input('dropdown-layout', 'value')])
def update_cytoscape_layout(layout):
return {'name': layout}
@app.callback(Output('cytoscape', 'elements'),
[Input('cytoscape', 'tapNodeData')],
[State('cytoscape', 'elements'),
State('radio-expand', 'value')])
def generate_elements(nodeData, elements, expansion_mode):
if not nodeData:
return default_elements
# If the node has already been expanded, we don't expand it again
if nodeData.get('expanded'):
return elements
# Retrieve the currently selected element and tag it as expanded
for element in elements:
if nodeData['id'] == element.get('data').get('id'):
element['data']['expanded'] = True
break
if expansion_mode == 'followers':
followers_nodes = followers_node_di.get(nodeData['id'])
followers_edges = followers_edges_di.get(nodeData['id'])
if followers_nodes:
for node in followers_nodes:
node['classes'] = 'followerNode'
elements.extend(followers_nodes)
if followers_edges:
for follower_edge in followers_edges:
follower_edge['classes'] = 'followerEdge'
elements.extend(followers_edges)
elif expansion_mode == 'following':
following_nodes = following_node_di.get(nodeData['id'])
following_edges = following_edges_di.get(nodeData['id'])
if following_nodes:
for node in following_nodes:
if node['data']['id'] != genesis_node['data']['id']:
node['classes'] = 'followingNode'
elements.append(node)
if following_edges:
for follower_edge in following_edges:
follower_edge['classes'] = 'followingEdge'
elements.extend(following_edges)
return elements
if __name__ == '__main__':
app.run_server(debug=True)
|
406594
|
import py, os, sys
from pytest import raises
from .support import setup_make, pylong, pyunicode, IS_WINDOWS, ispypy
currpath = py.path.local(__file__).dirpath()
test_dct = str(currpath.join("datatypesDict"))
def setup_module(mod):
setup_make("datatypes")
class TestLOWLEVEL:
def setup_class(cls):
import cppyy
cls.test_dct = test_dct
cls.datatypes = cppyy.load_reflection_info(cls.test_dct)
cls.N = cppyy.gbl.N
def test00_import_all(self):
"""Validity of `from cppyy.ll import *`"""
from cppyy import ll
for attr in ll.__all__:
assert hasattr(ll, attr)
def test01_llv_type(self):
"""Existence of LowLevelView type"""
import cppyy.types
assert cppyy.types.LowLevelView
def test02_builtin_cpp_casts(self):
"""C++ casting of builtin types"""
from cppyy import ll
for cast in (ll.cast, ll.static_cast):
assert type(cast[float](1)) == float
assert cast[float](1) == 1.
assert type(cast[int](1.1)) == int
assert cast[int](1.1) == 1
assert len(ll.reinterpret_cast['int*'](0)) == 0
raises(ReferenceError, ll.reinterpret_cast['int*'](0).__getitem__, 0)
def test03_memory(self):
"""Memory allocation and free-ing"""
import cppyy
from cppyy import ll
# regular C malloc/free
mem = cppyy.gbl.malloc(16)
cppyy.gbl.free(mem)
# typed styles
mem = cppyy.ll.malloc[int](self.N)
assert len(mem) == self.N
assert not mem.__cpp_array__
for i in range(self.N):
mem[i] = i+1
assert type(mem[i]) == int
assert mem[i] == i+1
cppyy.ll.free(mem)
# C++ arrays
mem = cppyy.ll.array_new[int](self.N)
assert mem.__cpp_array__
assert len(mem) == self.N
for i in range(self.N):
mem[i] = i+1
assert type(mem[i]) == int
assert mem[i] == i+1
cppyy.ll.array_delete(mem)
mem = cppyy.ll.array_new[int](self.N, managed=True)
assert mem.__python_owns__
mem.__python_owns__ = False
assert not mem.__python_owns__
mem.__python_owns__ = True
assert mem.__python_owns__
def test04_python_casts(self):
"""Casts to common Python pointer encapsulations"""
import cppyy, cppyy.ll
cppyy.cppdef("""namespace pycasts {
struct SomeObject{};
uintptr_t get_address(SomeObject* ptr) { return (intptr_t)ptr; }
uintptr_t get_deref(void* ptr) { return (uintptr_t)(*(void**)ptr); }
}""")
from cppyy.gbl import pycasts
s = pycasts.SomeObject()
actual = pycasts.get_address(s)
assert cppyy.ll.addressof(s) == actual
assert cppyy.ll.as_ctypes(s).value == actual
ptrptr = cppyy.ll.as_ctypes(s, byref=True)
assert pycasts.get_deref(ptrptr) == actual
def test05_array_as_ref(self):
"""Use arrays for pass-by-ref"""
import cppyy, sys
from array import array
ctd = cppyy.gbl.CppyyTestData()
# boolean type
b = array('b', [0]); ctd.set_bool_r(b); assert b[0] == True
# char types (as data)
c = array('B', [0]); ctd.set_uchar_r(c); assert c[0] == ord('d')
# integer types
i = array('h', [0]); ctd.set_short_r(i); assert i[0] == -1
i = array('H', [0]); ctd.set_ushort_r(i); assert i[0] == 2
i = array('i', [0]); ctd.set_int_r(i); assert i[0] == -3
i = array('I', [0]); ctd.set_uint_r(i); assert i[0] == 4
i = array('l', [0]); ctd.set_long_r(i); assert i[0] == -5
i = array('L', [0]); ctd.set_ulong_r(i); assert i[0] == 6
if sys.hexversion >= 0x3000000:
i = array('q', [0]); ctd.set_llong_r(i); assert i[0] == -7
i = array('Q', [0]); ctd.set_ullong_r(i); assert i[0] == 8
# floating point types
f = array('f', [0]); ctd.set_float_r(f); assert f[0] == 5.
f = array('d', [0]); ctd.set_double_r(f); assert f[0] == -5.
def test06_ctypes_as_ref_and_ptr(self):
"""Use ctypes for pass-by-ref/ptr"""
# See:
# https://docs.python.org/2/library/ctypes.html#fundamental-data-types
#
# ctypes type C type Python type
# ------------------------------------------------------------------------------
# c_bool _Bool bool (1)
#
# c_char char 1-character string
# c_wchar wchar_t 1-character unicode string
# c_byte char int/long
# c_ubyte unsigned char int/long
#
# c_short short int/long
# c_ushort unsigned short int/long
# c_int int int/long
# c_uint unsigned int int/long
# c_long long int/long
# c_ulong unsigned long int/long
# c_longlong __int64 or long long int/long
# c_ulonglong unsigned __int64 or unsigned long long int/long
#
# c_float float float
# c_double double float
# c_longdouble long double float
import cppyy, ctypes
ctd = cppyy.gbl.CppyyTestData()
### pass by reference/pointer and set value back
for e in ['_r', '_p']:
# boolean type
b = ctypes.c_bool(False); getattr(ctd, 'set_bool'+e)(b); assert b.value == True
# char types
if e == '_r':
c = ctypes.c_char(b'\0'); getattr(ctd, 'set_char'+e)(c); assert c.value == b'a'
c = ctypes.c_wchar(u'\0'); getattr(ctd, 'set_wchar'+e)(c); assert c.value == u'b'
c = ctypes.c_byte(0); getattr(ctd, 'set_schar'+e)(c); assert c.value == ord('c')
c = ctypes.c_ubyte(0); getattr(ctd, 'set_uchar'+e)(c); assert c.value == ord('d')
# integer types
i = ctypes.c_short(0); getattr(ctd, 'set_short'+e)(i); assert i.value == -1
i = ctypes.c_ushort(0); getattr(ctd, 'set_ushort'+e)(i); assert i.value == 2
i = ctypes.c_int(0); getattr(ctd, 'set_int'+e)(i); assert i.value == -3
i = ctypes.c_uint(0); getattr(ctd, 'set_uint'+e)(i); assert i.value == 4
i = ctypes.c_long(0); getattr(ctd, 'set_long'+e)(i); assert i.value == -5
i = ctypes.c_ulong(0); getattr(ctd, 'set_ulong'+e)(i); assert i.value == 6
i = ctypes.c_longlong(0); getattr(ctd, 'set_llong'+e)(i); assert i.value == -7
i = ctypes.c_ulonglong(0); getattr(ctd, 'set_ullong'+e)(i); assert i.value == 8
# floating point types
f = ctypes.c_float(0); getattr(ctd, 'set_float'+e)(f); assert f.value == 5.
f = ctypes.c_double(0); getattr(ctd, 'set_double'+e)(f); assert f.value == -5.
f = ctypes.c_longdouble(0); getattr(ctd, 'set_ldouble'+e)(f); assert f.value == 10.
### pass by pointer and set value back, now using byref (not recommended)
cb = ctypes.byref
# boolean type
b = ctypes.c_bool(False); ctd.set_bool_p(cb(b)); assert b.value == True
# char types
c = ctypes.c_ubyte(0); ctd.set_uchar_p(cb(c)); assert c.value == ord('d')
# integer types
i = ctypes.c_short(0); ctd.set_short_p(cb(i)); assert i.value == -1
i = ctypes.c_ushort(0); ctd.set_ushort_p(cb(i)); assert i.value == 2
i = ctypes.c_int(0); ctd.set_int_p(cb(i)); assert i.value == -3
i = ctypes.c_uint(0); ctd.set_uint_p(cb(i)); assert i.value == 4
i = ctypes.c_long(0); ctd.set_long_p(cb(i)); assert i.value == -5
i = ctypes.c_ulong(0); ctd.set_ulong_p(cb(i)); assert i.value == 6
i = ctypes.c_longlong(0); ctd.set_llong_p(cb(i)); assert i.value == -7
i = ctypes.c_ulonglong(0); ctd.set_ullong_p(cb(i)); assert i.value == 8
# floating point types
f = ctypes.c_float(0); ctd.set_float_p(cb(f)); assert f.value == 5.
f = ctypes.c_double(0); ctd.set_double_p(cb(f)); assert f.value == -5.
### pass by ptr/ptr with allocation (ptr/ptr is ambiguous in its task, so many
# types are allowed to pass; this tests allocation into the pointer)
from ctypes import POINTER
# boolean type
b = POINTER(ctypes.c_bool)(); ctd.set_bool_ppa(b);
assert b[0] == True; assert b[1] == False; assert b[2] == True
cppyy.ll.array_delete(b)
# char types
c = POINTER(ctypes.c_ubyte)(); ctd.set_uchar_ppa(c)
assert c[0] == ord('k'); assert c[1] == ord('l'); assert c[2] == ord('m')
cppyy.ll.array_delete(c)
# integer types
i = POINTER(ctypes.c_short)(); ctd.set_short_ppa(i)
assert i[0] == -1; assert i[1] == -2; assert i[2] == -3
cppyy.ll.array_delete(i)
i = POINTER(ctypes.c_ushort)(); ctd.set_ushort_ppa(i)
assert i[0] == 4; assert i[1] == 5; assert i[2] == 6
cppyy.ll.array_delete(i)
i = POINTER(ctypes.c_int)(); ctd.set_int_ppa(i)
assert i[0] == -7; assert i[1] == -8; assert i[2] == -9
cppyy.ll.array_delete(i)
i = POINTER(ctypes.c_uint)(); ctd.set_uint_ppa(i)
assert i[0] == 10; assert i[1] == 11; assert i[2] == 12
cppyy.ll.array_delete(i)
i = POINTER(ctypes.c_long)(); ctd.set_long_ppa(i)
assert i[0] == -13; assert i[1] == -14; assert i[2] == -15
cppyy.ll.array_delete(i)
i = POINTER(ctypes.c_ulong)(); ctd.set_ulong_ppa(i)
assert i[0] == 16; assert i[1] == 17; assert i[2] == 18
cppyy.ll.array_delete(i)
i = POINTER(ctypes.c_longlong)(); ctd.set_llong_ppa(i)
assert i[0] == -19; assert i[1] == -20; assert i[2] == -21
cppyy.ll.array_delete(i)
i = POINTER(ctypes.c_ulonglong)(); ctd.set_ullong_ppa(i)
assert i[0] == 22; assert i[1] == 23; assert i[2] == 24
cppyy.ll.array_delete(i)
# floating point types
f = POINTER(ctypes.c_float)(); ctd.set_float_ppa(f)
assert f[0] == 5; assert f[1] == 10; assert f[2] == 20
cppyy.ll.array_delete(f)
f = POINTER(ctypes.c_double)(); ctd.set_double_ppa(f)
assert f[0] == -5; assert f[1] == -10; assert f[2] == -20
cppyy.ll.array_delete(f)
f = POINTER(ctypes.c_longdouble)(); ctd.set_ldouble_ppa(f)
assert f[0] == 5; assert f[1] == 10; assert f[2] == 20
cppyy.ll.array_delete(f)
def test07_ctypes_pointer_types(self):
"""Use ctypes for pass-by-ptr/ptr-ptr"""
if ispypy:
py.test.skip('memory corruption')
# See:
# https://docs.python.org/2/library/ctypes.html#fundamental-data-types
#
# ctypes type C type Python type
# ------------------------------------------------------------------------------
# c_char_p char* (NULL terminated) string or None
# c_wchar_p wchar_t* (NULL terminated) unicode or None
# c_void_p void* int/long or None
import cppyy, ctypes
ctd = cppyy.gbl.CppyyTestData()
ptr = ctypes.c_char_p()
for meth in ['char', 'cchar']:
val = getattr(ctd, 'set_'+meth+'_ppm')(ptr)
assert ctd.freeit(ptr) == val
ptr = ctypes.c_wchar_p()
for meth in ['wchar', 'cwchar']:
val = getattr(ctd, 'set_'+meth+'_ppm')(ptr)
assert ctd.freeit(ptr) == val
ptr = ctypes.c_void_p()
val = ctd.set_void_ppm(ptr)
assert ctd.freeit(ptr) == val
def test08_ctypes_type_correctness(self):
"""If types don't match with ctypes, expect exceptions"""
import cppyy, ctypes
ctd = cppyy.gbl.CppyyTestData()
meth_types = ['bool', 'double']
if not IS_WINDOWS:
meth_types.append('long')
i = ctypes.c_int(0);
for ext in ['_r', '_p']:
for meth in meth_types:
with raises(TypeError): getattr(ctd, 'set_'+meth+ext)(i)
def test09_numpy_bool_array(self):
"""Test passing of numpy bool array"""
import cppyy
try:
import numpy as np
except ImportError:
py.test.skip('numpy is not installed')
cppyy.cppdef('int convert_bool(bool* x) {return *x;}')
x = np.array([True], dtype=bool)
assert cppyy.gbl.convert_bool(x)
def test10_array_of_const_char_star(self):
"""Test passting of const char*[]"""
import cppyy, ctypes
def py2c(pyargs):
cargsn = (ctypes.c_char_p * len(pyargs))(*pyargs)
return ctypes.POINTER(ctypes.c_char_p)(cargsn)
pyargs = [b'hello', b'world']
cargs = py2c(pyargs)
v = cppyy.gbl.ArrayOfCStrings.takes_array_of_cstrings(cargs, len(pyargs))
assert len(v) == len(pyargs)
assert list(v) == [x.decode() for x in pyargs]
for t in (tuple, list):
for pyargs in (t(['aap', 'noot', 'mies']), t([b'zus', 'jet', 'tim'])):
v = cppyy.gbl.ArrayOfCStrings.takes_array_of_cstrings(pyargs, len(pyargs))
assert len(v) == len(pyargs)
assert t(v) == t([type(x) == str and x or x.decode() for x in pyargs])
# debatable, but entries that are not strings (int, None) are rejected:
pyargs = ['aap', 1, 'mies']
with raises(TypeError):
cppyy.gbl.ArrayOfCStrings.takes_array_of_cstrings(pyargs, len(pyargs))
pyargs = ['aap', None, 'mies']
with raises(TypeError):
cppyy.gbl.ArrayOfCStrings.takes_array_of_cstrings(pyargs, len(pyargs))
def test11_array_of_const_char_ref(self):
"""Test passting of const char**&"""
import cppyy, ctypes
import cppyy.ll
# IN parameter case
cppyy.cppdef("""\
namespace ConstCharStarStarRef {
int initialize(int& argc, char**& argv) {
argv[0][0] = 'H';
argv[1][0] = 'W';
return argc;
} }""")
initialize = cppyy.gbl.ConstCharStarStarRef.initialize
def py2c(pyargs):
cargsn = (ctypes.c_char_p * len(pyargs))(*pyargs)
return ctypes.POINTER(ctypes.c_char_p)(cargsn)
pyargs = [b'hello', b'world']
cargs = py2c(pyargs)
assert initialize(ctypes.c_int(len(pyargs)), py2c(pyargs)) == len(pyargs)
assert cargs[0] == b'Hello'
assert cargs[1] == b'World'
# OUT parameter case
cppyy.cppdef("""\
namespace ConstCharStarStarRef {
void fill(int& argc, char**& argv) {
argc = 2;
argv = new char*[argc];
argv[0] = new char[6]; strcpy(argv[0], "Hello");
argv[1] = new char[6]; strcpy(argv[1], "World");
} }""")
fill = cppyy.gbl.ConstCharStarStarRef.fill
argc = ctypes.c_int(0)
ptr = ctypes.c_void_p()
fill(argc, ptr)
assert argc.value == 2
argv = ctypes.cast(ptr, ctypes.POINTER(ctypes.c_char_p))
assert argv[0] == b"Hello"
assert argv[1] == b"World"
voidpp = ctypes.cast(ptr, ctypes.POINTER(ctypes.c_void_p))
for i in range(argc.value):
cppyy.ll.free(ctypes.cast(voidpp[i], ctypes.c_void_p))
cppyy.ll.free(ptr)
def test12_null_array(self):
"""Null low level view as empty list"""
import cppyy
cppyy.cppdef("""\
namespace NullArray {
double* gime_null() { return nullptr; }
}""")
ns = cppyy.gbl.NullArray
assert not ns.gime_null()
assert list(ns.gime_null()) == []
def test13_array_interface(self):
"""Test usage of __array__ from numpy"""
import cppyy
try:
import numpy as np
except ImportError:
py.test.skip('numpy is not installed')
cppyy.cppdef("""\
namespace ArrayConversions {
int ivals[] = {1, 2, 3};
}""")
ns = cppyy.gbl.ArrayConversions
a = ns.ivals
b = np.array(a, copy=True) # default behavior
assert len(a) == len(b)
a[0] = 4
assert a[0] == 4
assert b[0] == 1
b = np.array(a, copy=False)
assert b[0] == 4
a[0] = 1
assert b[0] == 1
b = np.array(a, dtype=np.int32, copy=False)
assert b[0] == 1
a[0] = 5
assert b[0] == 5
def test14_templated_arrays(self):
"""Use of arrays in template types"""
import cppyy
assert cppyy.gbl.std.vector[int].value_type == 'int'
assert cppyy.gbl.std.vector[cppyy.gbl.std.vector[int]].value_type == 'std::vector<int>'
assert cppyy.gbl.std.vector['int[1]'].value_type == 'int[1]'
def test15_templated_arrays_gmpxx(self):
"""Use of gmpxx array types in templates"""
import cppyy
try:
cppyy.include("gmpxx.h")
cppyy.load_library('gmpxx')
except ImportError:
py.test.skip("gmpxx not installed")
assert cppyy.gbl.std.vector[cppyy.gbl.mpz_class].value_type
cppyy.cppdef("""\
namespace test15_templated_arrays_gmpxx::vector {
template <typename T>
using value_type = typename T::value_type;
}""")
g = cppyy.gbl
assert g.test15_templated_arrays_gmpxx.vector.value_type[g.std.vector[g.mpz_class]]
class TestMULTIDIMARRAYS:
def setup_class(cls):
import cppyy
cls.test_dct = test_dct
cls.datatypes = cppyy.load_reflection_info(cls.test_dct)
cls.numeric_builtin_types = [
'short', 'unsigned short', 'int', 'unsigned int', 'long', 'unsigned long',
'long long', 'unsigned long long', 'float', 'double'
]
cls.nbt_short_names = [
'short', 'ushort', 'int', 'uint', 'long', 'ulong', 'llong', 'ullong', 'float', 'double'
]
try:
import numpy as np
if IS_WINDOWS:
np_long, np_ulong = np.int32, np.uint32
else:
np_long, np_ulong = np.int64, np.uint64
cls.numpy_builtin_types = [
np.short, np.ushort, np.int32, np.uint32, np_long, np_ulong,
np.longlong, np.ulonglong, np.float32, np.double
]
except ImportError:
pass
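# Helper used by the tests below: builds (data-member name, C++ type) pairs,
# e.g. ('m_unsigned_short2a', 'unsigned short') for lbl='2a', matching the
# naming scheme assumed for MultiDimArrays::DataHolder's data members.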
def _data_m(self, lbl):
return [('m_'+tp.replace(' ', '_')+lbl, tp) for tp in self.numeric_builtin_types]
def test01_2D_arrays(self):
"""Access and use of 2D data members"""
import cppyy
ns = cppyy.gbl.MultiDimArrays
h = ns.DataHolder()
data2a = self._data_m('2a')
for m, tp in data2a:
getattr(h, m).reshape((5, 7))
arr = getattr(h, m)
assert arr.shape == (5, 7)
elem_tp = getattr(cppyy.gbl, tp)
for i in range(5):
for j in range(7):
val = elem_tp(5*i+j)
assert arr[i][j] == val
assert arr[i, j] == val
for i in range(5):
for j in range(7):
arr[i][j] = elem_tp(4+5*i+j)
for i in range(5):
for j in range(7):
val = elem_tp(4+5*i+j)
assert arr[i][j] == val
assert arr[i, j] == val
data2c = self._data_m('2c')
for m, tp in data2c:
arr = getattr(h, m)
assert arr.shape == (3, 5)
elem_tp = getattr(cppyy.gbl, tp)
for i in range(3):
for j in range(5):
val = elem_tp(3*i+j)
assert arr[i][j] == val
assert arr[i, j] == val
def test02_assign_2D_arrays(self):
"""Direct assignment of 2D arrays"""
import cppyy
try:
import numpy as np
except ImportError:
py.test.skip('numpy is not installed')
ns = cppyy.gbl.MultiDimArrays
h = ns.DataHolder()
# copy assignment
data2c = self._data_m('2c')
for itp, (m, tp) in enumerate(data2c):
setattr(h, m, np.ones((3, 5), dtype=self.numpy_builtin_types[itp]))
arr = getattr(h, m)
assert arr.shape == (3, 5)
val = getattr(cppyy.gbl, tp)(1)
for i in range(3):
for j in range(5):
assert arr[i][j] == val
assert arr[i, j] == val
# size checking for copy assignment
for itp, (m, tp) in enumerate(data2c):
with raises(ValueError):
setattr(h, m, np.ones((5, 5), dtype=self.numpy_builtin_types[itp]))
with raises(ValueError):
setattr(h, m, np.ones((3, 7), dtype=self.numpy_builtin_types[itp]))
# pointer assignment
N, M = 11, 7
data2b = self._data_m('2b')
for itp, (m, tp) in enumerate(data2b):
setattr(h, m, getattr(h, 'new_'+self.nbt_short_names[itp]+'2d')(N, M))
arr = getattr(h, m)
elem_tp = getattr(cppyy.gbl, tp)
for i in range(N):
for j in range(M):
val = elem_tp(7*i+j)
assert arr[i][j] == val
assert arr[i, j] == val
assert arr[2][3] != 10
arr[2][3] = 10
assert arr[2][3] == 10
def test03_3D_arrays(self):
"""Access and use of 3D data members"""
import cppyy
ns = cppyy.gbl.MultiDimArrays
h = ns.DataHolder()
data3a = self._data_m('3a')
for m, tp in data3a:
getattr(h, m).reshape((5, 7, 11))
arr = getattr(h, m)
assert arr.shape == (5, 7, 11)
elem_tp = getattr(cppyy.gbl, tp)
for i in range(5):
for j in range(7):
for k in range(11):
val = elem_tp(7*i+3*j+k)
assert arr[i][j][k] == val
assert arr[i, j, k] == val
for i in range(5):
for j in range(7):
for k in range(11):
arr[i][j][k] = elem_tp(4+7*i+3*j+k)
for i in range(5):
for j in range(7):
for k in range(11):
val = elem_tp(4+7*i+3*j+k)
assert arr[i][j][k] == val
assert arr[i, j, k] == val
data3c = self._data_m('3c')
for m, tp in data3c:
arr = getattr(h, m)
assert arr.shape == (3, 5, 7)
elem_tp = getattr(cppyy.gbl, tp)
for i in range(3):
for j in range(5):
for k in range(7):
val = elem_tp(3*i+2*j+k)
assert arr[i][j][k] == val
assert arr[i, j, k] == val
def test04_malloc(self):
"""Use of malloc to create multi-dim arrays"""
import cppyy
import cppyy.ll
cppyy.cppdef("""\
namespace MallocChecker {
template<typename T>
struct Foo {
T* bar;
Foo() {}
Foo(T* other) : bar(other) {}
bool eq(T* other) { return bar == other; }
};
template<typename T>
auto create(T* other) {
return Foo<T>(other);
} }""")
ns = cppyy.gbl.MallocChecker
for dtype in ["int", "int*", "int**",]:
bar = cppyy.ll.malloc[dtype](4)
assert len(bar) == 4
# variable assignment
foo = ns.Foo[dtype]()
foo.bar = bar
assert foo.eq(bar)
# pointer passed to the constructor
foo2 = ns.Foo[dtype](bar)
assert foo2.eq(bar)
# pointer passed to a function
foo3 = ns.create[dtype](bar)
assert foo3.eq(bar)
cppyy.ll.free(bar)
|