90755
from methods import load_json, save_json, chunks
from collections import Counter
from math import ceil
# External libs:
from tabulate import tabulate
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
import seaborn as sns
sns.set_style("white")
sns.set_context('paper', font_scale=7)
my_palette = sns.color_palette("cubehelix", 10)
sns.set_palette(my_palette)
################################################################################
# Main score
def coverage(name, target):
"""
Compute coverage for a specific system.
    This function is agnostic to whether you want coverage over the entire Val
    set or only over the set of learnable types.
"""
base = './Data/Systems/'
path = base + name + '/Val/stats.json'
system = load_json(path)
gen = set(system['types'])
recalled = gen & target
return {"recalled": recalled,
"score": len(recalled)/len(target),
"not_in_val": gen - target}
################################################################################
# Ranking
def most_frequent_omissions(recalled, ref_stats, n=None):
"""
Rank the most frequent omissions.
This function is agnostic to whether you want to use test or val as reference.
"""
    # Map each word to its corpus frequency so that most_common() yields a frequency ranking.
    counts = Counter({word: ref_stats['total_counts'][word] for word in recalled})
if n:
return counts.most_common(n)
else:
return counts.most_common()
################################################################################
# Percentile coverage
def chunk_retrieval_score(chunk, retrieved):
"Compute retrieval scores for one chunk."
overlap = set(chunk) & retrieved
percentage = (len(overlap)/len(chunk)) * 100
return percentage
def retrieval_scores(original, retrieved, chunk_size):
"Compute retrieval scores for all chunks."
return [chunk_retrieval_score(chunk, retrieved)
for chunk in chunks(original, chunk_size)]
def percentiles(val_count_list, retrieved):
"Compute retrieval scores for each percentile."
val_ordered = [word for word, count in val_count_list]
chunk_size = ceil(float(len(val_ordered))/10)
return {'val_scores': retrieval_scores(val_ordered, retrieved, chunk_size),
'num_percentiles': 10}
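# Illustrative sketch (not part of the original pipeline): how the decile scoring
# above behaves on a toy, frequency-ordered vocabulary. `chunks` comes from the
# project's `methods` module; `_chunks` below is an assumed local stand-in that
# yields consecutive fixed-size slices.
def _example_percentile_scores():
    def _chunks(seq, size):
        for i in range(0, len(seq), size):
            yield seq[i:i + size]
    vocab_by_freq = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
    retrieved = {'a', 'b', 'c', 'f'}
    size = ceil(len(vocab_by_freq) / 10)  # one word per decile in this toy case
    return [chunk_retrieval_score(chunk, retrieved)
            for chunk in _chunks(vocab_by_freq, size)]
    # -> [100.0, 100.0, 100.0, 0.0, 0.0, 100.0, 0.0, 0.0, 0.0, 0.0]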
def get_count_list(stats):
"Get count list from a ref stats file."
c = Counter(stats['total_counts'])
return c.most_common()
def plot_percentiles(results):
fig, ax = plt.subplots(figsize=(28,20))
lw = 8.0
ms = 25.0
ordered_systems = sorted(results.items(),
key=lambda pair:pair[1]['percentiles']['val_scores'][1],
reverse=True)
plt.axvline(x=2.5, linestyle='dashed', linewidth=5, color='gray')
plt.axvline(x=6.5, linestyle='dashed', linewidth=5, color='gray')
plt.text(1.35, 90, 'A', color='gray')
plt.text(4.3, 90, 'B', color='gray')
plt.text(8.5, 90, 'C', color='gray')
for name, entry in ordered_systems:
# nums = list(reversed(range(1,11)))
# plt.plot(entry['percentiles']['val_scores'],nums,'o-',label=name,linewidth=5.0,markersize=15.0)
nums = range(1,11)
plt.plot(nums, entry['percentiles']['val_scores'],'o-', label=system2label[name], linewidth=lw, markersize=ms, color=system2color[name])
#plt.legend(ncol=2, loc=1, bbox_to_anchor=(1.05, 1))
labels = [system2label[name] for name,_ in ordered_systems]
legend_markers = [Line2D(range(1), range(1),
linewidth=0, # Invisible line
marker='o',
markersize=40,
markerfacecolor=system2color[name]) for name,_ in ordered_systems]
plt.legend(legend_markers, labels, numpoints=1, loc=1, handletextpad=-0.3, bbox_to_anchor=(1.05, 0.85))
# labels = ['-'.join(map(str,tup)) for tup in zip(range(0,100,10),range(10,110,10))]
# labels = list(reversed(labels))
labels = [str(i * 10) for i in range(1,11)]
plt.xticks(range(1,11), labels)
sns.despine()
plt.tick_params(direction='in', length=10, width=4, bottom=True, left=True)
plt.ylabel('Coverage')
plt.xlabel('Top N percentile')
plt.savefig('./Data/Output/percentiles.pdf')
################################################################################
# Main definitions.
if __name__ == "__main__":
system2label = {'Dai-et-al-2017': 'Dai et al. 2017',
'Liu-et-al-2017': 'Liu et al. 2017',
'Mun-et-al-2017': 'Mun et al. 2017',
'Shetty-et-al-2016': 'Shetty et al. 2016',
'Shetty-et-al-2017': 'Shetty et al. 2017',
'Tavakoli-et-al-2017': 'Tavakoli et al. 2017',
'Vinyals-et-al-2017': 'Vinyals et al. 2017',
'Wu-et-al-2016': 'Wu et al. 2016',
'Zhou-et-al-2017': 'Zhou et al. 2017'}
system2color = dict(zip(sorted(system2label),my_palette))
train_stats = load_json('./Data/COCO/Processed/train_stats.json')
val_stats = load_json('./Data/COCO/Processed/val_stats.json')
train = set(train_stats['types'])
val = set(val_stats['types'])
learnable = train & val
limit = len(learnable)/len(val)
size_limit = len(val) - len(learnable)
print(f'The limit is: {limit}. This means {size_limit} words in Val cannot be learned.')
################################################################################
# Run the script.
systems = ['Dai-et-al-2017',
'Liu-et-al-2017',
'Mun-et-al-2017',
'Shetty-et-al-2016',
'Shetty-et-al-2017',
'Tavakoli-et-al-2017',
'Vinyals-et-al-2017',
'Wu-et-al-2016',
'Zhou-et-al-2017']
# Get coverage results
coverage_results = {system:coverage(system, learnable) for system in systems}
# Add global omission ranking
for entry in coverage_results.values():
entry['omissions'] = most_frequent_omissions(entry['recalled'],
val_stats, # Use validation set as reference.
n=None) # Rank everything
# Add percentile scores.
val_count_list = get_count_list(val_stats)
for entry in coverage_results.values():
recalled = entry['recalled']
entry['percentiles'] = percentiles(val_count_list, recalled)
plot_percentiles(coverage_results)
# Save the data
save_json(coverage_results, './Data/Output/global_recall.json')
# Show a table with the results.
table = tabulate(tabular_data=[(system, entry['score']) for system, entry in coverage_results.items()],
headers=['System', 'Coverage'],
tablefmt='latex_booktabs',
floatfmt='.2f')
print(table)
with open('./Data/Output/global_recall_table.txt','w') as f:
f.write(table)
f.write('\n\n')
f.write(f'The limit is: {limit}. This means {size_limit} words in Val cannot be learned.')
90762
from setuptools import setup
setup(
name="memrepl",
version="1.0",
url="https://github.com/agustingianni/memrepl",
author="<NAME>",
author_email="<EMAIL>",
description=("Memory inspection REPL interface"),
license="MIT",
keywords="memory debugger repl reverse engineering",
py_modules=["memrepl"],
install_requires=[
"frida",
"ipython<6.0", # iPython 6 needs python 3.
"hexdump"
],
entry_points="""
[console_scripts]
memrepl=memrepl:main
"""
)
90803
from rpython.rlib import jit
from rpython.rlib.cache import Cache
from rpython.rlib.objectmodel import specialize, import_from_mixin
from rsqueakvm.util.version import Version
class QuasiConstantCache(Cache):
def _build(self, obj):
class NewQuasiConst(object):
import_from_mixin(QuasiConstantMixin)
return NewQuasiConst
cache = QuasiConstantCache()
class QuasiConstantMixin(object):
"""Mixin for constant values that can be edited, but will be promoted
to a constant when jitting."""
_immutable_fields_ = ["value?"]
def __init__(self, initial_value):
self.value = initial_value
def set(self, value):
self.value = value
def get(self):
if isinstance(self.value, str):
return jit.promote_string(self.value)
else:
return jit.promote(self.value)
def is_set(self):
return self.get()
def activate(self):
self.set(True)
def deactivate(self):
self.set(False)
def changed(self):
assert isinstance(self.get(), Version)
self.set(Version())
@specialize.memo()
def QuasiConstant(initial_value, cls=None):
if cls is not None:
return cache.getorbuild(cls)(initial_value)
return cache.getorbuild(type(initial_value))(initial_value)
@specialize.arg(1)
@specialize.argtype(0)
def Cell(initial_value, type=object):
class NewCell(object):
_attrs_ = ["value"]
def __init__(self, value): self.value = value
def set(self, v): self.value = v
def get(self): return self.value
return NewCell(initial_value)
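# Hedged usage sketch (illustrative only, following the conventions above):
# QuasiConstant(False) builds and caches a per-type subclass of the mixin, so the
# value can be flipped from interpreter code while reads are promoted to constants
# under the JIT. The names below are made up for the example.
def _example_quasi_constant_usage():
    interrupts_enabled = QuasiConstant(False)
    interrupts_enabled.activate()        # equivalent to .set(True)
    assert interrupts_enabled.is_set()   # .get() promotes the value when jitting
    counter_cell = Cell(0, type=int)
    counter_cell.set(counter_cell.get() + 1)
    return interrupts_enabled.get(), counter_cell.get()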
90830
from maneuvers.maneuver import Maneuver
from rlutilities.linear_algebra import norm
from rlutilities.simulation import Car
FIRST_JUMP_DURATION = 0.1
BETWEEN_JUMPS_DELAY = 0.1
SECOND_JUMP_DURATION = 0.05
TIMEOUT = 2.0
class SpeedFlip(Maneuver):
def __init__(self, car: Car, right_handed=True, use_boost=True):
super().__init__(car)
self.direction = 1 if right_handed else -1
self.use_boost = use_boost
self.timer = 0.0
def interruptible(self) -> bool:
return False
def step(self, dt: float):
# Always throttle.
self.controls.throttle = 1.0
        # Use boost (if enabled) until the car is near its maximum speed.
speed = norm(self.car.velocity)
self.controls.boost = (
# self.use_boost and self.timer > FIRST_JUMP_DURATION and speed < 2250
self.use_boost and speed < 2290
)
if self.timer < FIRST_JUMP_DURATION:
self.controls.jump = True
self.controls.pitch = 1.0
elif self.timer < FIRST_JUMP_DURATION + BETWEEN_JUMPS_DELAY:
self.controls.jump = False
self.controls.pitch = 1.0
elif (
self.timer
< FIRST_JUMP_DURATION + BETWEEN_JUMPS_DELAY + SECOND_JUMP_DURATION
):
self.controls.jump = True
self.controls.pitch = -1.0
self.controls.roll = -0.3 * self.direction
else:
self.controls.jump = False
self.controls.pitch = 1.0
self.controls.roll = -1.0 * self.direction
self.controls.yaw = -1.0 * self.direction
self.timer += dt
self.finished = (self.timer > TIMEOUT) or (
self.car.on_ground and self.timer > 0.5
)
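    # Timing summary (derived from the constants above, assuming standard RLBot
    # control conventions): hold the first jump for 0.1 s, release for 0.1 s,
    # dodge with the second jump for 0.05 s, then hold the flip-cancel inputs
    # (pitch back, roll/yaw against the chosen side) until the car is back on
    # the ground after at least 0.5 s, or until the 2.0 s timeout expires.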
90843
from __future__ import print_function
import numpy as np
np.set_printoptions(threshold=np.inf)
import h5py
import theano
import argparse
import itertools
import subprocess
import logging
import time
import codecs
import os
from copy import deepcopy
import math
import sys
from data_generator import VisualWordDataGenerator
import models
# Set up logger
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger(__name__)
# Dimensionality of image feature vector
IMG_FEATS = 4096
MULTEVAL_DIR = '../multeval-0.5.1' if "util" in os.getcwd() else "multeval-0.5.1"
class cd:
"""Context manager for changing the current working directory"""
"""http://stackoverflow.com/questions/431684/how-do-i-cd-in-python"""
def __init__(self, newPath):
self.newPath = newPath
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
class GroundedTranslationGenerator:
def __init__(self, args):
self.args = args
self.vocab = dict()
self.unkdict = dict()
self.counter = 0
self.maxSeqLen = 0
# consistent with models.py
self.use_sourcelang = args.source_vectors is not None
self.use_image = not args.no_image
self.model = None
self.prepare_datagenerator()
# this results in two file handlers for dataset (here and
# data_generator)
if not self.args.dataset:
logger.warn("No dataset given, using flickr8k")
self.dataset = h5py.File("flickr8k/dataset.h5", "r")
else:
self.dataset = h5py.File("%s/dataset.h5" % self.args.dataset, "r")
if self.args.debug:
theano.config.optimizer = 'None'
theano.config.exception_verbosity = 'high'
def prepare_datagenerator(self):
self.data_gen = VisualWordDataGenerator(self.args,
self.args.dataset)
self.args.checkpoint = self.find_best_checkpoint()
self.data_gen.set_vocabulary(self.args.checkpoint)
self.vocab_len = len(self.data_gen.index2word)
self.index2word = self.data_gen.index2word
self.word2index = self.data_gen.word2index
def generate(self):
'''
Entry point for this module.
Loads up a data generator to get the relevant image / source features.
Builds the relevant model, given the command-line arguments.
Generates sentences for the images in the val / test data.
        Calculates BLEU and PPLX, unless these are disabled via the
        --without_scores / --no_pplx flags.
'''
if self.use_sourcelang:
# HACK FIXME unexpected problem with input_data
self.hsn_size = self.data_gen.hsn_size
else:
self.hsn_size = 0
        if self.model is None:
self.build_model(generate=True)
self.generate_sentences(self.args.checkpoint, val=not self.args.test)
if not self.args.without_scores:
score = self.bleu_score(self.args.checkpoint, val=not self.args.test)
if self.args.multeval:
score, _, _ = self.multeval_scores(self.args.checkpoint,
val=not self.args.test)
if not self.args.no_pplx:
self.build_model(generate=False)
self.calculate_pplx(self.args.checkpoint, val=not self.args.test)
return score
def generate_sentences(self, filepath, val=True):
"""
Generates descriptions of images for --generation_timesteps
        iterations through the LSTM. Each input description is clipped to
        the first <BOS> token or, if --generate_from_N_words is set, to the
        first N words after it (N + 1 tokens, counting the <BOS> token).
This process can be additionally conditioned
on source language hidden representations, if provided by the
--source_vectors parameter.
The output is clipped to the first EOS generated, if it exists.
TODO: duplicated method with generate.py
"""
if self.args.beam_width > 1:
prefix = "val" if val else "test"
handle = codecs.open("%s/%sGenerated" % (filepath, prefix), "w",
'utf-8')
logger.info("Generating %s descriptions", prefix)
start_gen = self.args.generate_from_N_words # Default 0
start_gen = start_gen + 1 # include BOS
generator = self.data_gen.generation_generator(prefix, batch_size=1)
seen = 0
            # We are going to beam search for the most probable sentence.
# let's do this one sentence at a time to make the logging output
# easier to understand
for data in generator:
text = data[0]['text']
# Append the first start_gen words to the complete_sentences list
# for each instance in the batch.
complete_sentences = [[] for _ in range(text.shape[0])]
for t in range(start_gen): # minimum 1
for i in range(text.shape[0]):
w = np.argmax(text[i, t])
complete_sentences[i].append(self.index2word[w])
del data[0]['text']
text = self.reset_text_arrays(text, start_gen)
Y_target = data[1]['output']
data[0]['text'] = text
max_beam_width = self.args.beam_width
structs = self.make_duplicate_matrices(data[0], max_beam_width)
# A beam is a 2-tuple with the probability of the sequence and
# the words in that sequence. Start with empty beams
beams = [(0.0, [])]
# collects beams that are in the top candidates and
# emitted a <E> token.
finished = []
for t in range(start_gen, self.args.generation_timesteps):
# Store the candidates produced at timestep t, will be
# pruned at the end of the timestep
candidates = []
# we take a view of the datastructures, which means we're only
# ever generating a prediction for the next word. This saves a
# lot of cycles.
preds = self.model.predict(structs, verbose=0)
# The last indices in preds are the predicted words
next_word_indices = preds[:, t-1]
sorted_indices = np.argsort(-next_word_indices, axis=1)
# Each instance in structs is holding the history of a
# beam, and so there is a direct connection between the
# index of a beam in beams and the index of an instance in
# structs.
for beam_idx, b in enumerate(beams):
# get the sorted predictions for the beam_idx'th beam
beam_predictions = sorted_indices[beam_idx]
for top_idx in range(self.args.beam_width):
wordIndex = beam_predictions[top_idx]
wordProb = next_word_indices[beam_idx][beam_predictions[top_idx]]
# For the beam_idxth beam, add the log probability
# of the top_idxth predicted word to the previous
# log probability of the sequence, and append the
# top_idxth predicted word to the sequence of words
candidates.append([b[0] + math.log(wordProb), b[1] + [wordIndex]])
candidates.sort(reverse = True)
if self.args.verbose:
logger.info("Candidates in the beam")
logger.info("---")
for c in candidates:
logger.info(" ".join([self.index2word[x] for x in c[1]]) + " (%f)" % c[0])
beams = candidates[:max_beam_width] # prune the beams
pruned = []
for b in beams:
# If a top candidate emitted an EOS token then
# a) add it to the list of finished sequences
# b) remove it from the beams and decrease the
# maximum size of the beams.
if b[1][-1] == self.word2index["<E>"]:
finished.append(b)
if max_beam_width >= 1:
max_beam_width -= 1
else:
pruned.append(b)
beams = pruned[:max_beam_width]
if self.args.verbose:
logger.info("Pruned beams")
logger.info("---")
for b in beams:
logger.info(" ".join([self.index2word[x] for x in b[1]]) + "(%f)" % b[0])
if max_beam_width == 0:
# We have sampled max_beam_width sequences with an <E>
# token so stop the beam search.
break
# Reproduce the structs for the beam search so we can keep
# track of the state of each beam
structs = self.make_duplicate_matrices(data[0], max_beam_width)
# Rewrite the 1-hot word features with the
                    # so-far-predicted tokens in a beam.
for bidx, b in enumerate(beams):
for idx, w in enumerate(b[1]):
next_word_index = w
structs['text'][bidx, idx+1, w] = 1.
# If none of the sentences emitted an <E> token while
# decoding, add the final beams into the final candidates
if len(finished) == 0:
for leftover in beams:
finished.append(leftover)
# Normalise the probabilities by the length of the sequences
# as suggested by Graves (2012) http://arxiv.org/abs/1211.3711
for f in finished:
f[0] = f[0] / len(f[1])
finished.sort(reverse=True)
if self.args.verbose:
logger.info("Length-normalised samples")
logger.info("---")
for f in finished:
logger.info(" ".join([self.index2word[x] for x in f[1]]) + "(%f)" % f[0])
                # Emit the highest (log) probability sequence (finished is sorted in descending order)
best_beam = finished[0]
complete_sentences[i] = [self.index2word[x] for x in best_beam[1]]
handle.write(' '.join([x for x
in itertools.takewhile(
lambda n: n != "<E>", complete_sentences[i])]) + "\n")
if self.args.verbose:
logger.info("%s (%f)",' '.join([x for x
in itertools.takewhile(
lambda n: n != "<E>",
complete_sentences[i])]),
best_beam[0])
seen += text.shape[0]
                if seen == self.data_gen.split_sizes[prefix]:
# Hacky way to break out of the generator
break
handle.close()
else:
# We are going to arg max decode a sequence.
prefix = "val" if val else "test"
logger.info("Generating %s descriptions", prefix)
start_gen = self.args.generate_from_N_words + 1 # include BOS
handle = codecs.open("%s/%sGenerated" % (filepath, prefix),
"w", 'utf-8')
generator = self.data_gen.generation_generator(prefix)
seen = 0
for data in generator:
text = deepcopy(data[0]['text'])
# Append the first start_gen words to the complete_sentences list
# for each instance in the batch.
complete_sentences = [[] for _ in range(text.shape[0])]
for t in range(start_gen): # minimum 1
                    for i in range(text.shape[0]):
w = np.argmax(text[i, t])
complete_sentences[i].append(self.index2word[w])
del data[0]['text']
text = self.reset_text_arrays(text, start_gen)
Y_target = data[1]['output']
data[0]['text'] = text
for t in range(start_gen, self.args.generation_timesteps):
logger.debug("Input token: %s" % self.index2word[np.argmax(text[0,t-1])])
preds = self.model.predict(data[0],
verbose=0)
# Look at the last indices for the words.
next_word_indices = np.argmax(preds[:, t-1], axis=1)
logger.debug("Predicted token: %s" % self.index2word[next_word_indices[0]])
# update array[0]/sentence-so-far with generated words.
for i in range(len(next_word_indices)):
data[0]['text'][i, t, next_word_indices[i]] = 1.
next_words = [self.index2word[x] for x in next_word_indices]
for i in range(len(next_words)):
complete_sentences[i].append(next_words[i])
sys.stdout.flush()
# print/extract each sentence until it hits the first end-of-string token
for s in complete_sentences:
if self.args.verbose:
logger.info("%s",' '.join([x for x
in itertools.takewhile(
lambda n: n != "<E>",
                                            s)]))
decoded_str = ' '.join([x for x
in itertools.takewhile(
lambda n: n != "<E>", s[1:])])
handle.write(decoded_str + "\n")
seen += text.shape[0]
if seen == self.data_gen.split_sizes[prefix]:
# Hacky way to break out of the generator
break
handle.close()
def calculate_pplx(self, path, val=True):
""" Splits the input data into batches of self.args.batch_size to
reduce the memory footprint of holding all of the data in RAM. """
prefix = "val" if val else "test"
logger.info("Calculating pplx over %s data", prefix)
sum_logprobs = 0
y_len = 0
generator = self.data_gen.generation_generator(prefix)
seen = 0
for data in generator:
Y_target = deepcopy(data[1]['output'])
del data[1]['output']
preds = self.model.predict(data[0],
verbose=0,
batch_size=self.args.batch_size)
for i in range(Y_target.shape[0]):
for t in range(Y_target.shape[1]):
target_idx = np.argmax(Y_target[i, t])
target_tok = self.index2word[target_idx]
if target_tok != "<P>":
log_p = math.log(preds[i, t, target_idx],2)
sum_logprobs += -log_p
y_len += 1
seen += data[0]['text'].shape[0]
if seen == self.data_gen.split_sizes[prefix]:
# Hacky way to break out of the generator
break
norm_logprob = sum_logprobs / y_len
pplx = math.pow(2, norm_logprob)
logger.info("PPLX: %.4f", pplx)
handle = open("%s/%sPPLX" % (path, prefix), "w")
handle.write("%f\n" % pplx)
handle.close()
return pplx
def reset_text_arrays(self, text_arrays, fixed_words=1):
""" Reset the values in the text data structure to zero so we cannot
accidentally pass them into the model.
Helper function for generate_sentences().
"""
reset_arrays = deepcopy(text_arrays)
reset_arrays[:,fixed_words:, :] = 0
return reset_arrays
def make_duplicate_matrices(self, generator_data, k):
'''
Prepare K duplicates of the input data for a given instance yielded by
the data generator.
        Helper function for the beam search decoder in generate_sentences().
'''
if self.use_sourcelang and self.use_image:
# the data generator yielded a dictionary with the words, the
# image features, and the source features
dupes = [[],[],[]]
words = generator_data['text']
img = generator_data['img']
source = generator_data['src']
for x in range(k):
# Make a deep copy of the word_feats structures
# so the arrays will never be shared
dupes[0].append(deepcopy(words[0,:,:]))
dupes[1].append(source[0,:,:])
dupes[2].append(img[0,:,:])
# Turn the list of arrays into a numpy array
dupes[0] = np.array(dupes[0])
dupes[1] = np.array(dupes[1])
dupes[2] = np.array(dupes[2])
return {'text': dupes[0], 'img': dupes[2], 'src': dupes[1]}
elif self.use_image:
# the data generator yielded a dictionary with the words and the
# image features
dupes = [[],[]]
words = generator_data['text']
img = generator_data['img']
for x in range(k):
# Make a deep copy of the word_feats structures
# so the arrays will never be shared
dupes[0].append(deepcopy(words[0,:,:]))
dupes[1].append(img[0,:,:])
# Turn the list of arrays into a numpy array
dupes[0] = np.array(dupes[0])
dupes[1] = np.array(dupes[1])
return {'text': dupes[0], 'img': dupes[1]}
elif self.use_sourcelang:
# the data generator yielded a dictionary with the words and the
# source features
dupes = [[],[]]
words = generator_data['text']
            source = generator_data['src']
for x in range(k):
# Make a deep copy of the word_feats structures
# so the arrays will never be shared
dupes[0].append(deepcopy(words[0,:,:]))
dupes[1].append(source[0,:,:])
# Turn the list of arrays into a numpy array
dupes[0] = np.array(dupes[0])
dupes[1] = np.array(dupes[1])
return {'text': dupes[0], 'src': dupes[1]}
def find_best_checkpoint(self):
'''
Read the summary file from the directory and scrape out the run ID of
        the highest BLEU scoring checkpoint. Then do an ls-style listing of
the directory and return the exact path to the best model.
Assumes only one matching prefix in the model checkpoints directory.
'''
summary_data = open("%s/summary" % self.args.model_checkpoints).readlines()
summary_data = [x.replace("\n", "") for x in summary_data]
best_id = None
target = "Best loss" if self.args.best_pplx else "Best Metric"
for line in summary_data:
if line.startswith(target):
best_id = "%03d" % (int(line.split(":")[1].split("|")[0]))
checkpoint = None
if best_id is not None:
checkpoints = os.listdir(self.args.model_checkpoints)
for c in checkpoints:
if c.startswith(best_id):
checkpoint = c
break
logger.info("Best checkpoint: %s/%s" % (self.args.model_checkpoints, checkpoint))
return "%s/%s" % (self.args.model_checkpoints, checkpoint)
def bleu_score(self, directory, val=True):
'''
PPLX is only weakly correlated with improvements in BLEU,
and thus improvements in human judgements. Let's also track
BLEU score of a subset of generated sentences in the val split
to decide on early stopping, etc.
'''
prefix = "val" if val else "test"
self.extract_references(directory, val)
subprocess.check_call(
['perl multi-bleu.perl %s/%s_reference.ref < %s/%sGenerated | tee %s/%sBLEU'
% (directory, prefix, directory, prefix, directory, prefix)], shell=True)
bleudata = open("%s/%sBLEU" % (directory, prefix)).readline()
data = bleudata.split(",")[0]
bleuscore = data.split("=")[1]
bleu = float(bleuscore.lstrip())
return bleu
def multeval_scores(self, directory, val=True):
'''
Maybe you want to evaluate with Meteor, TER, and BLEU?
'''
prefix = "val" if val else "test"
self.extract_references(directory, val)
with cd(MULTEVAL_DIR):
subprocess.check_call(
['./multeval.sh eval --refs ../%s/%s_reference.* \
--hyps-baseline ../%s/%sGenerated \
--meteor.language %s \
--threads 4 \
2> multevaloutput 1> multevaloutput'
% (directory, prefix, directory, prefix, self.args.meteor_lang)], shell=True)
handle = open("multevaloutput")
multdata = handle.readlines()
handle.close()
for line in multdata:
if line.startswith("RESULT: baseline: BLEU: AVG:"):
mbleu = line.split(":")[4]
mbleu = mbleu.replace("\n","")
mbleu = mbleu.strip()
lr = mbleu.split(".")
mbleu = float(lr[0]+"."+lr[1][0:2])
if line.startswith("RESULT: baseline: METEOR: AVG:"):
mmeteor = line.split(":")[4]
mmeteor = mmeteor.replace("\n","")
mmeteor = mmeteor.strip()
lr = mmeteor.split(".")
mmeteor = float(lr[0]+"."+lr[1][0:2])
if line.startswith("RESULT: baseline: TER: AVG:"):
mter = line.split(":")[4]
mter = mter.replace("\n","")
mter = mter.strip()
lr = mter.split(".")
mter = float(lr[0]+"."+lr[1][0:2])
logger.info("Meteor = %.2f | BLEU = %.2f | TER = %.2f",
mmeteor, mbleu, mter)
return mmeteor, mbleu, mter
def extract_references(self, directory, val=True):
"""
Get reference descriptions for split we are generating outputs for.
Helper function for bleu_score().
"""
prefix = "val" if val else "test"
references = self.data_gen.get_refs_by_split_as_list(prefix)
for refid in xrange(len(references[0])):
codecs.open('%s/%s_reference.ref%d'
% (directory, prefix, refid), 'w', 'utf-8').write('\n'.join([x[refid] for x in references]))
def build_model(self, generate=False):
'''
Build a Keras model if one does not yet exist.
Helper function for generate().
'''
if generate:
t = self.args.generation_timesteps
else:
t = self.data_gen.max_seq_len
if self.args.mrnn:
m = models.MRNN(self.args.embed_size, self.args.hidden_size,
self.vocab_len,
self.args.dropin,
self.args.optimiser, self.args.l2reg,
hsn_size=self.hsn_size,
weights=self.args.checkpoint,
gru=self.args.gru,
clipnorm=self.args.clipnorm,
t=t)
else:
m = models.NIC(self.args.embed_size, self.args.hidden_size,
self.vocab_len,
self.args.dropin,
self.args.optimiser, self.args.l2reg,
hsn_size=self.hsn_size,
weights=self.args.checkpoint,
gru=self.args.gru,
clipnorm=self.args.clipnorm,
t=t)
self.model = m.buildKerasModel(use_sourcelang=self.use_sourcelang,
use_image=self.use_image)
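# Illustrative sketch (not part of the original model): the beam pruning and
# length normalisation used in generate_sentences(), reduced to a toy next-token
# distribution. Beams are (log-probability, token-list) pairs; every live beam is
# expanded by its top-k continuations, the candidate pool is re-sorted, and beams
# that emit "<E>" are moved to `finished` while the live width shrinks.
def _toy_beam_search(step_probs, beam_width=2, eos="<E>"):
    beams = [(0.0, [])]
    finished = []
    for probs in step_probs:  # probs: dict mapping token -> probability at this step
        candidates = []
        for logp, seq in beams:
            top_k = sorted(probs.items(), key=lambda kv: -kv[1])[:beam_width]
            for tok, p in top_k:
                candidates.append((logp + math.log(p), seq + [tok]))
        candidates.sort(reverse=True)
        beams = []
        for cand in candidates[:beam_width]:
            if cand[1][-1] == eos:
                finished.append(cand)
                beam_width = max(beam_width - 1, 0)
            else:
                beams.append(cand)
        if beam_width == 0 or not beams:
            break
    finished.extend(beams)
    # Length normalisation, as in Graves (2012), before picking the best sequence.
    finished = [(logp / max(len(seq), 1), seq) for logp, seq in finished]
    return max(finished)
# Example: _toy_beam_search([{"a": 0.6, "b": 0.4}, {"<E>": 0.9, "a": 0.1}])
# returns (approx. -0.31, ["a", "<E>"]).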
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate descriptions from a trained model")
# General options
parser.add_argument("--run_string", default="", type=str,
help="Optional string to help you identify the run")
parser.add_argument("--debug", action="store_true",
help="Print debug messages to stdout?")
parser.add_argument("--fixed_seed", action="store_true",
help="Start with a fixed random seed? Useful for\
                        reproducing experiments. (default = False)")
parser.add_argument("--num_sents", default=5, type=int,
help="Number of descriptions/image for training")
parser.add_argument("--model_checkpoints", type=str, required=True,
help="Path to the checkpointed parameters")
parser.add_argument("--best_pplx", action="store_true",
help="Use the best PPLX checkpoint instead of the\
best BLEU checkpoint? Default = False.")
# Define the types of input data the model will receive
parser.add_argument("--dataset", default="", type=str, help="Path to the\
HDF5 dataset to use for training / val input\
(defaults to flickr8k)")
parser.add_argument("--supertrain_datasets", nargs="+", help="Paths to the\
datasets to use as additional training input (defaults\
to None)")
parser.add_argument("--unk", type=int,
help="unknown character cut-off. Default=3", default=3)
parser.add_argument("--maximum_length", type=int, default=50,
help="Maximum length of sequences permissible\
in the training data (Default = 50)")
parser.add_argument("--existing_vocab", type=str, default="",
help="Use an existing vocabulary model to define the\
vocabulary and UNKing in this dataset?\
(default = "", which means we will derive the\
vocabulary from the training dataset")
parser.add_argument("--no_image", action="store_true",
help="Do not use image data.")
parser.add_argument("--source_vectors", default=None, type=str,
help="Path to final hidden representations of\
encoder/source language VisualWordLSTM model.\
(default: None.) Expects a final_hidden_representation\
vector for each image in the dataset")
parser.add_argument("--source_enc", type=str, default=None,
help="Which type of source encoder features? Expects\
either 'mt_enc' or 'vis_enc'. Required.")
parser.add_argument("--source_type", type=str, default=None,
help="Source features over gold or predicted tokens?\
Expects 'gold' or 'predicted'. Required")
parser.add_argument("--source_merge", type=str, default="sum",
help="How to merge source features. Only applies if \
there are multiple feature vectors. Expects 'sum', \
'avg', or 'concat'.")
# Model hyperparameters
parser.add_argument("--batch_size", default=100, type=int)
parser.add_argument("--embed_size", default=256, type=int)
parser.add_argument("--hidden_size", default=256, type=int)
parser.add_argument("--dropin", default=0.5, type=float,
help="Prob. of dropping embedding units. Default=0.5")
parser.add_argument("--gru", action="store_true", help="Use GRU instead\
of LSTM recurrent state? (default = False)")
parser.add_argument("--big_batch_size", default=10000, type=int,
help="Number of examples to load from disk at a time;\
0 loads entire dataset. Default is 10000")
parser.add_argument("--mrnn", action="store_true",
help="Use a Mao-style multimodal recurrent neural\
network?")
parser.add_argument("--peeking_source", action="store_true",
help="Input the source features at every timestep?\
Default=False.")
# Optimisation details
parser.add_argument("--optimiser", default="adam", type=str,
help="Optimiser: rmsprop, momentum, adagrad, etc.")
parser.add_argument("--lr", default=0.001, type=float)
parser.add_argument("--beta1", default=None, type=float)
parser.add_argument("--beta2", default=None, type=float)
parser.add_argument("--epsilon", default=None, type=float)
parser.add_argument("--stopping_loss", default="bleu", type=str,
help="minimise cross-entropy or maximise BLEU?")
parser.add_argument("--l2reg", default=1e-8, type=float,
help="L2 cost penalty. Default=1e-8")
parser.add_argument("--clipnorm", default=-1, type=float,
help="Clip gradients? (default = -1, which means\
                        don't clip the gradients.)")
parser.add_argument("--max_epochs", default=50, type=int,
help="Maxmimum number of training epochs. Used with\
--predefined_epochs")
parser.add_argument("--patience", type=int, default=10, help="Training\
will be terminated if validation BLEU score does not\
increase for this number of epochs")
parser.add_argument("--no_early_stopping", action="store_true")
# Language generation details
parser.add_argument("--generation_timesteps", default=10, type=int,
help="Maximum number of words to generate for unseen\
data (default=10).")
parser.add_argument("--test", action="store_true",
help="Generate for the test images? (Default=False)\
which means we will generate for the val images")
parser.add_argument("--without_scores", action="store_true",
help="Don't calculate BLEU or perplexity. Useful if\
you only want to see the generated sentences or if\
you don't have ground-truth sentences for evaluation.")
parser.add_argument("--beam_width", type=int, default=1,
help="Number of hypotheses to consider when decoding.\
Default=1, which means arg max decoding.")
parser.add_argument("--verbose", action="store_true",
help="Verbose output while decoding? If you choose\
verbose output then you'll see the total beam search\
decoding process. (Default = False)")
parser.add_argument("--multeval", action="store_true",
help="Evaluate using multeval?")
parser.add_argument("--meteor_lang", type=str, required=True,
help="Language of the input dataset. Required for\
correct Meteor evaluation. See\
http://www.cs.cmu.edu/~alavie/METEOR/README.html#languages\
for options.")
parser.add_argument("--no_pplx", action="store_true",
help="Skip perplexity calculation?")
# Legacy options
parser.add_argument("--generate_from_N_words", type=int, default=0,
help="Use N words as starting point when generating\
strings. Useful mostly for mt-only model (in other\
cases, image provides enough useful starting\
context.)")
parser.add_argument("--predefined_epochs", action="store_true",
help="Do you want to stop training after a specified\
number of epochs, regardless of early-stopping\
criteria? Use in conjunction with --max_epochs.")
    # Necessary but unused in this module
parser.add_argument("--h5_writeable", action="store_true",
help="Open the H5 file for write-access? Useful for\
serialising hidden states to disk. (default = False)")
parser.add_argument("--use_predicted_tokens", action="store_true",
help="Generate final hidden state\
activations over oracle inputs or from predicted\
inputs? Default = False ( == Oracle)")
w = GroundedTranslationGenerator(parser.parse_args())
w.generate()
90869
from copy import deepcopy
from typing import List
from rdkit import Chem
from icolos.core.step_utils.obabel_structconvert import OBabelStructConvert
from icolos.utils.enums.compound_enums import (
CompoundContainerEnum,
EnumerationContainerEnum,
)
from icolos.utils.enums.program_parameters import SchrodingerExecutablesEnum
from icolos.core.step_utils.structconvert import StructConvert
from icolos.utils.general.icolos_exceptions import ContainerCorrupted
from icolos.utils.enums.write_out_enums import WriteOutEnum
from typing import Union
import numpy as np
import os
_WE = WriteOutEnum()
_SEE = SchrodingerExecutablesEnum()
class Conformer:
"""This class is a storage class for individual conformers associated with a given Enumeration."""
def __init__(
self,
conformer: Chem.Mol = None,
conformer_id: int = None,
enumeration_object=None,
):
self._conformer = conformer
self._conformer_id = conformer_id
self._enumeration_object = enumeration_object
self._extra_data_dictionary = {}
def get_compound_name(self) -> str:
if self.get_enumeration_object() is not None:
return self.get_enumeration_object().get_compound_name()
def get_index_string(self) -> str:
enum_obj = self.get_enumeration_object()
enum_str = ""
if enum_obj is not None:
enum_str = enum_obj.get_index_string()
conf_str = ""
if self.get_conformer_id() is not None:
conf_str = str(self.get_conformer_id())
return ":".join([enum_str, conf_str])
def add_extra_data(self, key: str, data):
self._extra_data_dictionary[key] = data
def get_extra_data(self) -> dict:
return self._extra_data_dictionary
def clear_extra_data(self):
self._extra_data_dictionary = {}
def set_enumeration_object(self, enumeration_object):
self._enumeration_object = enumeration_object
def get_enumeration_object(self):
return self._enumeration_object
def get_molecule(self) -> Chem.Mol:
return self._conformer
def set_molecule(self, conformer: Chem.Mol):
self._conformer = conformer
def set_conformer_id(self, conformer_id: int):
self._conformer_id = conformer_id
def get_conformer_id(self) -> int:
return self._conformer_id
def empty(self) -> bool:
if self.get_molecule() is None:
return True
return False
def _clone(self):
clone = Conformer(
conformer=deepcopy(self.get_molecule()),
conformer_id=self.get_conformer_id(),
enumeration_object=self.get_enumeration_object(),
)
clone._extra_data_dictionary = deepcopy(self.get_extra_data())
return clone
def __copy__(self):
return self._clone()
def __deepcopy__(self, memo):
return self._clone()
def __repr__(self):
parent_enumeration_id = (
None
if self.get_enumeration_object() is None
else self.get_enumeration_object().get_enumeration_id()
)
return "<Icolos conformer: id=%s, parent enumeration: %s>" % (
self.get_conformer_id(),
parent_enumeration_id,
)
def __str__(self):
return self.__repr__()
def write(self, path: str, format_=_WE.SDF):
writer = Chem.SDWriter(path)
molecule = self.get_molecule()
molecule.SetProp(_WE.RDKIT_NAME, self.get_index_string())
molecule.SetProp(_WE.INDEX_STRING, self.get_index_string())
writer.write(molecule)
writer.close()
if format_ == _WE.PDB:
pdb_path = path.split(".")[0] + ".pdb"
# convert the written sdf file to a pdb with OB
converter = OBabelStructConvert()
converter.sdf2pdb(sdf_file=path, pdb_file=pdb_path)
os.remove(path)
def update_coordinates(self, path: str):
old = self.get_molecule()
for mol in Chem.SDMolSupplier(path, removeHs=False):
mol.SetProp(_WE.RDKIT_NAME, old.GetProp(_WE.RDKIT_NAME))
for prop in old.GetPropNames():
mol.SetProp(prop, old.GetProp(prop))
self.set_molecule(mol)
# only one molecule expected at this stage, so stop after first run
break
self.write("".join([path, "_out"]))
class Enumeration:
"""This class bundles all information on an enumeration, especially all conformers generated."""
def __init__(
self,
compound_object=None,
smile: str = "",
molecule: Chem.Mol = None,
original_smile: str = None,
enumeration_id: int = None,
):
self._MC = CompoundContainerEnum()
self._EC = EnumerationContainerEnum()
self._smile = smile
self._compound_object = compound_object
self._molecule = molecule
self._original_smile = original_smile
self._enumeration_id = enumeration_id
self._conformers = []
def empty(self) -> bool:
if len(self.get_conformers()) == 0:
return True
return False
def get_compound_name(self) -> str:
if self.get_compound_object() is not None:
return self.get_compound_object().get_name()
def _get_next_conformer_id(self) -> int:
ids = [conf.get_conformer_id() for conf in self.get_conformers()]
if len(ids) == 0:
return 0
else:
return max(ids) + 1
def sort_conformers(
self, by_tag: Union[str, List[str]], reverse: bool = True, aggregation="sum"
):
conformers = self.get_conformers()
if isinstance(by_tag, str):
conformers = sorted(
conformers,
key=lambda x: float(x.get_molecule().GetProp(by_tag)),
reverse=reverse,
)
self._conformers = conformers
self.reset_conformer_ids()
elif isinstance(by_tag, list):
# need to normalise the values, calculate max and min of each tag in the series
def normalise_tag(value, tag):
all_tag_values = [
float(conf.get_molecule().GetProp(tag)) for conf in conformers
]
max_tag = np.max(all_tag_values)
min_tag = np.min(all_tag_values)
return (float(value) - min_tag) / (max_tag - min_tag)
            # if we specify multiple tags, aggregate according to the provided aggregation function
if aggregation == "sum":
conformers = sorted(
conformers,
key=lambda x: np.sum(
[
float(normalise_tag(x.get_molecule().GetProp(i), i))
for i in by_tag
]
),
reverse=reverse,
)
self._conformers = conformers
elif aggregation == "product":
conformers = sorted(
conformers,
                    key=lambda x: np.prod(
[
float(normalise_tag(x.get_molecule().GetProp(i), i))
for i in by_tag
]
),
reverse=reverse,
)
self._conformers = conformers
else:
raise AttributeError(
"Only sum or product aggregation modes are currently supported - ABORT"
)
# for ligand in self.ligands:
# ligand.set_conformers(sorted(ligand.get_conformers(),
# key=lambda x: float(x.GetProp(_ROE.GLIDE_DOCKING_SCORE)), reverse=False))
# ligand.add_tags_to_conformers()
def find_conformer(self, conformer_id: int) -> Conformer:
conf = [
conf
for conf in self.get_conformers()
if conf.get_conformer_id() == conformer_id
]
if len(conf) == 0:
raise IndexError(f"Could not find conformer with id {conformer_id}.")
elif len(conf) > 1:
raise ContainerCorrupted(
f"More than one conformer with id {conformer_id} found in the same Enumeration instance (compound_number: {self.get_enumeration_id()})."
)
return conf[0]
def get_conformer_ids(self) -> List[int]:
ids = [conf.get_conformer_id() for conf in self.get_conformers()]
return ids
def reset_conformer_ids(self):
for new_id, conf in enumerate(self.get_conformers()):
conf.set_conformer_id(conformer_id=new_id)
def add_conformer(self, conformer: Conformer, auto_update: bool = True):
"""Add a new conformer. If "auto_update" is True, the Enumeration class will be set to "self" and
the conformer_id will be set to the next free index."""
conformer = deepcopy(conformer)
if auto_update:
conformer.set_enumeration_object(self)
conformer.set_conformer_id(self._get_next_conformer_id())
self._conformers.append(conformer)
def add_conformers(self, conformers: List[Conformer], auto_update: bool = True):
"""Add new conformers. If "auto_update" is True, the Enumeration class will be set to "self" and
the conformer_id will be set to the next free index."""
for conformer in conformers:
self.add_conformer(conformer=conformer, auto_update=auto_update)
def get_index_string(self) -> str:
comp_obj = self.get_compound_object()
comp_str = ""
if comp_obj is not None:
comp_str = comp_obj.get_index_string()
enum_str = ""
if self.get_enumeration_id() is not None:
enum_str = str(self.get_enumeration_id())
return ":".join([comp_str, enum_str])
def clean_failed_conformers(self):
        # All conformers whose molecule has been set to None by an earlier step are considered failed.
for idx in list(reversed(range(len(self._conformers)))):
if self._conformers[idx].get_molecule() is None:
del self._conformers[idx]
self.reset_conformer_ids()
def clear_molecule(self):
self._molecule = None
def clear_conformers(self):
self._conformers = []
def get_conformers(self) -> List[Conformer]:
return self._conformers
def clone_conformers(self) -> List[Conformer]:
return [deepcopy(conf) for conf in self._conformers]
def set_compound_object(self, compound_object):
self._compound_object = compound_object
def get_compound_object(self):
return self._compound_object
def set_enumeration_id(self, enumeration_id: int):
self._enumeration_id = enumeration_id
def get_enumeration_id(self) -> int:
return self._enumeration_id
def set_smile(self, smile: str):
self._smile = smile
def get_smile(self) -> str:
return self._smile
def set_molecule(self, molecule: Chem.Mol):
self._molecule = molecule
def get_molecule(self) -> Chem.Mol:
return self._molecule
def set_original_smile(self, original_smile: str):
self._original_smile = original_smile
def get_original_smile(self) -> str:
return self._original_smile
def _clone(self):
clone = Enumeration(
compound_object=self.get_compound_object(),
smile=self.get_smile(),
molecule=deepcopy(self.get_molecule()),
original_smile=self.get_original_smile(),
enumeration_id=self.get_enumeration_id(),
)
for conf in self.get_conformers():
conf = deepcopy(conf)
conf.set_enumeration_object(enumeration_object=clone)
clone.add_conformer(conf, auto_update=False)
return clone
def __copy__(self):
return self._clone()
def __deepcopy__(self, memo):
return self._clone()
def __repr__(self):
parent_compound_id = (
None
if self.get_compound_object() is None
else self.get_compound_object().get_compound_number()
)
return (
"<Icolos enumeration: id=%s, smile=%s, parent compound: %s, num_conformers: %i>"
% (
self.get_enumeration_id(),
self.get_smile(),
parent_compound_id,
len(self._conformers),
)
)
def __str__(self):
return self.__repr__()
def __iter__(self):
return iter(self._conformers)
def __getitem__(self, key: int) -> Conformer:
return self._conformers[key]
def __len__(self) -> int:
return len(self.get_conformers())
class Compound:
"""This class bundles all information on a molecule and serves mainly to group enumerations."""
def __init__(self, name: str = "", compound_number: int = None):
self._CC = CompoundContainerEnum()
self._EC = EnumerationContainerEnum()
self._name = name
self._compound_number = compound_number
self._enumerations = []
def __repr__(self):
return "<Icolos compound: name=%s, compound_number=%s, enumerations=%s>" % (
self.get_name(),
self.get_compound_number(),
len(self.get_enumerations()),
)
def __str__(self):
return self.__repr__()
def get_index_string(self) -> str:
if self.get_compound_number() is not None:
return str(self.get_compound_number())
else:
return ""
def set_name(self, name: str):
self._name = name
def get_name(self) -> str:
return self._name
def set_compound_number(self, compound_number: int):
self._compound_number = compound_number
def get_compound_number(self) -> int:
return self._compound_number
def add_enumeration(self, enumeration: Enumeration, auto_update: bool = True):
"""Add a new enumeration. If "auto_update" is True, the Compound class will be set to "self" and
the enumeration_id will be set to the next free index."""
enumeration = deepcopy(enumeration)
if auto_update:
enumeration.set_compound_object(self)
enumeration.set_enumeration_id(self._get_next_enumeration_id())
self._enumerations.append(enumeration)
def add_enumerations(
self, enumerations: List[Enumeration], auto_update: bool = True
):
"""Add new enumerations. If "auto_update" is True, the Compound class will be set to "self" and
the enumeration_id will be set to the next free index."""
for enumeration in enumerations:
self.add_enumeration(enumeration=enumeration, auto_update=auto_update)
def clear_enumerations(self):
self._enumerations = []
def get_enumerations(self) -> List[Enumeration]:
return self._enumerations
def _clone(self):
clone = Compound(
name=self.get_name(), compound_number=self.get_compound_number()
)
for enum in self.get_enumerations():
enum = deepcopy(enum)
enum.set_compound_object(compound_object=clone)
clone.add_enumeration(enum, auto_update=False)
return clone
def __iter__(self):
return iter(self._enumerations)
def __copy__(self):
return self._clone()
def __deepcopy__(self, memo):
return self._clone()
def __getitem__(self, key: int) -> Enumeration:
return self._enumerations[key]
def __len__(self) -> int:
return len(self.get_enumerations())
def _get_next_enumeration_id(self):
ids = [enum.get_enumeration_id() for enum in self.get_enumerations()]
if len(ids) == 0:
return 0
else:
return max(ids) + 1
def find_enumeration(self, enumeration_id: int) -> Enumeration:
enum = [
enum
for enum in self.get_enumerations()
if enum.get_enumeration_id() == enumeration_id
]
if len(enum) == 0:
raise IndexError(f"Could not find enumeration with id {enumeration_id}.")
elif len(enum) > 1:
raise ContainerCorrupted(
f"More than one enumeration with id {enumeration_id} found in the same Compound instance (compound_number: {self.get_compound_number()})."
)
return enum[0]
def get_enumeration_ids(self) -> List[int]:
ids = [enum.get_enumeration_id() for enum in self.get_enumerations()]
return ids
def reset_enumeration_ids(self):
for new_id, enum in enumerate(self.get_enumerations()):
enum.set_enumeration_id(enumeration_id=new_id)
def reset_all_ids(self):
self.reset_enumeration_ids()
for enum in self.get_enumerations():
enum.reset_conformer_ids()
def update_all_relations(self):
for enum in self.get_enumerations():
enum.set_compound_object(self)
for conf in enum.get_conformers():
conf.set_enumeration_object(enum)
def empty(self) -> bool:
if len(self.get_enumerations()) == 0:
return True
return False
def unroll_conformers(self) -> List[Conformer]:
conformers = []
for enum in self.get_enumerations():
# guard against empty enumerations that might be used when constructing more complex data flows
if enum.empty():
continue
for conf in enum.get_conformers():
conformers.append(conf)
return conformers
# TODO: Replace these module-level helper functions with a wrapper object
def get_compound_by_id(compounds: List[Compound], id: int) -> Compound:
for compound in compounds:
if compound.get_compound_number() == id:
return compound
raise ValueError(
f"Could not find compound with id {id} in list of length {len(compounds)}."
)
def get_compound_by_name(compounds: List[Compound], name: str) -> Compound:
for compound in compounds:
if compound.get_name() == name:
return compound
raise ValueError(
f"Could not find compound with name {name} in list of length {len(compounds)}."
)
def unroll_conformers(compounds: List[Compound]) -> List[Conformer]:
all_conformers = []
for comp in compounds:
all_conformers = all_conformers + comp.unroll_conformers()
return all_conformers
def unroll_enumerations(compounds: List[Compound]) -> List[Enumeration]:
all_enumerations = []
for comp in compounds:
all_enumerations = all_enumerations + comp.get_enumerations()
return all_enumerations
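# Hedged usage sketch (illustrative only): how the container hierarchy above fits
# together. The SMILES string and names are made up; index strings follow the
# "compound:enumeration:conformer" convention produced by get_index_string().
def _example_compound_hierarchy():
    mol = Chem.MolFromSmiles("CCO")
    compound = Compound(name="ethanol", compound_number=0)
    enumeration = Enumeration(smile="CCO", molecule=mol, original_smile="CCO")
    enumeration.add_conformer(Conformer(conformer=Chem.AddHs(mol)))
    compound.add_enumeration(enumeration)
    # add_enumeration stores a deep copy, so fetch the stored object before inspecting it
    stored = compound.find_enumeration(0)
    return stored.get_index_string(), stored[0].get_index_string()  # ("0:0", "0:0:0")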
90878
from typing import Optional, Dict, Union
import falcon
from falcon_multipart.middleware import MultipartMiddleware
from webtest.http import StopableWSGIServer
from py_fake_server.route import Route
from py_fake_server.endpoint import Endpoint
from py_fake_server.statistic import Statistic
class FakeServer(falcon.API):
def __init__(self, host: str, port: int):
super().__init__(middleware=[MultipartMiddleware()])
self.req_options = self._get_request_options()
self._host: str = host
self._port: int = port
self._server: Optional[StopableWSGIServer] = None
self._endpoints: Dict[Route, Endpoint] = {}
self._statistics: Dict[Route, Statistic] = {}
self.add_sink(self._handle_all)
@staticmethod
def _get_request_options():
options = falcon.RequestOptions()
options.auto_parse_qs_csv = False
return options
def _handle_all(self, request: falcon.Request, response: falcon.Response):
route = Route(request.method, self.base_uri, request.path)
endpoint = self._endpoints.get(route, Endpoint(route))
self._set_response_attributes_from_endpoint(response, endpoint)
self._update_statistics(request, route)
@staticmethod
def _set_response_attributes_from_endpoint(response: falcon.Response, endpoint: Endpoint):
recorded_response = endpoint.pop_response()
response.status = getattr(falcon, f"HTTP_{recorded_response.status}")
response.body = recorded_response.body
if recorded_response.content_type:
response.content_type = recorded_response.content_type
for header_name, header_value in recorded_response.headers.items():
response.set_header(header_name, header_value)
for cookie_name, cookie_value in recorded_response.cookies.items():
response.set_cookie(cookie_name, cookie_value)
def _update_statistics(self, request: falcon.Request, route: Route):
self._statistics.setdefault(route, Statistic(route.method, route.url))
statistic = self._statistics.get(route)
statistic.record_request(request)
@property
def base_uri(self):
return f"http://{self._host}:{self._port}"
def start(self):
self._server = StopableWSGIServer.create(self, host=self._host, port=self._port)
def stop(self):
self._server.shutdown()
def clear(self):
self._endpoints = {}
self._statistics = {}
def on_(self, method: str, url: str) -> Endpoint:
route = Route(method, self.base_uri, url)
new_endpoint = Endpoint(route)
self._endpoints[route] = new_endpoint
return new_endpoint
def was_requested(self, method: str, url: str) -> Statistic:
route = Route(method, self.base_uri, url)
self._statistics.setdefault(route, Statistic(route.method, route.url))
return self._statistics.get(route)
def was_not_requested(self, method: str, url: str) -> Statistic:
route = Route(method, self.base_uri, url)
self._statistics.setdefault(route, Statistic(route.method, route.url))
statistic: Statistic = self._statistics.get(route)
statistic.exactly_0_times()
return statistic
def expect_that(expectation: Union[FakeServer, Statistic]):
if isinstance(expectation, FakeServer):
return expectation
else:
return expectation.check()
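# Hedged usage sketch using only the methods defined in this module (the canned
# responses themselves are configured through the Endpoint object returned by
# on_(), which lives in py_fake_server.endpoint and is not shown here):
def _example_fake_server_lifecycle():
    server = FakeServer(host="localhost", port=8081)
    server.start()
    try:
        server.on_("GET", "/ping")                              # register an endpoint
        expect_that(server.was_not_requested("GET", "/ping"))   # nothing has hit it yet
    finally:
        server.stop()
        server.clear()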
90884
import typing
from PyQt5.QtChart import QValueAxis, QChart, QBarSeries, QBarSet
from ParadoxTrading.Chart.SeriesAbstract import SeriesAbstract
class BarSeries(SeriesAbstract):
def __init__(
self, _name: str,
_x_list: typing.Sequence,
_y_list: typing.Sequence,
_color: typing.Any = None,
_show_value: bool = True,
):
super().__init__(_name, _x_list, _y_list, _color, _show_value)
self.type = SeriesAbstract.BAR
def addSeries(
self, _x2idx: typing.Dict, _idx2x: list, _chart: QChart,
_axis_x: QValueAxis, _axis_y: QValueAxis
):
bar_set = QBarSet(self.name)
tmp_dict = dict(zip(self.x_list, self.y_list))
for k in _idx2x:
            if k in tmp_dict:
bar_set.append(tmp_dict[k])
else:
bar_set.append(0)
if self.color is not None:
bar_set.setColor(self.color)
bar_series = QBarSeries()
bar_series.append(bar_set)
_chart.addSeries(bar_series)
_chart.setAxisX(_axis_x, bar_series)
_chart.setAxisY(_axis_y, bar_series)
if self.show_value:
self.createShow()
90957
from django.conf import settings
# Map of mode -> processor config
# {
# 'js': {
# 'processor': 'damn.processors.ScriptProcessor',
# 'aliases': {},
# },
# }
PROCESSORS = getattr(settings, "DAMN_PROCESSORS", {})
# File extension -> mode name
MODE_MAP = getattr(settings, "DAMN_MODE_MAP", {})
MODE_ORDER = getattr(settings, "DAMN_MODE_ORDER", ["css", "js",])
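# Example shapes for these settings (illustrative values only): keys of
# DAMN_MODE_MAP are file extensions, values are mode names (such as the keys
# of DAMN_PROCESSORS); DAMN_MODE_ORDER controls output ordering.
# DAMN_MODE_MAP = {"coffee": "js", "scss": "css"}
# DAMN_MODE_ORDER = ["css", "js"]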
90977
def get_user_id(user_or_id):
if type(user_or_id) is str or type(user_or_id) is int:
return str(user_or_id)
elif hasattr(user_or_id, '__getitem__') and 'user_id' in user_or_id:
return str(user_or_id['user_id'])
elif hasattr(user_or_id, 'user_id'):
return str(user_or_id.user_id)
return None
def truncate_list_length(lst, length, *, add_per_element=0):
total_length = 0
for i, elem in enumerate(lst):
total_length += len(elem) + add_per_element
if total_length > length:
return lst[0:i]
return lst
def mention_users(users, max_count, max_length, *, join="\n", prefix=" - "):
trunc_users = users[0:max_count]
trunc_message = '_...and {} more._'
max_trunc_len = len(str(len(users)))
max_message_len = len(trunc_message.format(' ' * max_trunc_len))
final_max_len = max(0, max_length-max_message_len)
user_strs = truncate_list_length(
[f"{prefix}<@{get_user_id(user)}>" for user in trunc_users],
final_max_len,
add_per_element=len(join)
)
trunc_count = (len(users) - len(trunc_users)) + (len(trunc_users) - len(user_strs))
    # Only append the "...and N more." notice when something was actually truncated.
    suffix = join + trunc_message.format(trunc_count) if trunc_count else ''
    out_msg = join.join(user_strs) + suffix
if (len(out_msg) > max_length) and final_max_len >= 3:
return '...'
elif len(out_msg) > max_length:
return ''
else:
return out_msg
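# Hedged worked example (illustrative user IDs): with three users, max_count=2 and a
# generous max_length, two mentions are kept and the remaining user is reported.
def _example_mention_users():
    users = ["111", "222", "333"]
    return mention_users(users, max_count=2, max_length=200)
    # -> " - <@111>\n - <@222>\n_...and 1 more._"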
def id_from_mention(mention):
try:
return int(mention.replace('<', '').replace('!', '').replace('>', '').replace('@', ''))
except:
return False
90979
import config
import models.base
from models.task import TaskLock
def main():
app = config.App(config)
with app.app_context():
for lock in models.base.db.session.query(TaskLock).all():
if lock.step is not None:
lock.step.complete('Force release lock')
models.base.db.session.delete(lock)
models.base.db.session.commit()
if __name__ == '__main__':
main()
90983
from hypothesis import given, example, note
import hypothesis.strategies as st
import hypothesis
import strategies
import warnings
import base64
import json
import six
import blackboxprotobuf
warnings.filterwarnings(
"ignore",
"Call to deprecated create function.*",
)
try:
import Test_pb2
except ImportError:
import os
os.system(
"cd tests/payloads; protoc --python_out ../py_test/ Test.proto; cd ../../"
)
import Test_pb2
# TODO: need to find a different way to generate protobuf messages off of this
testMessage_typedef = {
"1": {"type": "double", "name": "testDouble"},
"2": {"type": "float", "name": "testFloat"},
# "4": {"type": "int", "name": "testInt32"},
"8": {"type": "int", "name": "testInt64"},
# "16": {"type": "uint", "name": "testUInt32"},
"32": {"type": "uint", "name": "testUInt64"},
# "64": {"type": "sint", "name": "testSInt32"},
"128": {"type": "sint", "name": "testSInt64"},
"256": {"type": "fixed32", "name": "testFixed32"},
"512": {"type": "fixed64", "name": "testFixed64"},
"1024": {"type": "sfixed32", "name": "testSFixed32"},
"2048": {"type": "sfixed64", "name": "testSFixed64"},
# "4096": {"type": "int", "name": "testBool"},
"8192": {"type": "string", "name": "testString"},
"16384": {"type": "bytes", "name": "testBytes"},
# "32768": {"type": "message", "name": "testEmbed",
# "message_typedef": {
# "3": {"type": "double", "name": "embedDouble"},
# "2": {"type": "bytes", "name": "embedString"}}
# },
# "65536": {"type": "packed_int", "name": "testRepeatedInt32"}
}
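# Hedged round-trip sketch using only the API exercised in the tests below:
# encode a plain dict against the typedef above, then decode it back
# (field values here are illustrative).
def _example_roundtrip():
    payload = {"testString": "hello", "testInt64": 42}
    encoded = blackboxprotobuf.encode_message(payload, testMessage_typedef)
    decoded, _typedef = blackboxprotobuf.decode_message(encoded, testMessage_typedef)
    return decoded["testString"], decoded["testInt64"]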
# Test decoding from blackboxprotobuf
@given(x=strategies.gen_message_data(testMessage_typedef))
def test_decode(x):
message = Test_pb2.TestMessage()
for key, value in x.items():
setattr(message, key, value)
encoded = message.SerializeToString()
decoded, typedef = blackboxprotobuf.decode_message(encoded, testMessage_typedef)
hypothesis.note("Decoded: %r" % decoded)
for key in decoded.keys():
assert x[key] == decoded[key]
# Test encoding with blackboxprotobuf
@given(x=strategies.gen_message_data(testMessage_typedef))
def test_encode(x):
encoded = blackboxprotobuf.encode_message(x, testMessage_typedef)
message = Test_pb2.TestMessage()
message.ParseFromString(encoded)
for key in x.keys():
assert getattr(message, key) == x[key]
# Try to modify a random key with blackbox and re-encode
# TODO: In the future do more random modifications, like swap the whole value
@given(
x=strategies.gen_message_data(testMessage_typedef),
modify_num=st.sampled_from(sorted(testMessage_typedef.keys())),
)
def test_modify(x, modify_num):
modify_key = testMessage_typedef[modify_num]["name"]
message = Test_pb2.TestMessage()
for key, value in x.items():
setattr(message, key, value)
encoded = message.SerializeToString()
decoded, typedef = blackboxprotobuf.decode_message(encoded, testMessage_typedef)
# eliminate any cases where protobuf defaults out a field
hypothesis.assume(modify_key in decoded)
if isinstance(decoded[modify_key], str):
mod_func = lambda x: "test"
elif six.PY2 and isinstance(decoded[modify_key], unicode):
mod_func = lambda x: six.u("test")
elif isinstance(decoded[modify_key], bytes):
mod_func = lambda x: b"test"
elif isinstance(decoded[modify_key], six.integer_types):
mod_func = lambda x: 10
elif isinstance(decoded[modify_key], float):
mod_func = lambda x: 10
else:
hypothesis.note(
"Failed to modify key: %s (%r)" % (modify_key, type(decoded[modify_key]))
)
assert False
decoded[modify_key] = mod_func(decoded[modify_key])
x[modify_key] = mod_func(x[modify_key])
encoded = blackboxprotobuf.encode_message(decoded, testMessage_typedef)
message = Test_pb2.TestMessage()
message.ParseFromString(encoded)
for key in decoded.keys():
assert getattr(message, key) == x[key]
## Second copies of the above methods that use the protobuf to/from json functions
@given(x=strategies.gen_message_data(testMessage_typedef))
@example(x={"testBytes": b"test123"})
@example(x={"testBytes": b"\x80"})
def test_decode_json(x):
# Test with JSON payload
message = Test_pb2.TestMessage()
for key, value in x.items():
setattr(message, key, value)
encoded = message.SerializeToString()
decoded_json, typedef_json = blackboxprotobuf.protobuf_to_json(
encoded, testMessage_typedef
)
hypothesis.note("Encoded JSON:")
hypothesis.note(decoded_json)
decoded = json.loads(decoded_json)
hypothesis.note("Original value:")
hypothesis.note(x)
    hypothesis.note("Decoded value:")
hypothesis.note(decoded)
for key in decoded.keys():
if key == "testBytes":
decoded[key] = six.ensure_binary(decoded[key], encoding="latin1")
assert x[key] == decoded[key]
@given(x=strategies.gen_message_data(testMessage_typedef))
@example(x={"testBytes": b"\x80"})
def test_encode_json(x):
# Test with JSON payload
if "testBytes" in x:
x["testBytes"] = x["testBytes"].decode("latin1")
json_str = json.dumps(x)
hypothesis.note("JSON Str Input:")
hypothesis.note(json_str)
hypothesis.note(json.loads(json_str))
encoded = blackboxprotobuf.protobuf_from_json(json_str, testMessage_typedef)
hypothesis.note("BBP decoding:")
test_decode, _ = blackboxprotobuf.decode_message(encoded, testMessage_typedef)
hypothesis.note(test_decode)
message = Test_pb2.TestMessage()
message.ParseFromString(encoded)
hypothesis.note("Message:")
hypothesis.note(message)
for key in x.keys():
hypothesis.note("Message value")
hypothesis.note(type(getattr(message, key)))
hypothesis.note("Original value")
hypothesis.note(type(x[key]))
if key == "testBytes":
x[key] = six.ensure_binary(x[key], encoding="latin1")
assert getattr(message, key) == x[key]
@given(
x=strategies.gen_message_data(testMessage_typedef),
modify_num=st.sampled_from(sorted(testMessage_typedef.keys())),
)
def test_modify_json(x, modify_num):
modify_key = testMessage_typedef[modify_num]["name"]
message = Test_pb2.TestMessage()
for key, value in x.items():
setattr(message, key, value)
encoded = message.SerializeToString()
decoded_json, typedef = blackboxprotobuf.protobuf_to_json(
encoded, testMessage_typedef
)
decoded = json.loads(decoded_json)
# eliminate any cases where protobuf defaults out a field
hypothesis.assume(modify_key in decoded)
if isinstance(decoded[modify_key], str):
mod_func = lambda x: "test"
elif six.PY2 and isinstance(decoded[modify_key], unicode):
mod_func = lambda x: six.u("test")
elif isinstance(decoded[modify_key], bytes):
mod_func = lambda x: b"test"
elif isinstance(decoded[modify_key], six.integer_types):
mod_func = lambda x: 10
elif isinstance(decoded[modify_key], float):
mod_func = lambda x: 10
else:
hypothesis.note(
"Failed to modify key: %s (%r)" % (modify_key, type(decoded[modify_key]))
)
assert False
decoded[modify_key] = mod_func(decoded[modify_key])
x[modify_key] = mod_func(x[modify_key])
encoded = blackboxprotobuf.protobuf_from_json(
json.dumps(decoded), testMessage_typedef
)
message = Test_pb2.TestMessage()
message.ParseFromString(encoded)
for key in decoded.keys():
hypothesis.note("Message value:")
hypothesis.note(type(getattr(message, key)))
hypothesis.note("Orig value:")
hypothesis.note((x[key]))
if key == "testBytes":
x[key] = six.ensure_binary(x[key], encoding="latin1")
assert getattr(message, key) == x[key]
|
90987
|
import sys
import open3d
import numpy as np
import time
import os
from geometric_registration.utils import get_pcd, get_keypts, get_desc, loadlog
import cv2
from functools import partial
def build_correspondence(source_desc, target_desc):
"""
Find the mutually closest point pairs in feature space.
    source_desc and target_desc are the keypoint descriptors of the two point clouds, e.g. of shape [5000, 32].
"""
distance = np.sqrt(2 - 2 * (source_desc @ target_desc.T))
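    # (The line above assumes L2-normalized descriptors: for unit vectors, ||a - b||^2 = 2 - 2 * a.b,
    # so `distance` holds the pairwise Euclidean distances between source and target descriptors.)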
source_idx = np.argmin(distance, axis=1)
source_dis = np.min(distance, axis=1)
target_idx = np.argmin(distance, axis=0)
target_dis = np.min(distance, axis=0)
result = []
for i in range(len(source_idx)):
if target_idx[source_idx[i]] == i:
result.append([i, source_idx[i]])
return np.array(result)
def register2Fragments(id1, id2, keyptspath, descpath, resultpath, logpath, gtLog, desc_name, inlier_ratio, distance_threshold):
"""
Register point cloud {id1} and {id2} using the keypts location and descriptors.
"""
cloud_bin_s = f'cloud_bin_{id1}'
cloud_bin_t = f'cloud_bin_{id2}'
write_file = f'{cloud_bin_s}_{cloud_bin_t}.rt.txt'
if os.path.exists(os.path.join(resultpath, write_file)):
return 0, 0, 0
source_keypts = get_keypts(keyptspath, cloud_bin_s)
target_keypts = get_keypts(keyptspath, cloud_bin_t)
source_desc = get_desc(descpath, cloud_bin_s, desc_name)
target_desc = get_desc(descpath, cloud_bin_t, desc_name)
source_desc = np.nan_to_num(source_desc)
target_desc = np.nan_to_num(target_desc)
# Select {num_keypts} points based on the scores. The descriptors and keypts are already sorted based on the detection score.
num_keypts = 250
source_keypts = source_keypts[-num_keypts:, :]
source_desc = source_desc[-num_keypts:, :]
target_keypts = target_keypts[-num_keypts:, :]
target_desc = target_desc[-num_keypts:, :]
# Select {num_keypts} points randomly.
# num_keypts = 250
# source_indices = np.random.choice(range(source_keypts.shape[0]), num_keypts)
# target_indices = np.random.choice(range(target_keypts.shape[0]), num_keypts)
# source_keypts = source_keypts[source_indices, :]
# source_desc = source_desc[source_indices, :]
# target_keypts = target_keypts[target_indices, :]
# target_desc = target_desc[target_indices, :]
key = f'{cloud_bin_s.split("_")[-1]}_{cloud_bin_t.split("_")[-1]}'
if key not in gtLog.keys():
# skip the pairs that have less than 30% overlap.
num_inliers = 0
inlier_ratio = 0
gt_flag = 0
else:
# build correspondence set in feature space.
corr = build_correspondence(source_desc, target_desc)
# calculate the inlier ratio, this is for Feature Matching Recall.
gt_trans = gtLog[key]
frag1 = source_keypts[corr[:, 0]]
frag2_pc = open3d.PointCloud()
frag2_pc.points = open3d.utility.Vector3dVector(target_keypts[corr[:, 1]])
frag2_pc.transform(gt_trans)
frag2 = np.asarray(frag2_pc.points)
distance = np.sqrt(np.sum(np.power(frag1 - frag2, 2), axis=1))
num_inliers = np.sum(distance < distance_threshold)
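        # The fraction of correspondences within distance_threshold is this pair's inlier ratio;
        # pairs whose ratio exceeds the global inlier_ratio threshold are later counted as correct
        # matches when computing Feature Matching Recall.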
if num_inliers / len(distance) < inlier_ratio:
print(key)
print("num_corr:", len(corr), "inlier_ratio:", num_inliers / len(distance))
inlier_ratio = num_inliers / len(distance)
gt_flag = 1
# calculate the transformation matrix using RANSAC, this is for Registration Recall.
source_pcd = open3d.PointCloud()
source_pcd.points = open3d.utility.Vector3dVector(source_keypts)
target_pcd = open3d.PointCloud()
target_pcd.points = open3d.utility.Vector3dVector(target_keypts)
s_desc = open3d.registration.Feature()
s_desc.data = source_desc.T
t_desc = open3d.registration.Feature()
t_desc.data = target_desc.T
result = open3d.registration_ransac_based_on_feature_matching(
source_pcd, target_pcd, s_desc, t_desc,
0.05,
open3d.TransformationEstimationPointToPoint(False), 3,
[open3d.CorrespondenceCheckerBasedOnEdgeLength(0.9),
open3d.CorrespondenceCheckerBasedOnDistance(0.05)],
open3d.RANSACConvergenceCriteria(50000, 1000))
# write the transformation matrix into .log file for evaluation.
with open(os.path.join(logpath, f'{desc_name}_{timestr}.log'), 'a+') as f:
trans = result.transformation
trans = np.linalg.inv(trans)
s1 = f'{id1}\t {id2}\t 37\n'
f.write(s1)
f.write(f"{trans[0,0]}\t {trans[0,1]}\t {trans[0,2]}\t {trans[0,3]}\t \n")
f.write(f"{trans[1,0]}\t {trans[1,1]}\t {trans[1,2]}\t {trans[1,3]}\t \n")
f.write(f"{trans[2,0]}\t {trans[2,1]}\t {trans[2,2]}\t {trans[2,3]}\t \n")
f.write(f"{trans[3,0]}\t {trans[3,1]}\t {trans[3,2]}\t {trans[3,3]}\t \n")
# write the result into resultpath so that it can be re-shown.
s = f"{cloud_bin_s}\t{cloud_bin_t}\t{num_inliers}\t{inlier_ratio:.8f}\t{gt_flag}"
with open(os.path.join(resultpath, f'{cloud_bin_s}_{cloud_bin_t}.rt.txt'), 'w+') as f:
f.write(s)
return num_inliers, inlier_ratio, gt_flag
def read_register_result(resultpath, id1, id2):
"""
Read the registration result of {id1} & {id2} from the resultpath
Return values contain the inlier_number, inlier_ratio, flag(indicating whether this pair is a ground truth match).
"""
cloud_bin_s = f'cloud_bin_{id1}'
cloud_bin_t = f'cloud_bin_{id2}'
with open(os.path.join(resultpath, f'{cloud_bin_s}_{cloud_bin_t}.rt.txt'), 'r') as f:
content = f.readlines()
nums = content[0].replace("\n", "").split("\t")[2:5]
return nums
def deal_with_one_scene(inlier_ratio, distance_threshold, scene):
"""
Function to register all the fragments pairs in one scene.
"""
logpath = f"log_result/{scene}-evaluation"
pcdpath = f"../data/3DMatch/fragments/{scene}/"
keyptspath = f"{desc_name}_{timestr}/keypoints/{scene}"
descpath = f"{desc_name}_{timestr}/descriptors/{scene}"
gtpath = f'gt_result/{scene}-evaluation/'
gtLog = loadlog(gtpath)
resultpath = f"pred_result/{scene}/{desc_name}_result_{timestr}"
if not os.path.exists(f"pred_result/{scene}/"):
os.mkdir(f"pred_result/{scene}/")
if not os.path.exists(resultpath):
os.mkdir(resultpath)
if not os.path.exists(logpath):
os.mkdir(logpath)
# register each pair
num_frag = len([filename for filename in os.listdir(pcdpath) if filename.endswith('ply')])
    print(f"Start evaluating descriptor {desc_name} for {scene}")
start_time = time.time()
for id1 in range(num_frag):
for id2 in range(id1 + 1, num_frag):
register2Fragments(id1, id2, keyptspath, descpath, resultpath, logpath, gtLog, desc_name, inlier_ratio, distance_threshold)
    print(f"Finished evaluation, time: {time.time() - start_time:.2f}s")
if __name__ == '__main__':
scene_list = [
'7-scenes-redkitchen',
'sun3d-home_at-home_at_scan1_2013_jan_1',
'sun3d-home_md-home_md_scan9_2012_sep_30',
'sun3d-hotel_uc-scan3',
'sun3d-hotel_umd-maryland_hotel1',
'sun3d-hotel_umd-maryland_hotel3',
'sun3d-mit_76_studyroom-76-1studyroom2',
'sun3d-mit_lab_hj-lab_hj_tea_nov_2_2012_scan1_erika'
]
# will evaluate the descriptor in `{desc_name}_{timestr}` folder.
desc_name = sys.argv[1]
timestr = sys.argv[2]
# inlier_ratio = float(sys.argv[3])
# distance_threshold = float(sys.argv[4])
inlier_ratio = 0.05 # 5%
distance_threshold = 0.10 # 10cm
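    # These defaults (5% inlier ratio, 10 cm inlier distance) follow the commonly used
    # 3DMatch feature-matching-recall evaluation settings.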
# multiprocessing to register each pair in each scene.
# this part is time-consuming
from multiprocessing import Pool
pool = Pool(len(scene_list))
func = partial(deal_with_one_scene, inlier_ratio, distance_threshold)
pool.map(func, scene_list)
pool.close()
pool.join()
# collect all the data and print the results.
inliers_list = []
recall_list = []
inliers_ratio_list = []
pred_match = 0
gt_match = 0
for scene in scene_list:
# evaluate
pcdpath = f"../data/3DMatch/fragments/{scene}/"
resultpath = f"pred_result/{scene}/{desc_name}_result_{timestr}"
num_frag = len([filename for filename in os.listdir(pcdpath) if filename.endswith('ply')])
result = []
for id1 in range(num_frag):
for id2 in range(id1 + 1, num_frag):
line = read_register_result(resultpath, id1, id2)
result.append([int(line[0]), float(line[1]), int(line[2])]) # inlier_number, inlier_ratio, flag.
result = np.array(result)
gt_results = np.sum(result[:, 2] == 1)
pred_results = np.sum(result[:, 1] > inlier_ratio)
pred_match += pred_results
gt_match += gt_results
recall = float(pred_results / gt_results) * 100
        print(f"Correct matches {pred_results}, ground truth matches {gt_results}")
        print(f"Recall {recall}%")
        ave_num_inliers = np.sum(np.where(result[:, 2] == 1, result[:, 0], np.zeros(result.shape[0]))) / pred_results
        print(f"Average Num Inliers: {ave_num_inliers}")
        ave_inlier_ratio = np.sum(np.where(result[:, 2] == 1, result[:, 1], np.zeros(result.shape[0]))) / pred_results
        print(f"Average Inlier Ratio: {ave_inlier_ratio}")
recall_list.append(recall)
inliers_list.append(ave_num_inliers)
inliers_ratio_list.append(ave_inlier_ratio)
print("*" * 40)
print(recall_list)
    # print(f"True Average Recall: {pred_match / gt_match * 100}%")
    print(f"Matching Recall Std: {np.std(recall_list)}")
    average_recall = sum(recall_list) / len(recall_list)
    print(f"All 8 scenes, average recall: {average_recall}%")
    average_inliers = sum(inliers_list) / len(inliers_list)
    print(f"All 8 scenes, average num inliers: {average_inliers}")
    average_inliers_ratio = sum(inliers_ratio_list) / len(inliers_ratio_list)
    print(f"All 8 scenes, average inlier ratio: {average_inliers_ratio}")
|
91019
|
import multiprocessing
from fastai.dataset import *
from fasterai.files import *
from pathlib import Path
from itertools import repeat
from PIL import Image
from numpy import ndarray
from datetime import datetime
def generate_image_preprocess_path(source_path: Path, is_x:bool, uid: str):
name = generate_image_preprocess_name(source_path, is_x, uid)
path = source_path.parent/name
return path
def generate_image_preprocess_name(source_path: Path, is_x:bool, uid: str):
return generate_preprocess_name(source_path, is_x, uid)
def transform_image_and_save_new(function, sourcepath: Path, destpath: Path):
try:
with Image.open(sourcepath) as image:
image = function(image)
image.save(destpath)
except Exception as ex:
print(ex)
def transform_images_to_new_directory(function, sourceroot: Path, destroot: Path):
destroot.mkdir(exist_ok=True)
raw_sourcepaths, _, _ = folder_source(sourceroot.parent, sourceroot.name)
    # First make the destination directories if they don't already exist - we want the subsequent operations to be threadsafe.
    # Then create another generator of destpaths for use in the image generation.
generate_folders_for_dest(destpaths=dest_path_generator(sourceroot=sourceroot, raw_sourcepaths=raw_sourcepaths, destroot=destroot))
destpaths = dest_path_generator(sourceroot=sourceroot, raw_sourcepaths=raw_sourcepaths, destroot=destroot)
sourcepaths = (sourceroot.parent/Path(raw_sourcepath) for raw_sourcepath in raw_sourcepaths)
numthreads = multiprocessing.cpu_count()//2
with ThreadPoolExecutor(numthreads) as e:
try:
e.map(partial(transform_image_and_save_new, function), sourcepaths, destpaths)
except Exception as ex:
print(ex)
def resize_image(im: Image, targ: int):
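    # Scale the shorter side to `targ` while preserving the aspect ratio (scale_to presumably comes from the fastai star-import).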
r,c = im.size
ratio = targ/min(r,c)
sz = (scale_to(r, ratio, targ), scale_to(c, ratio, targ))
return im.resize(sz, Image.LINEAR)
def to_grayscale_image(image: Image):
return image.convert('L')
class EasyTensorImage():
def __init__(self, source_tensor: torch.Tensor, ds:FilesDataset):
self.array = self._convert_to_denormed_ndarray(source_tensor, ds=ds)
self.tensor = self._convert_to_denormed_tensor(self.array)
def _convert_to_denormed_ndarray(self, raw_tensor: torch.Tensor, ds:FilesDataset):
raw_array = raw_tensor.clone().data.cpu().numpy()
if raw_array.shape[1] != 3:
array = np.zeros((3, 1, 1))
return array
else:
return ds.denorm(raw_array)[0]
def _convert_to_denormed_tensor(self, denormed_array: ndarray):
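        # np.moveaxis converts the denormalized HWC image array back to CHW before wrapping it as a tensor/Variable.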
return V(np.moveaxis(denormed_array,2,0))
class ModelImageSet():
@staticmethod
def get_list_from_model(ds: FilesDataset, model: nn.Module, idxs:[int]):
image_sets = []
rand = ModelImageSet._is_random_vector(ds[0][0])
training = model.training
model.eval()
for idx in idxs:
x,y=ds[idx]
if rand:
#Making fixed noise, for consistent output
np.random.seed(idx)
orig_tensor = VV(np.random.normal(loc=0.0, scale=1.0, size=(1, x.shape[0],1,1)))
else:
orig_tensor = VV(x[None])
real_tensor = V(y[None])
gen_tensor = model(orig_tensor)
gen_easy = EasyTensorImage(gen_tensor, ds)
orig_easy = EasyTensorImage(orig_tensor, ds)
real_easy = EasyTensorImage(real_tensor, ds)
image_set = ModelImageSet(orig_easy,real_easy,gen_easy)
image_sets.append(image_set)
        # resetting the seed so the noise is random again
if rand:
np.random.seed()
if training:
model.train()
return image_sets
@staticmethod
def _is_random_vector(x):
return x.shape[0] != 3
def __init__(self, orig: EasyTensorImage, real: EasyTensorImage, gen: EasyTensorImage):
self.orig=orig
self.real=real
self.gen=gen
|
91061
|
from discord.ext import commands
from .utils import utils
import datetime
import asyncio
import discord
import logging
log = logging.getLogger(__name__)
def check_roles(ctx):
if ctx.message.author.id == 1<PASSWORD>:
return True
return utils.check_roles(ctx, "Mod", "admin_roles")
def has_mute_role(ctx):
return utils.redis.smembers("{}:Mod:mute_roles".format(ctx.message.guild.id))
class get_person(commands.MemberConverter):
def __init__(self, *, lower=False):
self.lower = lower
super().__init__()
class Mod(commands.Cog):
"""
A mod tool for Mods.
"""
def __init__(self, bot):
self.bot = bot
self.redis = bot.db.redis
self.bot.say_edit = bot.say
def __local_check(self,ctx):
return utils.is_enable(ctx,"mod") or ctx.message.author.id == self.bot.owner.id
def delete_mine(self,m):
return m.author.id == self.bot.user.id
#########################################
# _____ _ #
# / ____| | | #
# | | | | ___ __ _ _ __ #
# | | | | / _ \ / _` | | '_ \ #
# | |____ | | | __/ | (_| | | | | | #
# \_____| |_| \___| \__,_| |_| |_| #
#########################################
@commands.group(brief="Allow to clean bot itself, have subcommand",invoke_without_command=True)
@commands.check(check_roles)
# async def clean(self, ctx, *, limit:int=100):
async def clean(self, ctx,limit:int = 100,user:commands.MemberConverter or bool = False,):
"""
        Is able to clear up the bot's own messages.
        Can also clear a specific user's messages by mentioning them.
"""
if limit > 2000:
            return await self.bot.say(ctx,content = "Won't be able to delete: {limit}/2000 messages requested.".format(limit = limit))
if user:
counter = await ctx.message.channel.purge(check=lambda m:m.author.id == user.id,limit=limit)
await self.bot.say(ctx,content = "```py\nI cleared {} posts from {}```".format(len(counter),user.name))
else:
counter = await ctx.message.channel.purge(limit = limit,check=self.delete_mine)
await self.bot.say(ctx,content = "```py\nI cleared {} posts of mine\n```".format(len(counter)))
@clean.command(brief= "Is able to clear a certain role's messages",pass_context=True, invoke_without_command=True)
@commands.check(check_roles)
@commands.bot_has_permissions(manage_messages=True)
async def role(self,ctx,roles : discord.Role,limit : int=100):
"""
<prefix> role <the role> <optional, number of messages, default: 100>
Is able to clear messages of all users who have this role.
"""
def delete_role(m):
return roles.id in [r.id for r in m.author.roles]
counter = await ctx.message.channel.purge(limit=limit,check=delete_role)
await self.bot.say(ctx, content = "```py\nI cleared {} from person who have role of {}\n```".format(len(counter),roles.name))
@clean.command(brief="Is able to clear a certain user's messages",invoke_without_command=True)
@commands.check(check_roles)
@commands.bot_has_permissions(manage_messages=True)
async def person(self,ctx,user: discord.Member,*,limit: int = 100):
"""
<prefix> person <the person> <optional, number of messages, default 100>
Is able to clear the messages of a certain person.
"""
def delete_player(m):
return m.author.id == user.id
counter = await ctx.message.channel.purge(check=delete_player,limit=limit)
await self.bot.say(ctx,content = "```py\nI cleared {} posts from {}```".format(len(counter),user.name))
@clean.command(name = "all",brief="Allow to clear all message", invoke_without_command=True)
@commands.check(check_roles)
@commands.bot_has_permissions(manage_messages=True)
async def _all(self,ctx,*,limit: int=100):
"""
        <prefix> all <optional, number of messages, default 100>
        Clears all messages in the channel, regardless of author.
"""
counter = await ctx.message.channel.purge(limit =limit)
await self.bot.say(ctx,content = "```py\nI cleared {} posts```".format(len(counter)))
#############################################################
# _ __ _ _ __ ____ #
# | |/ / (_) | | / / | _ \ #
# | ' / _ ___ | | __ / / | |_) | __ _ _ __ #
# | < | | / __| | |/ / / / | _ < / _` | | '_ \ #
# | . \ | | | (__ | < / / | |_) | | (_| | | | | |#
# |_|\_\ |_| \___| |_|\_\ /_/ |____/ \__,_| |_| |_|#
#############################################################
def format_reason(self,ctx,reason):
if reason is None:
reason = "Request by {}".format(ctx.message.author)
else:
reason += " Request by {}".format(ctx.message.author)
return reason
@commands.command(brief="Is able to kick a user")
@commands.check(check_roles)
@commands.bot_has_permissions(kick_members=True)
async def kick(self,ctx,user:discord.Member,*,reason:str = None):
"""
<prefix> kick <user name>
Mentioning is a faster way to get the user.
        Is able to kick a user from the guild.
"""
await ctx.message.guild.kick(user,reason = self.format_reason(ctx,reason))
await self.bot.say(ctx,content = "I have kicked {}".format(user.name))
@commands.command(brief="Is able to ban a user")
@commands.check(check_roles)
@commands.bot_has_permissions( ban_members=True)
async def ban(self,ctx,user:discord.Member,*,reason:str = None):
"""
        <prefix> ban <user name> <optional reason>
        Mentioning is a faster way to get the user.
        Is able to ban a user from the guild; messages from the last day are deleted (the library default).
"""
await ctx.message.guild.ban(user,reason = self.format_reason(ctx,reason))
await self.bot.say(ctx,content = "I have banned {}".format(user.name))
@commands.command(brief="Is able to softban a user which is equal to kicking him and deleting his messages")
@commands.check(check_roles)
@commands.bot_has_permissions( ban_members=True)
async def softban(self,ctx,user:discord.Member,*,reason:str = None):
"""
        <prefix> softban <user name> <optional reason>
        This is just banning + immediately unbanning, which deletes the user's recent messages.
        Is able to kick a user and delete their messages.
"""
await ctx.message.guild.ban(user,reason = self.format_reason(ctx,reason))
await ctx.message.guild.unban(user)
await self.bot.say(ctx,content = "I have softbanned {}".format(user.name))
#################################
# _____ _ #
# | __ \ | | #
# | |__) | ___ | | ___ #
# | _ / / _ \ | | / _ \ #
# | | \ \ | (_) | | | | __/ #
# |_| \_\ \___/ |_| \___| #
#################################
@commands.group(name = "role",brief="Multi subcommand related to role",invoke_without_command=True)
@commands.check(check_roles)
@commands.bot_has_permissions(manage_roles=True)
async def _role(self):
"""
        A command group for role management.
        Run
        <prefix> help role
        to see more information about its subcommands.
"""
return
@_role.command(brief="Is able to add a role to a user")
@commands.check(check_roles)
@commands.bot_has_permissions(manage_roles=True)
async def add(self,ctx,user:discord.Member,*role:discord.Role):
"""
<prefix> add <user name> <the role>
        Is able to add a role to a member; this is useful for people who are on their phone.
        You can also add multiple roles to a member at the same time.
        Note: role names are case-sensitive; if the role name contains capital letters, include them in the command.
"""
await user.add_roles(*role,reason = "Request by {}".format(ctx.message.author))
await self.bot.say(ctx,content = "Added a role to {}".format(user.name))
@_role.command(brief="Is able to remove a role from a user")
@commands.check(check_roles)
@commands.bot_has_permissions(manage_roles=True)
async def remove(self,ctx,user:discord.Member,*role:discord.Role):
"""
<prefix> remove <user name> <the role>
        Is able to remove a role from a member; this is useful for people who are on their phone.
        You can also remove multiple roles from a member at the same time.
        Note: role names are case-sensitive; if the role name contains capital letters, include them in the command.
"""
await user.remove_roles(*role,reason ="Request by {}".format(ctx.message.author))
        await self.bot.say(ctx,content = "Removed role from {}".format(user.name))
##########################
# __ __ _ #
# | \/ |_ _| |_ ___ #
# | |\/| | || | _/ -_)#
# |_| |_|\_,_|\__\___|#
##########################
@commands.command(brief="Mute user")
@commands.check(check_roles)
@commands.check(has_mute_role)
@commands.bot_has_permissions(manage_roles=True)
async def mute(self,ctx,user:discord.Member):
mute_role = await self.redis.smembers("{}:Mod:mute_roles".format(ctx.message.guild.id))
guild = ctx.message.guild
if guild.me.top_role.permissions.manage_roles: # if got Manage roles permission, can grant roles
role = [x for x in guild.roles if str(x.id) in mute_role]
await user.add_roles(*role,reason = "{} requests with mute command".format(ctx.message.author))
await self.bot.say(ctx,content = "Done muting {}".format(user.mention))
@commands.command(brief="Unmute user")
@commands.check(check_roles)
@commands.check(has_mute_role)
@commands.bot_has_permissions(manage_roles=True)
async def unmute(self,ctx,user:discord.Member):
mute_role = await self.redis.smembers("{}:Mod:mute_roles".format(ctx.message.guild.id))
guild = ctx.message.guild
if guild.me.top_role.permissions.manage_roles: # if got Manage roles permission, can grant roles
role = [x for x in guild.roles if str(x.id) in mute_role]
try:
await user.remove_roles(*role,reason = "{} requests with unmute command".format(ctx.message.author))
await self.bot.say(ctx,content = "Done unmuting {}".format(user.mention))
            except discord.HTTPException:
pass
def setup(bot):
bot.add_cog(Mod(bot))
|
91075
|
NUM_VERSION = (0, 1)
VERSION = ".".join(str(nv) for nv in NUM_VERSION)
__version__ = VERSION
try:
from .encbup import (
File,
Key,
Reader,
Writer,
main,
)
except ImportError:
# Ignore if dependencies haven't been satisfied yet (when setting up).
pass
|
91079
|
import torch
from torch import nn
class BaseAligner(nn.Module):
def __init__(self, add_identity = True, add_bias = True):
super(BaseAligner, self).__init__()
self.n_parameters = None
self.add_bias = add_bias
self.add_identity = add_identity
if self.add_bias:
self.bias_slice = None
    def get_inverse_matrix(self, parameters):
raise NotImplementedError
def get_transformation(self, parameters):
transformation = {"matrix": self.get_matrix(parameters)}
if self.add_bias:
transformation["bias"] = parameters[:, self.bias_slice].contiguous()
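        # Orthogonality regularizer: reg_i = ||M_i M_i^T - I||_F for each matrix in the batch,
        # penalizing transformations that drift away from a pure rotation.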
transformation["reg"] = torch.norm(
torch.bmm(
transformation["matrix"],
torch.transpose(transformation["matrix"], 1, 2)
) - torch.eye(transformation["matrix"].size(-1)).unsqueeze(0).to(parameters.device),
dim = (1, 2)
)
return transformation
def forward_dim(self, x, parameters, dim, verbose = False):
transformation = self.get_transformation(parameters)
x = torch.matmul(transformation["matrix"].unsqueeze(dim), x)
if self.add_bias:
x = x + transformation["bias"].unsqueeze(2).unsqueeze(dim)
return x, transformation["reg"]
def forward(self, x, parameters, verbose = False):
transformation = self.get_transformation(parameters)
x = torch.bmm(transformation["matrix"], x)
if self.add_bias:
x = x + transformation["bias"].unsqueeze(2)
return x, transformation["reg"]
class AffineAligner(BaseAligner):
"""Affine transformation"""
def __init__(self, add_identity = True, add_bias = True, dim = 3):
super(AffineAligner, self).__init__(add_identity, add_bias)
self.dim = dim
self.n_parameters = dim**2
if self.add_bias:
self.bias_slice = slice(-self.dim, None)
self.n_parameters += dim
def get_inverse_matrix(self, parameters):
return torch.inverse(self.get_matrix(parameters))
def get_matrix(self, parameters):
batch_size, n_parameters = parameters.size()
assert n_parameters == self.n_parameters
matrix = parameters[:, :self.dim*self.dim].contiguous()
matrix = matrix.view(batch_size, self.dim, self.dim).contiguous()
if self.add_identity:
matrix = matrix + torch.eye(self.dim).unsqueeze(0).to(matrix.device)
return matrix
class dAligner(BaseAligner):
def __init__(self, add_identity = True, add_bias = True):
super(dAligner, self).__init__(add_identity, add_bias)
self.n_parameters = 1
if self.add_bias:
self.bias_slice = slice(1, 4)
self.n_parameters += 3
def get_matrix(self, parameters):
d = parameters[:, 0].contiguous()
if self.add_identity:
d = 1. + d
return torch.eye(3).unsqueeze(0).to(parameters.device) * d.unsqueeze(-1).unsqueeze(-1)
class DAligner(BaseAligner):
def __init__(self, add_identity = True, add_bias = True):
super(DAligner, self).__init__(add_identity, add_bias)
self.n_parameters = 3
if self.add_bias:
self.bias_slice = slice(3, 6)
self.n_parameters += 3
def get_matrix(self, parameters):
D = parameters[:, :3].contiguous()
if self.add_identity:
D = 1. + D
return torch.eye(3).unsqueeze(0).to(parameters.device) * D.unsqueeze(-1)
class D6Aligner(BaseAligner):
def __init__(self, add_identity = True, add_bias = True):
super(D6Aligner, self).__init__(add_identity, add_bias)
self.n_parameters = 6
self.epsilon = 10**(-10)
if self.add_bias:
self.bias_slice = slice(6, 9)
self.n_parameters += 3
def get_inverse_matrix(self, parameters):
return self.get_rotation(parameters).permute(0, 2, 1)
def get_matrix(self, parameters):
return self.get_rotation(parameters)
def get_rotation(self, parameters):
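        # The first six parameters are a continuous 6D rotation representation: two raw 3-vectors
        # are orthonormalized (normalization plus cross products, i.e. Gram-Schmidt) to form the
        # columns of a rotation matrix, following Zhou et al., "On the Continuity of Rotation
        # Representations in Neural Networks".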
# batch*n
        def normalize_vector(v, return_mag=False):
            batch = v.shape[0]
            v_mag = torch.sqrt(v.pow(2).sum(1))  # batch
            # clamp avoids division by zero and keeps the op on the input's device (no hard-coded .cuda())
            v_mag = torch.clamp(v_mag, min=1e-8)
            v_mag = v_mag.view(batch, 1).expand(batch, v.shape[1])
            v = v / v_mag
            if return_mag:
                return v, v_mag[:, 0]
            else:
                return v
# u, v batch*n
def cross_product( u, v):
batch = u.shape[0]
i = u[:,1]*v[:,2] - u[:,2]*v[:,1]
j = u[:,2]*v[:,0] - u[:,0]*v[:,2]
k = u[:,0]*v[:,1] - u[:,1]*v[:,0]
out = torch.cat((i.view(batch,1), j.view(batch,1), k.view(batch,1)),1)#batch*3
return out
x_raw = parameters[:, 0:3].contiguous()#batch*3
y_raw = parameters[:, 3:6].contiguous()#batch*3
if self.add_identity:
x_raw[:, 0] = 1. + x_raw[:, 0]
y_raw[:, 1] = 1. + y_raw[:, 1]
x = normalize_vector(x_raw) #batch*3
z = cross_product(x,y_raw) #batch*3
z = normalize_vector(z)#batch*3
y = cross_product(z,x)#batch*3
x = x.view(-1,3,1)
y = y.view(-1,3,1)
z = z.view(-1,3,1)
return torch.cat((x,y,z), 2) #batch*3*3
class QAligner(BaseAligner):
def __init__(self, add_identity = True, add_bias = True):
super(QAligner, self).__init__(add_identity, add_bias)
self.n_parameters = 4
self.epsilon = 10**(-10)
if self.add_bias:
self.bias_slice = slice(4, 7)
self.n_parameters += 3
def get_inverse_matrix(self, parameters):
return self.get_rotation(parameters).permute(0, 2, 1)
def get_matrix(self, parameters):
return self.get_rotation(parameters)
def get_rotation(self, parameters):
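        # The first four parameters are read as a quaternion (w, x, y, z); it is normalized to unit
        # length and expanded into the standard quaternion-to-rotation-matrix formula below.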
matrix_params = parameters[:, :4].contiguous()
if self.add_identity:
matrix_params[:, 0] = 1. + matrix_params[:, 0]
norm = torch.sqrt((matrix_params * matrix_params).sum(axis = -1))
matrix_params = (matrix_params / (norm.unsqueeze(-1) + self.epsilon)).permute(1, 0)
t2 = matrix_params[0]*matrix_params[1]
t3 = matrix_params[0]*matrix_params[2]
t4 = matrix_params[0]*matrix_params[3]
t5 = -matrix_params[1]*matrix_params[1]
t6 = matrix_params[1]*matrix_params[2]
t7 = matrix_params[1]*matrix_params[3]
t8 = -matrix_params[2]*matrix_params[2]
t9 = matrix_params[2]*matrix_params[3]
t10 = -matrix_params[3]*matrix_params[3]
return torch.eye(3).unsqueeze(0).to(matrix_params.device) + 2 * torch.cat([
torch.cat([
(t8 + t10).unsqueeze(0),
(t6 - t4).unsqueeze(0),
(t3 + t7).unsqueeze(0)
], 0).unsqueeze(0),
torch.cat([
(t4 + t6).unsqueeze(0),
(t5 + t10).unsqueeze(0),
(t9 - t2).unsqueeze(0)
], 0).unsqueeze(0),
torch.cat([
(t7 - t3).unsqueeze(0),
(t2 + t9).unsqueeze(0),
(t5 + t8).unsqueeze(0)
], 0).unsqueeze(0),
], 0).permute(2, 1, 0)
class QzAligner(QAligner):
def get_rotation(self, parameters):
parameters[:, 1] = 0.
parameters[:, 3] = 0.
return super(QzAligner, self).get_rotation(parameters)
class QdAligner(QAligner):
def __init__(self, *args, **kwargs):
super(QdAligner, self).__init__(*args, **kwargs)
self.n_parameters += 1
def get_d(self, parameters):
d = parameters[:, -1].contiguous()
if self.add_identity:
d = 1. + d
return d.unsqueeze(-1).unsqueeze(-1)
def get_inverse_matrix(self, parameters):
return (1. / self.get_d(parameters)) * self.get_rotation(parameters).permute(0, 2, 1)
def get_matrix(self, parameters):
return self.get_d(parameters) * self.get_rotation(parameters)
class dQAligner(QdAligner):
def __init__(self, *args, **kwargs):
super(dQAligner, self).__init__(*args, **kwargs)
class QDAligner(QAligner):
def __init__(self, *args, **kwargs):
super(QDAligner, self).__init__(*args, **kwargs)
self.n_parameters += 3
def get_D(self, parameters):
D = parameters[:, -3:].contiguous()
if self.add_identity:
D = 1. + D
return D
def get_inverse_matrix(self, parameters):
return (1. / self.get_D(parameters).unsqueeze(-1)) * self.get_rotation(parameters).permute(0, 2, 1)
def get_matrix(self, parameters):
return self.get_D(parameters).unsqueeze(-2) * self.get_rotation(parameters)
class DQAligner(QDAligner):
def __init__(self, *args, **kwargs):
super(DQAligner, self).__init__(*args, **kwargs)
def get_inverse_matrix(self, parameters):
return (1. / self.get_D(parameters).unsqueeze(-2)) * self.get_rotation(parameters).permute(0, 2, 1)
def get_matrix(self, parameters):
return self.get_D(parameters).unsqueeze(-1) * self.get_rotation(parameters)
class DQDAligner(QAligner):
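    # A rotation sandwiched between two learned diagonal scalings: M = diag(Dl) @ R @ diag(Dr).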
def __init__(self, *args, **kwargs):
super(DQDAligner, self).__init__(*args, **kwargs)
self.n_parameters += 6
def get_Dr(self, parameters):
D = parameters[:, -3:].contiguous()
if self.add_identity:
D = 1. + D
return D
def get_Dl(self, parameters):
D = parameters[:, -6:-3].contiguous()
if self.add_identity:
D = 1. + D
return D
def get_inverse_matrix(self, parameters):
return (1. / self.get_Dl(parameters).unsqueeze(-2)) * (self.get_rotation(parameters).permute(0, 2, 1) * (1. / self.get_Dr(parameters).unsqueeze(-1)))
def get_matrix(self, parameters):
return self.get_Dl(parameters).unsqueeze(-1) * (self.get_rotation(parameters) * self.get_Dr(parameters).unsqueeze(-2))
|
91080
|
import sys
import os
import numpy as np
import cv2
from PIL import Image, ImageDraw, ImageFont
import acl
path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(path, ".."))
sys.path.append(os.path.join(path, "../../../../common/"))
from acllite_resource import AclLiteResource
from acllite_model import AclLiteModel
from acllite_image import AclLiteImage
from acllite_imageproc import AclLiteImageProc
import constants as const
import utils
currentPath = os.path.join(path, "..")
OUTPUT_DIR = os.path.join(currentPath, 'out')
MODEL_PATH = os.path.join(currentPath,"model/vgg16_cat_dog.om")
MODEL_WIDTH = 224
MODEL_HEIGHT = 256
CLS = ['dog', 'cat']
class Classify(object):
"""
    Class for cat/dog image classification
"""
def __init__(self, model_path, model_width, model_height):
self._model_path = model_path
self._model_width = model_width
self._model_height = model_height
self._img_width = 0
self._img_height = 0
self._model = None
self._dvpp = None
def init(self):
"""
Initialize
"""
self._dvpp = AclLiteImageProc()
# Load model
self._model = AclLiteModel(self._model_path)
return const.SUCCESS
@utils.display_time
def pre_process(self, image):
"""
preprocess
"""
image_dvpp = image.copy_to_dvpp()
yuv_image = self._dvpp.jpegd(image_dvpp)
resized_image = self._dvpp.resize(yuv_image,
self._model_width, self._model_height)
return resized_image
@utils.display_time
def inference(self, input_data):
"""
model inference
"""
return self._model.execute(input_data)
@utils.display_time
def post_process(self, infer_output, image_file):
"""
Post-processing, analysis of inference results
"""
output_path = os.path.join(OUTPUT_DIR, os.path.basename(image_file))
infer_result = infer_output[0]
vals = infer_result.flatten()
pre_index = vals.argsort()[-1]
origin_img = Image.open(image_file)
draw = ImageDraw.Draw(origin_img)
font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", size=20)
draw.text((10, 50), CLS[pre_index], font=font, fill=255)
origin_img.save(output_path)
def main():
"""
main
"""
image_dir = os.path.join(currentPath, "data" )
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
acl_resource = AclLiteResource()
acl_resource.init()
classify = Classify(MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT)
ret = classify.init()
utils.check_ret("Classify init ", ret)
images_list = [os.path.join(image_dir, img)
for img in os.listdir(image_dir)
if os.path.splitext(img)[1] in const.IMG_EXT]
for image_file in images_list:
        print('=== ' + os.path.basename(image_file) + ' ===')
# read image
image = AclLiteImage(image_file)
# Preprocess the picture
resized_image = classify.pre_process(image)
        # Inference
        result = classify.inference([resized_image, ])
        # Post-processing
classify.post_process(result, image_file)
if __name__ == '__main__':
main()
|
91085
|
import json
import multiprocessing
from multiprocessing import Process
import os
import random
import time
import traceback
import zmq
import zmq.decorators as zmqd
from aser.database.kg_connection import ASERKGConnection
from aser.server.utils import *
from aser.extract.aser_extractor import SeedRuleASERExtractor
from aser.utils.config import ASERCmd
class ASERServer(object):
def __init__(self, opt):
self.opt = opt
self.port = opt.port
self.n_concurrent_back_socks = opt.n_concurrent_back_socks
self.n_workers = opt.n_workers
self.aser_sink = None
self.aser_db = None
self.aser_workers = []
self.run()
def run(self):
self._run()
def close(self):
        for corenlp in getattr(self, "corenlp_servers", []):  # not set in __init__; guard against AttributeError
corenlp.close()
self.aser_sink.close()
self.aser_db.close()
for worker in self.aser_workers:
worker.close()
@zmqd.context()
@zmqd.socket(zmq.PULL)
@zmqd.socket(zmq.PAIR)
@zmqd.socket(zmq.PUSH)
def _run(self, ctx, client_msg_receiver, sink_addr_receiver, db_sender):
total_st = time.time()
client_msg_receiver.bind("tcp://*:%d" % self.port)
sink_addr_receiver_addr = sockets_ipc_bind(sink_addr_receiver)
self.aser_sink = ASERSink(self.opt, sink_addr_receiver_addr)
self.aser_sink.start()
sink_receiver_addr = sink_addr_receiver.recv().decode("utf-8")
db_senders = []
db_addr_list = []
for _ in range(self.n_concurrent_back_socks):
_socket = ctx.socket(zmq.PUSH)
addr = sockets_ipc_bind(_socket)
db_senders.append(_socket)
db_addr_list.append(addr)
self.aser_db = ASERDataBase(self.opt, db_addr_list, sink_receiver_addr)
self.aser_db.start()
worker_senders = []
worker_addr_list = []
for _ in range(self.n_concurrent_back_socks):
_socket = ctx.socket(zmq.PUSH)
addr = sockets_ipc_bind(_socket)
worker_senders.append(_socket)
worker_addr_list.append(addr)
for i in range(self.n_workers):
self.aser_workers.append(
ASERWorker(self.opt, i, worker_addr_list, sink_receiver_addr)
)
self.aser_workers[i].start()
print("Loading Server Finished in {:.4f} s".format(time.time() - total_st))
worker_sender_id = -1
db_sender_id = -1
cnt = 0
st = time.time()
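        # Routing loop: eventuality-extraction requests go to a randomly chosen worker socket,
        # all other commands (KG lookups) go to a DB socket; results from both are funnelled
        # back to clients through the sink's PUB socket.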
while True:
try:
client_msg = client_msg_receiver.recv_multipart()
client_id, req_id, cmd, data = client_msg
if cmd == ASERCmd.extract_events:
worker_sender_id, worker_sender = random.choice(
[(i, sender) for i, sender in enumerate(worker_senders)
if i != worker_sender_id])
worker_sender.send_multipart(client_msg)
else:
db_sender_id, db_sender = random.choice(
[(i, sender) for i, sender in enumerate(db_senders)
if i != db_sender_id])
db_sender.send_multipart(client_msg)
cnt += 1
# print("sender speed: {:.4f} / call".format((time.time() - st) / cnt))
print("Sender cnt {}".format(cnt))
except Exception:
print(traceback.format_exc())
class ASERDataBase(Process):
def __init__(self, opt, db_sender_addr_list, sink_addr):
super().__init__()
self.db_sender_addr_list = db_sender_addr_list
self.sink_addr = sink_addr
print("Connect to the KG...")
st = time.time()
kg_dir = opt.kg_dir
self.ASER_KG = ASERKGConnection(db_path=os.path.join(kg_dir, "KG.db"), mode="cache")
print("Connect to the KG finished in {:.4f} s".format(time.time() - st))
def run(self):
self._run()
def close(self):
self.ASER_KG.close()
self.terminate()
self.join()
@zmqd.context()
@zmqd.socket(zmq.PUSH)
def _run(self, ctx, sink):
receiver_sockets = []
poller = zmq.Poller()
for db_sender_addr in self.db_sender_addr_list:
_socket = ctx.socket(zmq.PULL)
_socket.connect(db_sender_addr)
receiver_sockets.append(_socket)
poller.register(_socket)
sink.connect(self.sink_addr)
print("ASER DB started")
cnt = 0
st = time.time()
while True:
try:
events = dict(poller.poll())
for sock_idx, sock in enumerate(receiver_sockets):
if sock in events:
client_id, req_id, cmd, data = sock.recv_multipart()
# print("DB received msg ({}, {}, {}, {})".format(
# client_id.decode("utf-8"), req_id.decode("utf-8"),
# cmd.decode("utf-8"), data.decode("utf-8")
# ))
if cmd == ASERCmd.exact_match_event:
ret_data = self.handle_exact_match_event(data)
elif cmd == ASERCmd.exact_match_relation:
ret_data = self.handle_exact_match_relation(data)
elif cmd == ASERCmd.fetch_related_events:
ret_data = self.handle_fetch_related_events(data)
else:
raise RuntimeError
sink.send_multipart([client_id, req_id, cmd, ret_data])
cnt += 1
print("DB cnt {}".format(cnt))
# print("DB speed: {:.4f} / call".format((time.time() - st) / cnt))
except Exception:
print(traceback.format_exc())
def handle_exact_match_event(self, data):
eid = data.decode("utf-8")
matched_event = self.ASER_KG.get_exact_match_eventuality(eid)
if matched_event:
ret_data = json.dumps(matched_event.encode(encoding=None)).encode("utf-8")
else:
ret_data = json.dumps(ASERCmd.none).encode(encoding="utf-8")
return ret_data
def handle_exact_match_relation(self, data):
eid1, eid2 = data.decode("utf-8").split("$")
matched_relation = self.ASER_KG.get_exact_match_relation([eid1, eid2])[0]
print(matched_relation)
if matched_relation:
ret_data = json.dumps(matched_relation.encode(encoding=None)).encode("utf-8")
else:
ret_data = json.dumps(ASERCmd.none).encode(encoding="utf-8")
return ret_data
def handle_fetch_related_events(self, data):
h_eid = data.decode("utf-8")
related_events = self.ASER_KG.get_related_eventualities(h_eid)
rst = [(event.encode(encoding=None), relation.encode(encoding=None))
for event, relation in related_events]
ret_data = json.dumps(rst).encode("utf-8")
return ret_data
class ASERWorker(Process):
def __init__(self, opt, id, worker_addr_list, sink_addr):
super().__init__()
self.worker_id = id
self.worker_addr_list = worker_addr_list
self.sink_addr = sink_addr
self.eventuality_extractor = SeedRuleASERExtractor(
corenlp_path = opt.corenlp_path,
corenlp_port=opt.base_corenlp_port + id)
self.is_ready = multiprocessing.Event()
def run(self):
self._run()
def close(self):
self.is_ready.clear()
self.eventuality_extractor.close()
self.terminate()
self.join()
@zmqd.context()
@zmqd.socket(zmq.PUSH)
def _run(self, ctx, sink):
print("ASER Worker %d started" % self.worker_id)
receiver_sockets = []
poller = zmq.Poller()
for worker_addr in self.worker_addr_list:
_socket = ctx.socket(zmq.PULL)
_socket.connect(worker_addr)
receiver_sockets.append(_socket)
poller.register(_socket)
sink.connect(self.sink_addr)
while True:
try:
events = dict(poller.poll())
for sock_idx, sock in enumerate(receiver_sockets):
if sock in events:
client_id, req_id, cmd, data = sock.recv_multipart()
print("Worker {} received msg ({}, {}, {}, {})".format(
self.worker_id,
client_id.decode("utf-8"), req_id.decode("utf-8"),
cmd.decode("utf-8"), data.decode("utf-8")
))
if cmd == ASERCmd.extract_events:
ret_data = self.handle_extract_events(data)
sink.send_multipart([client_id, req_id, cmd, ret_data])
else:
raise RuntimeError
except Exception:
print(traceback.format_exc())
def handle_extract_events(self, data):
sentence = data.decode("utf-8")
eventualities_list = self.eventuality_extractor.extract_eventualities_from_text(sentence)
print(eventualities_list)
rst = [[e.encode(encoding=None) for e in eventualities] for eventualities in eventualities_list]
ret_data = json.dumps(rst).encode("utf-8")
return ret_data
class ASERSink(Process):
def __init__(self, args, sink_addr_receiver_addr):
super().__init__()
self.port_out = args.port_out
self.sink_addr_receiver_addr = sink_addr_receiver_addr
def run(self):
self._run()
@zmqd.context()
@zmqd.socket(zmq.PAIR)
@zmqd.socket(zmq.PULL)
@zmqd.socket(zmq.PUB)
def _run(self, _, addr_sender, receiver, sender):
addr_sender.connect(self.sink_addr_receiver_addr)
receiver_addr = sockets_ipc_bind(receiver).encode("utf-8")
addr_sender.send(receiver_addr)
sender.bind("tcp://*:%d" % self.port_out)
print("ASER Sink started")
cnt = 0
while True:
try:
msg = receiver.recv_multipart()
sender.send_multipart(msg)
cnt += 1
print("Sink cnt {}".format(cnt))
except Exception:
print(traceback.format_exc())
|
91139
|
import zmq
import sys
import threading
import time
from random import randint, random
def tprint(msg):
"""like print, but won't get newlines confused with multiple threads"""
sys.stdout.write(msg + '\n')
sys.stdout.flush()
class ServerTask(threading.Thread):
"""ServerTask"""
def __init__(self):
threading.Thread.__init__ (self)
def run(self):
context = zmq.Context()
frontend = context.socket(zmq.ROUTER)
frontend.bind('tcp://*:5570')
backend = context.socket(zmq.DEALER)
backend.bind('inproc://backend')
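        # ROUTER front end for clients, DEALER back end for the in-process worker pool;
        # zmq.proxy below shuttles messages between the two sockets.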
workers = []
for i in range(5):
worker = ServerWorker(context)
worker.start()
workers.append(worker)
zmq.proxy(frontend, backend)
frontend.close()
backend.close()
context.term()
class ServerWorker(threading.Thread):
"""ServerWorker"""
def __init__(self, context):
threading.Thread.__init__ (self)
self.context = context
def run(self):
worker = self.context.socket(zmq.DEALER)
worker.connect('inproc://backend')
tprint('Worker started')
while True:
ident, msg = worker.recv_multipart()
tprint('Worker received %s from %s' % (msg, ident))
replies = randint(0,4)
for i in range(replies):
time.sleep(1. / (randint(1,10)))
worker.send_multipart([ident, msg])
worker.close()
def main():
"""main function"""
server = ServerTask()
server.start()
server.join()
if __name__ == "__main__":
main()
|
91146
|
import pytest
from django.conf import settings
from requests import put
from grandchallenge.uploads.models import UserUpload
from tests.algorithms_tests.factories import AlgorithmImageFactory
from tests.factories import UserFactory
from tests.verification_tests.factories import VerificationFactory
@pytest.mark.django_db
def test_user_upload_flow():
# Create User Upload
u = UserFactory()
filename = "foo.bat"
# Create User Upload File
upload = UserUpload.objects.create(creator=u, filename=filename)
assert upload.status == UserUpload.StatusChoices.INITIALIZED
assert upload.s3_upload_id != ""
# Get the presigned url
presigned_url = upload.generate_presigned_url(part_number=0)
assert presigned_url != ""
# PUT the file
response = put(presigned_url, data=b"123")
assert response.status_code == 200
assert response.headers["ETag"] != ""
# Finish the upload
upload.complete_multipart_upload(
parts=[{"ETag": response.headers["ETag"], "PartNumber": 0}]
)
assert upload.status == UserUpload.StatusChoices.COMPLETED
def test_create_multipart_upload():
user = UserFactory.build()
upload = UserUpload(creator=user)
assert upload.s3_upload_id == ""
assert upload.status == UserUpload.StatusChoices.PENDING
upload.create_multipart_upload()
assert upload.s3_upload_id != ""
assert upload.status == UserUpload.StatusChoices.INITIALIZED
assert upload.key == f"uploads/{user.pk}/{upload.pk}"
def test_generate_presigned_urls():
upload = UserUpload(creator=UserFactory.build())
upload.create_multipart_upload()
presigned_urls = upload.generate_presigned_urls(part_numbers=[1, 13, 26])
assert set(presigned_urls.keys()) == {"1", "13", "26"}
assert presigned_urls["1"].startswith(
f"{settings.AWS_S3_ENDPOINT_URL}/{upload.bucket}/{upload.key}?uploadId={upload.s3_upload_id}&partNumber=1&"
)
assert presigned_urls["13"].startswith(
f"{settings.AWS_S3_ENDPOINT_URL}/{upload.bucket}/{upload.key}?uploadId={upload.s3_upload_id}&partNumber=13&"
)
assert presigned_urls["26"].startswith(
f"{settings.AWS_S3_ENDPOINT_URL}/{upload.bucket}/{upload.key}?uploadId={upload.s3_upload_id}&partNumber=26&"
)
def test_abort_multipart_upload():
upload = UserUpload(creator=UserFactory.build())
upload.create_multipart_upload()
assert upload.status == UserUpload.StatusChoices.INITIALIZED
assert upload.s3_upload_id != ""
upload.abort_multipart_upload()
assert upload.status == UserUpload.StatusChoices.ABORTED
assert upload.s3_upload_id == ""
def test_list_parts():
upload = UserUpload(creator=UserFactory.build())
upload.create_multipart_upload()
url = upload.generate_presigned_url(part_number=1)
response = put(url, data=b"123")
parts = upload.list_parts()
assert len(parts) == 1
assert parts[0]["ETag"] == response.headers["ETag"]
assert parts[0]["Size"] == 3
assert parts[0]["PartNumber"] == 1
def test_list_parts_empty():
upload = UserUpload(creator=UserFactory.build())
upload.create_multipart_upload()
parts = upload.list_parts()
assert parts == []
def test_list_parts_truncation():
upload = UserUpload(creator=UserFactory.build())
upload.create_multipart_upload()
presigned_urls = upload.generate_presigned_urls(part_numbers=[1, 2])
responses = {}
for part_number, url in presigned_urls.items():
responses[part_number] = put(url, data=b"123")
upload.LIST_MAX_ITEMS = 1
parts = upload.list_parts()
assert len(parts) == 2
assert parts[0]["ETag"] == responses["1"].headers["ETag"]
assert parts[0]["Size"] == 3
assert parts[0]["PartNumber"] == 1
assert parts[1]["ETag"] == responses["2"].headers["ETag"]
assert parts[1]["Size"] == 3
assert parts[1]["PartNumber"] == 2
@pytest.mark.django_db
def test_upload_copy():
user = UserFactory()
upload = UserUpload.objects.create(creator=user, filename="test.tar.gz")
presigned_urls = upload.generate_presigned_urls(part_numbers=[1])
response = put(presigned_urls["1"], data=b"123")
upload.complete_multipart_upload(
parts=[{"ETag": response.headers["ETag"], "PartNumber": 1}]
)
upload.save()
ai = AlgorithmImageFactory(creator=user, image=None)
assert not ai.image
upload.copy_object(to_field=ai.image)
assert (
ai.image.name
== f"docker/images/algorithms/algorithmimage/{ai.pk}/test.tar.gz"
)
assert ai.image.storage.exists(name=ai.image.name)
with ai.image.open() as f:
assert f.read() == b"123"
@pytest.mark.django_db
def test_file_deleted_with_object():
u = UserFactory()
upload = UserUpload.objects.create(creator=u)
presigned_urls = upload.generate_presigned_urls(part_numbers=[1])
response = put(presigned_urls["1"], data=b"123")
upload.complete_multipart_upload(
parts=[{"ETag": response.headers["ETag"], "PartNumber": 1}]
)
upload.save()
bucket = upload.bucket
key = upload.key
assert upload._client.head_object(Bucket=bucket, Key=key)
UserUpload.objects.filter(pk=upload.pk).delete()
with pytest.raises(upload._client.exceptions.ClientError):
upload._client.head_object(Bucket=bucket, Key=key)
@pytest.mark.django_db
def test_incomplete_deleted_with_object():
u = UserFactory()
upload = UserUpload.objects.create(creator=u)
bucket = upload.bucket
key = upload.key
assert "Uploads" in upload._client.list_multipart_uploads(
Bucket=bucket, Prefix=key
)
UserUpload.objects.filter(pk=upload.pk).delete()
assert "Uploads" not in upload._client.list_multipart_uploads(
Bucket=bucket, Prefix=key
)
def test_size_of_creators_completed_uploads():
def upload_files_for_user(user, n=1):
for _ in range(n):
ul = UserUpload(creator=user)
ul.create_multipart_upload()
presigned_urls = ul.generate_presigned_urls(part_numbers=[1])
response = put(presigned_urls["1"], data=b"123")
ul.complete_multipart_upload(
parts=[{"ETag": response.headers["ETag"], "PartNumber": 1}]
)
u = UserFactory.build(pk=42)
upload = UserUpload(creator=u)
upload.LIST_MAX_ITEMS = 1
initial_upload_size = upload.size_of_creators_completed_uploads
assert type(initial_upload_size) == int
upload_files_for_user(user=u, n=upload.LIST_MAX_ITEMS + 1)
# another users files should not be considered
upload_files_for_user(user=UserFactory.build(pk=u.pk + 1))
assert (
upload.size_of_creators_completed_uploads
== initial_upload_size + (upload.LIST_MAX_ITEMS + 1) * 3
)
def test_size_incomplete():
u = UserFactory.build(pk=42)
upload = UserUpload(creator=u)
upload.create_multipart_upload()
upload.LIST_MAX_ITEMS = 1
assert upload.size == 0
parts = [1, 2]
presigned_urls = upload.generate_presigned_urls(part_numbers=parts)
for part in parts:
put(presigned_urls[str(part)], data=b"123")
assert upload.size == (upload.LIST_MAX_ITEMS + 1) * 3
def test_size_complete():
u = UserFactory.build(pk=42)
upload = UserUpload(creator=u)
upload.create_multipart_upload()
assert upload.size == 0
presigned_urls = upload.generate_presigned_urls(part_numbers=[1])
response = put(presigned_urls["1"], data=b"123")
upload.complete_multipart_upload(
parts=[{"ETag": response.headers["ETag"], "PartNumber": 1}]
)
assert upload.size == 3
@pytest.mark.django_db
def test_can_upload_more_unverified(settings):
upload = UserUpload.objects.create(creator=UserFactory())
presigned_urls = upload.generate_presigned_urls(part_numbers=[1])
put(presigned_urls["1"], data=b"123")
assert upload.can_upload_more is True
settings.UPLOADS_MAX_SIZE_UNVERIFIED = 2
assert upload.can_upload_more is False
@pytest.mark.django_db
def test_can_upload_more_verified(settings):
user = UserFactory()
upload = UserUpload.objects.create(creator=user)
presigned_urls = upload.generate_presigned_urls(part_numbers=[1])
put(presigned_urls["1"], data=b"123")
settings.UPLOADS_MAX_SIZE_UNVERIFIED = 2
assert upload.can_upload_more is False
VerificationFactory(user=user, is_verified=True)
assert upload.can_upload_more is True
settings.UPLOADS_MAX_SIZE_VERIFIED = 2
assert upload.can_upload_more is False
@pytest.mark.django_db
def test_can_upload_more_other_objects(settings):
user = UserFactory()
new_upload = UserUpload.objects.create(creator=user)
settings.UPLOADS_MAX_SIZE_UNVERIFIED = 2
assert new_upload.can_upload_more is True
upload = UserUpload.objects.create(creator=user)
presigned_urls = upload.generate_presigned_urls(part_numbers=[1])
response = put(presigned_urls["1"], data=b"123")
upload.complete_multipart_upload(
parts=[{"ETag": response.headers["ETag"], "PartNumber": 1}]
)
assert upload.can_upload_more is False
assert new_upload.can_upload_more is False
|
91166
|
from vms.models import BackupDefine
from api.decorators import api_view, request_data, setting_required
from api.permissions import IsAdminOrReadOnly
from api.utils.db import get_object
from api.vm.utils import get_vm, get_vms
from api.vm.snapshot.utils import get_disk_id, filter_disk_id
from api.vm.backup.utils import output_extended_backup_count
from api.vm.backup.vm_define_backup import BackupDefineView
from api.vm.backup.vm_backup import VmBackup
from api.vm.backup.vm_backup_list import VmBackupList
__all__ = ('vm_define_backup_list_all', 'vm_define_backup_list', 'vm_define_backup', 'vm_backup_list', 'vm_backup')
#: vm_status: GET:
@api_view(('GET',))
@request_data(permissions=(IsAdminOrReadOnly,)) # get_vms() = IsVmOwner
@setting_required('VMS_VM_BACKUP_ENABLED')
def vm_define_backup_list_all(request, data=None):
"""
List (:http:get:`GET </vm/define/backup>`) all backup definitions for all VMs.
.. http:get:: /vm/define/backup
:DC-bound?:
* |dc-yes|
:Permissions:
* |VmOwner|
:Asynchronous?:
* |async-no|
:arg data.full: Return list of objects with all backup definition details (default: false)
:type data.full: boolean
:arg data.extended: Include total number of backups for each backup definition (default: false)
:type data.extended: boolean
:arg data.order_by: :ref:`Available fields for sorting <order_by>`: ``name``, ``disk_id``, ``hostname``, \
``created`` (default: ``hostname,-created``)
:type data.order_by: string
:status 200: SUCCESS
:status 403: Forbidden
"""
extra = output_extended_backup_count(request, data)
# TODO: check indexes
bkp_define = BackupDefine.objects.select_related('vm', 'vm__dc', 'node', 'zpool', 'periodic_task',
'periodic_task__crontab')\
.filter(vm__in=get_vms(request)).order_by(*BackupDefineView.get_order_by(data))
if extra:
bkp_define = bkp_define.extra(extra)
return BackupDefineView(request, data=data).get(None, bkp_define, many=True, extended=bool(extra))
#: vm_status: GET:
@api_view(('GET',))
@request_data(permissions=(IsAdminOrReadOnly,)) # get_vm() = IsVmOwner
@setting_required('VMS_VM_BACKUP_ENABLED')
def vm_define_backup_list(request, hostname_or_uuid, data=None):
"""
List (:http:get:`GET </vm/(hostname_or_uuid)/define/backup>`) all VM backup definitions.
.. http:get:: /vm/(hostname_or_uuid)/define/backup
:DC-bound?:
* |dc-yes|
:Permissions:
* |VmOwner|
:Asynchronous?:
* |async-no|
:arg hostname_or_uuid: **required** - Server hostname or uuid
:type hostname_or_uuid: string
:arg data.full: Return list of objects with all backup definition details (default: false)
:type data.full: boolean
:arg data.disk_id: Filter by disk number/ID
:type data.disk_id: integer
:arg data.extended: Include total number of backups for each backup definition (default: false)
:type data.extended: boolean
:arg data.order_by: :ref:`Available fields for sorting <order_by>`: ``name``, ``disk_id``, ``created`` \
(default: ``-created``)
:type data.order_by: string
:status 200: SUCCESS
:status 403: Forbidden
:status 404: VM not found
:status 412: Invalid disk_id
"""
vm = get_vm(request, hostname_or_uuid, exists_ok=True, noexists_fail=True, sr=('node', 'owner'))
query_filter = {'vm': vm}
query_filter = filter_disk_id(vm, query_filter, data)
extra = output_extended_backup_count(request, data)
# TODO: check indexes
bkp_define = BackupDefine.objects.select_related('vm', 'vm__dc', 'node', 'zpool', 'periodic_task',
'periodic_task__crontab')\
.filter(**query_filter).order_by(*BackupDefineView.get_order_by(data))
if extra:
bkp_define = bkp_define.extra(extra)
return BackupDefineView(request, data=data).get(vm, bkp_define, many=True, extended=bool(extra))
#: vm_status: GET:
#: vm_status: POST: running, stopped, stopping
#: vm_status: PUT: running, stopped, stopping
#: vm_status:DELETE: running, stopped, stopping
@api_view(('GET', 'POST', 'PUT', 'DELETE'))
@request_data(permissions=(IsAdminOrReadOnly,)) # get_vm() = IsVmOwner
@setting_required('VMS_VM_BACKUP_ENABLED')
def vm_define_backup(request, hostname_or_uuid, bkpdef, data=None):
"""
Show (:http:get:`GET </vm/(hostname_or_uuid)/define/backup/(bkpdef)>`),
create (:http:post:`POST </vm/(hostname_or_uuid)/define/backup/(bkpdef)>`),
remove (:http:delete:`DELETE </vm/(hostname_or_uuid)/define/backup/(bkpdef)>`) or
update (:http:put:`PUT </vm/(hostname_or_uuid)/define/backup/(bkpdef)>`)
a VM backup definition and schedule.
.. http:get:: /vm/(hostname_or_uuid)/define/backup/(bkpdef)
:DC-bound?:
* |dc-yes|
:Permissions:
* |VmOwner|
:Asynchronous?:
* |async-no|
:arg hostname_or_uuid: **required** - Server hostname or uuid
:type hostname_or_uuid: string
:arg bkpdef: **required** - Backup definition name
:type bkpdef: string
:arg data.disk_id: **required** - Disk number/ID (default: 1)
:type data.disk_id: integer
:arg data.extended: Include total number of backups (default: false)
:type data.extended: boolean
:status 200: SUCCESS
:status 403: Forbidden
:status 404: VM not found / Backup definition not found
:status 412: Invalid disk_id
.. http:post:: /vm/(hostname_or_uuid)/define/backup/(bkpdef)
:DC-bound?:
* |dc-yes|
:Permissions:
* |Admin|
:Asynchronous?:
* |async-no|
:arg hostname_or_uuid: **required** - Server hostname or uuid
:type hostname_or_uuid: string
:arg bkpdef: **required** - Backup definition name (predefined: hourly, daily, weekly, monthly)
:type bkpdef: string
:arg data.disk_id: **required** - Disk number/ID (default: 1)
:type data.disk_id: integer
:arg data.type: **required** - Backup type (1 - dataset, 2 - file) (default: 1)
    :type data.type: integer
:arg data.node: **required** - Name of the backup node
:type data.node: string
:arg data.zpool: **required** - The zpool used on the backup node (default: zones)
:type data.zpool: string
:arg data.schedule: **required** - Schedule in UTC CRON format (e.g. 30 4 * * 6)
:type data.schedule: string
:arg data.retention: **required** - Maximum number of backups to keep
:type data.retention: integer
:arg data.active: Enable or disable backup schedule (default: true)
:type data.active: boolean
:arg data.compression: Backup file compression algorithm (0 - none, 1 - gzip, 2 - bzip2, 3 - xz) (default: 0)
:type data.compression: integer
:arg data.bwlimit: Transfer rate limit in bytes (default: null => no limit)
:type data.bwlimit: integer
:arg data.desc: Backup definition description
:type data.desc: string
:arg data.fsfreeze: Whether to send filesystem freeze command to QEMU agent socket before \
creating backup snapshot (requires QEMU Guest Agent) (default: false)
:type data.fsfreeze: boolean
:status 200: SUCCESS
:status 400: FAILURE
:status 403: Forbidden
:status 404: VM not found
:status 406: Backup definition already exists
:status 412: Invalid disk_id
:status 423: Node is not operational / VM is not operational
.. http:put:: /vm/(hostname_or_uuid)/define/backup/(bkpdef)
:DC-bound?:
* |dc-yes|
:Permissions:
* |Admin|
:Asynchronous?:
* |async-no|
:arg hostname_or_uuid: **required** - Server hostname or uuid
:type hostname_or_uuid: string
:arg bkpdef: **required** - Backup definition name
:type bkpdef: string
:arg data.disk_id: **required** - Disk number/ID (default: 1)
:type data.disk_id: integer
:arg data.schedule: Schedule in UTC CRON format (e.g. 30 4 * * 6)
:type data.schedule: string
:arg data.retention: Maximum number of backups to keep
:type data.retention: integer
:arg data.active: Enable or disable backup schedule
:type data.active: boolean
:arg data.compression: Backup file compression algorithm (0 - none, 1 - gzip, 2 - bzip2, 3 - xz)
:type data.compression: integer
:arg data.bwlimit: Transfer rate limit in bytes
:type data.bwlimit: integer
:arg data.desc: Backup definition description
:type data.desc: string
:status 200: SUCCESS
:status 400: FAILURE
:status 403: Forbidden
:status 404: VM not found / Backup definition not found
:status 412: Invalid disk_id
:status 423: Node is not operational / VM is not operational
.. http:delete:: /vm/(hostname_or_uuid)/define/backup/(bkpdef)
:DC-bound?:
* |dc-yes|
:Permissions:
* |Admin|
:Asynchronous?:
* |async-no|
:arg hostname_or_uuid: **required** - Server hostname or uuid
:type hostname_or_uuid: string
:arg bkpdef: **required** - Backup definition name
:type bkpdef: string
:arg data.disk_id: **required** - Disk number/ID (default: 1)
:type data.disk_id: integer
:status 200: SUCCESS
:status 400: FAILURE
:status 403: Forbidden
:status 404: VM not found / Backup definition not found
:status 412: Invalid disk_id
:status 423: Node is not operational / VM is not operational
"""
vm = get_vm(request, hostname_or_uuid, exists_ok=True, noexists_fail=True)
disk_id, real_disk_id, zfs_filesystem = get_disk_id(request, vm, data)
extra = output_extended_backup_count(request, data)
define = get_object(request, BackupDefine, {'name': bkpdef, 'vm': vm, 'disk_id': real_disk_id},
sr=('vm', 'vm__dc', 'node', 'periodic_task', 'periodic_task__crontab'), extra={'select': extra})
return BackupDefineView(request, data=data).response(vm, define, extended=bool(extra))
#: vm_status: GET:
@api_view(('GET', 'DELETE'))
@request_data(permissions=(IsAdminOrReadOnly,)) # get_vm() = IsVmOwner
@setting_required('VMS_VM_BACKUP_ENABLED')
def vm_backup_list(request, hostname_or_uuid, data=None):
"""
List (:http:get:`GET </vm/(hostname_or_uuid)/backup>`) all VM backups.
Delete (:http:delete:`DELETE </vm/(hostname_or_uuid)/backup>`) VM backups specified by the list (data.bkpnames).
.. http:get:: /vm/(hostname_or_uuid)/backup
:DC-bound?:
* |dc-yes|
:Permissions:
* |VmOwner|
:Asynchronous?:
* |async-no|
:arg hostname_or_uuid: **required** - Original server hostname or uuid
:type hostname_or_uuid: string
:arg data.full: Return list of objects with all backup details (default: false)
:type data.full: boolean
:arg data.disk_id: Filter by original disk number/ID
:type data.disk_id: integer
:arg data.define: Filter by backup definition
:type data.define: string
:arg data.order_by: :ref:`Available fields for sorting <order_by>`: ``name``, ``disk_id``, \
``size``, ``time``, ``created`` (default: ``-created``)
:type data.order_by: string
:status 200: SUCCESS
:status 403: Forbidden
:status 412: Invalid disk_id
.. http:delete:: /vm/(hostname_or_uuid)/backup
:DC-bound?:
* |dc-yes|
:Permissions:
* |VmOwner|
:Asynchronous?:
* |async-yes|
:arg hostname_or_uuid: **required** - Original server hostname or uuid
:type hostname_or_uuid: string
:arg data.bkpnames: **required** - List of backups to be deleted
:type data.bkpnames: array
:status 200: SUCCESS
:status 403: Forbidden
:status 404: Backup not found
:status 412: Invalid bkpnames
:status 417: VM backup status is not OK
:status 423: Node is not operational / VM is not operational
"""
return VmBackupList(request, hostname_or_uuid, data).response()
#: vm_status: GET:
#: vm_status: POST: running, stopped, stopping
#: vm_status: PUT: stopped
#: vm_status:DELETE: running, stopped, stopping
@api_view(('GET', 'POST', 'PUT', 'DELETE'))
@request_data(permissions=(IsAdminOrReadOnly,)) # get_vm() = IsVmOwner
@setting_required('VMS_VM_BACKUP_ENABLED')
def vm_backup(request, hostname_or_uuid, bkpname, data=None):
"""
Show (:http:get:`GET </vm/(hostname_or_uuid)/backup/(bkpname)>`),
create (:http:post:`POST </vm/(hostname_or_uuid)/backup/(bkpdef)>`),
delete (:http:delete:`DELETE </vm/(hostname_or_uuid)/backup/(bkpname)>`) or
restore (:http:put:`PUT </vm/(hostname_or_uuid)/backup/(bkpname)>`)
a backup of VM's disk.
.. http:get:: /vm/(hostname_or_uuid)/backup/(bkpname)
:DC-bound?:
* |dc-yes|
:Permissions:
* |VmOwner|
:Asynchronous?:
* |async-no|
:arg hostname_or_uuid: **required** - Original server hostname or uuid
:type hostname_or_uuid: string
:arg bkpname: **required** - Backup name
:type bkpname: string
:arg data.disk_id: **required** - Original disk number/ID (default: 1)
:type data.disk_id: integer
:status 200: SUCCESS
:status 403: Forbidden
:status 404: Backup not found
:status 412: Invalid disk_id
.. http:post:: /vm/(hostname_or_uuid)/backup/(bkpdef)
:DC-bound?:
* |dc-yes|
:Permissions:
* |Admin|
:Asynchronous?:
* |async-yes|
:arg hostname_or_uuid: **required** - Server hostname or uuid
:type hostname_or_uuid: string
:arg bkpname.bkpdef: **required** - Backup definition name
:type bkpname.bkpdef: string
:arg data.disk_id: **required** - Disk number/ID (default: 1)
:type data.disk_id: integer
:arg data.note: Backup comment
:type data.note: string
:status 200: SUCCESS
:status 201: PENDING
:status 400: FAILURE
:status 403: Forbidden
:status 404: VM not found
:status 406: Backup already exists
:status 412: Invalid disk_id
:status 417: DC backup size limit reached
:status 423: Node is not operational / VM is not operational
:status 428: VM is not installed
.. http:put:: /vm/(hostname_or_uuid)/backup/(bkpname)
.. warning:: A backup restore will restore disk data from the backup into target disk; \
All data created after the backup (including all existing snapshots) on target server will be lost!
:DC-bound?:
* |dc-yes|
:Permissions:
* |Admin|
:Asynchronous?:
* |async-yes| - Restore backup
:arg hostname_or_uuid: **required** - Original server hostname or uuid
:type hostname_or_uuid: string
:arg bkpname: **required** - Backup name
:type bkpname: string
:arg data.disk_id: Original disk number/ID (default: 1)
:type data.disk_id: integer
:arg data.target_hostname_or_uuid: **required** - Target server hostname or uuid
:type data.target_hostname_or_uuid: string
:arg data.target_disk_id: **required** - Target disk number/ID
:type data.target_disk_id: integer
:arg data.force: Force restore and delete existing snapshots and backups (default: true)
:type data.force: boolean
:status 200: SUCCESS
:status 201: PENDING
:status 400: FAILURE
:status 403: Forbidden
:status 404: Backup not found
:status 409: VM has pending tasks
:status 412: Invalid disk_id / Invalid target_disk_id
:status 417: VM backup status is not OK / VM has snapshots (force=false)
:status 423: Node is not operational / VM is not operational / VM is not stopped / VM is locked or has slave VMs
:status 428: VM brand mismatch / Disk size mismatch / Not enough free space on target storage
.. http:put:: /vm/(hostname_or_uuid)/backup/(bkpname)
:DC-bound?:
* |dc-yes|
:Permissions:
* |Admin|
:Asynchronous?:
* |async-no| - Update backup note
:arg hostname_or_uuid: **required** - Original server hostname or uuid
:type hostname_or_uuid: string
:arg bkpname: **required** - Backup name
:type bkpname: string
:arg data.note: **required** - Backup comment (change note instead of restore if specified)
:type data.note: string
:status 200: SUCCESS
:status 400: FAILURE
:status 403: Forbidden
:status 404: Backup not found
.. http:delete:: /vm/(hostname_or_uuid)/backup/(bkpname)
:DC-bound?:
* |dc-yes|
:Permissions:
* |Admin|
:Asynchronous?:
* |async-yes|
:arg hostname_or_uuid: **required** - Original server hostname or uuid
:type hostname_or_uuid: string
:arg bkpname: **required** - Backup name
:type bkpname: string
:arg data.disk_id: **required** - Original disk number/ID (default: 1)
:type data.disk_id: integer
:status 200: SUCCESS
:status 201: PENDING
:status 400: FAILURE
:status 403: Forbidden
:status 404: Backup not found
:status 412: Invalid disk_id
:status 417: VM backup status is not OK
:status 423: Node is not operational / VM is not operational
"""
return VmBackup(request, hostname_or_uuid, bkpname, data).response()
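# --- Illustrative client-side sketch (not part of the API module above) ---
# The docstrings above describe an HTTP API; the snippet below shows how a
# client might call two of the documented endpoints with the `requests`
# library. The base URL, auth header and hostnames are assumptions made for
# the example only.
if __name__ == '__main__':
    import requests
    api = 'https://danube.example.com/api'          # assumed base URL
    headers = {'ES-API-KEY': 'secret-token'}        # assumed auth header
    # GET /vm/(hostname_or_uuid)/define/backup - list backup definitions of one VM
    r = requests.get(api + '/vm/web01.example.com/define/backup',
                     params={'full': True, 'extended': True}, headers=headers)
    print(r.status_code, r.json())
    # POST /vm/(hostname_or_uuid)/define/backup/(bkpdef) - create a daily definition
    r = requests.post(api + '/vm/web01.example.com/define/backup/daily',
                      json={'disk_id': 1, 'node': 'backup01', 'zpool': 'zones',
                            'schedule': '30 4 * * *', 'retention': 14},
                      headers=headers)
    print(r.status_code, r.json())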
|
91187
|
import numpy as np
import pandas as pd
from pathlib import Path
from typing import Dict, List, Union
from collections import OrderedDict
from pathos.multiprocessing import ThreadPool as Pool
from tqdm import tqdm
from src.utils import remap_label, get_type_instances
from .metrics import PQ, AJI, AJI_plus, DICE2, split_and_merge
class Benchmarker:
def compute_metrics(
self,
true_pred: List[np.ndarray]
) -> Dict[str, float]:
"""
Computes metrics for one (inst_map, gt_mask) pair.
If GT does not contain any nuclear objects, returns None
Args:
-----------
true_pred (List[np.ndarray]):
Ground truth annotations in true_pred[1] and
corresponding predicted instance map in true_pred[2]
Returns:
-----------
A Dict[str, float] of the metrics
"""
name = true_pred[0]
true = true_pred[1]
pred = true_pred[2]
# Skip empty GTs
if len(np.unique(true)) > 1:
true = remap_label(true)
pred = remap_label(pred)
pq = PQ(true, pred)
aji = AJI(true, pred)
aji_p = AJI_plus(true, pred)
dice2 = DICE2(true, pred)
splits, merges = split_and_merge(true, pred)
result = {
"name":name,
"AJI": aji,
"AJI_plus": aji_p,
"DICE2": dice2,
"PQ": pq["pq"],
"SQ": pq["sq"],
"DQ": pq["dq"],
"inst_recall": pq["recall"],
"inst_precision": pq["precision"],
"splits": splits,
"merges": merges
}
return result
def benchmark_insts(
self,
inst_maps: Dict[str, np.ndarray],
gt_masks: Dict[str, np.ndarray],
pattern_list: List[str]=None,
save_dir: Union[str, Path]=None,
prefix: str=""
) -> pd.DataFrame:
"""
Run benchmarking metrics for instance maps for all of the files
in the dataset. Note that the inst_maps and gt_masks need to
share exact same keys and be sorted so that they align when
computing metrics.
Args:
-----------
inst_maps (OrderedDict[str, np.ndarray]):
A dict of file_name:inst_map key vals in order
gt_masks (OrderedDict[str, np.ndarray]):
A dict of file_name:gt_inst_map key vals in order
pattern_list (List[str], default=None):
A list of patterns contained in the gt_mask and inst_map
names. Averages for the masks containing these patterns
will be added to the result df.
save_dir (str or Path):
directory where to save the result .csv
prefix (str, default=""):
adds a prefix to the .csv file name
Returns:
----------
a pandas dataframe of the metrics. Samples are rows and
metrics are columns:
_____________________
|sample|PQ|SQ|DQ|AJI|
|img1 |.5|.4|.6|.6 |
|img2 |.5|.4|.6|.6 |
"""
assert isinstance(inst_maps, dict), (
f"inst_maps: {type(inst_maps)} is not a dict of inst_maps"
)
assert isinstance(gt_masks, dict), (
f"inst_maps: {type(gt_masks)} is not a dict of inst_maps"
)
# Sort by file name
inst_maps = OrderedDict(sorted(inst_maps.items()))
gt_masks = OrderedDict(sorted(gt_masks.items()))
        assert inst_maps.keys() == gt_masks.keys(), (
            "inst_maps have different names than the gt masks. insts: "
            f"{inst_maps.keys()}. gt's: {gt_masks.keys()}"
        )
masks = list(
zip(inst_maps.keys(), gt_masks.values(), inst_maps.values())
)
metrics = []
with Pool() as pool:
for x in tqdm(
pool.imap_unordered(self.compute_metrics, masks),
total=len(masks),
                desc="Running metrics"
):
metrics.append(x)
# drop Nones if no nuclei are found in an image
metrics = [metric for metric in metrics if metric]
score_df = pd.DataFrame.from_records(metrics)
score_df = score_df.set_index("name").sort_index()
score_df.loc["averages_for_the_set"] = score_df.mean(axis=0)
# Add averages to the df of files which contain patterns
if pattern_list is not None:
pattern_avgs = {
f"{p}_avg": score_df[score_df.index.str.contains(f"{p}")].mean(axis=0)
for p in pattern_list
}
score_df = pd.concat(
[score_df, pd.DataFrame(pattern_avgs).transpose()]
)
# Save results to .csv
if save_dir is not None:
save_dir = Path(save_dir)
score_df.to_csv(Path(save_dir / f"{prefix}_inst_benchmark.csv"))
return score_df
def benchmark_per_type(
self,
inst_maps: Dict[str, np.ndarray],
type_maps: Dict[str, np.ndarray],
gt_mask_insts: Dict[str, np.ndarray],
gt_mask_types: Dict[str, np.ndarray],
classes: Dict[str, int],
pattern_list: List[str]=None,
save_dir: Union[str, Path]=None,
prefix: str=""
) -> pd.DataFrame:
"""
Run benchmarking metrics per class type for all of the files in
the dataset. Note that the inst_maps and gt_masks need to share
exact same keys and be sorted so that they align when computing
metrics.
Args:
-----------
inst_maps (Dict[str, np.ndarray]):
A dict of file_name:inst_map key vals in order
type_maps (Dict[str, np.ndarray]):
A dict of file_name:panoptic_map key vals in order
            gt_mask_insts (Dict[str, np.ndarray]):
                A dict of file_name:gt_inst_map key vals in order
            gt_mask_types (Dict[str, np.ndarray]):
                A dict of file_name:gt_panoptic_map key vals in order
classes (Dict[str, int]):
The class dict e.g. {bg: 0, immune: 1, epithel: 2}.
background must be 0 class
pattern_list (List[str], default=None):
A list of patterns contained in the gt_mask and inst_map
names. Averages for the masks containing these patterns
will be added to the result df.
save_dir (str or Path):
directory where to save the result .csv
prefix (str, default=""):
adds a prefix to the .csv file name
Returns:
-----------
a pandas dataframe of the metrics. Samples are rows and
metrics are columns:
__________________________
|sample |PQ|SQ|DQ|AJI|
|img1_type1 |.5|.4|.6|.6 |
|img1_type2 |.5|.4|.6|.6 |
|img2_type1 |.5|.4|.6|.6 |
|img2_type2 |.5|.4|.6|.6 |
"""
assert isinstance(inst_maps, dict), (
f"inst_maps: {type(inst_maps)} is not a dict of inst_maps"
)
assert isinstance(type_maps, dict), (
f"inst_maps: {type(type_maps)} is not a dict of panoptic_maps"
)
assert isinstance(gt_mask_insts, dict), (
f"inst_maps: {type(gt_mask_insts)} is not a dict of inst_maps"
)
assert isinstance(gt_mask_types, dict), (
f"inst_maps: {type(gt_mask_types)} is not a dict of inst_maps"
)
# sort by name
inst_maps = OrderedDict(sorted(inst_maps.items()))
type_maps = OrderedDict(sorted(type_maps.items()))
gt_mask_insts = OrderedDict(sorted(gt_mask_insts.items()))
gt_mask_types = OrderedDict(sorted(gt_mask_types.items()))
        assert inst_maps.keys() == gt_mask_insts.keys(), (
            "inst_maps have different names than the gt masks. insts: "
            f"{inst_maps.keys()}. gt's: {gt_mask_insts.keys()}"
        )
# Loop masks per class
df_total = pd.DataFrame()
for c, ix in list(classes.items())[1:]: # skip bg
gts_per_class = [
get_type_instances(i, t, ix)
for i, t in zip(gt_mask_insts.values(), gt_mask_types.values())
]
insts_per_class = [
get_type_instances(i, t, ix)
for i, t in zip(inst_maps.values(), type_maps.values())
]
masks = list(zip(inst_maps.keys(), gts_per_class, insts_per_class))
metrics = []
with Pool() as pool:
for x in tqdm(
pool.imap_unordered(self.compute_metrics, masks),
total=len(masks), desc=f"Running metrics for {c}"
):
metrics.append(x)
# drop Nones if no classes are found in an image
metrics = [metric for metric in metrics if metric]
score_df = pd.DataFrame.from_records(metrics)
score_df = score_df.set_index("name").sort_index()
score_df.loc[f"{c}_avg_for_the_set"] = score_df.mean(axis=0)
            # Add averages to the df for files whose names contain patterns
            # in the pattern list
if pattern_list is not None:
pattern_avgs = {
f"{c}_{p}_avg": score_df[score_df.index.str.contains(f"{p}")].mean(axis=0)
for p in pattern_list
}
score_df = pd.concat([score_df, pd.DataFrame(pattern_avgs).transpose()])
df_total = pd.concat([df_total, score_df])
# Save results to .csv
if save_dir is not None:
save_dir = Path(save_dir)
df_total.to_csv(Path(save_dir / f"{prefix}_type_benchmark.csv"))
return df_total
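# --- Minimal usage sketch for the Benchmarker above (illustrative only) ---
# The tiny labelled arrays below are made-up stand-ins; in practice the dicts
# map file names to full-size instance maps loaded from disk, and the metric
# implementations imported at the top of this module must be importable.
if __name__ == '__main__':
    toy_gt = {
        'img1': np.array([[0, 1], [0, 2]]),   # two ground-truth nuclei
        'img2': np.array([[1, 1], [0, 0]]),   # one ground-truth nucleus
    }
    toy_pred = {
        'img1': np.array([[0, 1], [0, 2]]),   # perfect prediction
        'img2': np.array([[0, 1], [0, 0]]),   # partial overlap
    }
    scores = Benchmarker().benchmark_insts(inst_maps=toy_pred, gt_masks=toy_gt)
    print(scores[['PQ', 'AJI', 'DICE2']])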
|
91202
|
import sys
import contextlib
@contextlib.contextmanager
def stdout_redirect(stringIO):
sys.stdout = stringIO
try:
yield stringIO
finally:
sys.stdout = sys.__stdout__
stringIO.seek(0)
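# Example usage of the context manager above: anything printed inside the
# block goes to the given buffer, which is restored and rewound on exit.
if __name__ == '__main__':
    from io import StringIO
    with stdout_redirect(StringIO()) as captured:
        print("hello, captured world")
    print("captured:", captured.read().strip())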
|
91205
|
from abc import ABC, abstractmethod
from typing import AnyStr, List, Dict, Optional, Union
from functools import partial
class AdvancedFeaturizer(ABC):
pass
class SimpleFeaturizer(ABC):
    id = None
    name = None
    parameters = []
# def __init__(self, files: Union[str, List[str]], series: Union[str, List[str], None]):
#
# # If a string was provided as the files param (i.e. referencing a single file), convert it to a list
# if isinstance(files, str):
# files = [files]
#
# # If a string was provided as the series param (i.e. referencing a single series), convert it to a list
# if isinstance(series, str):
# series = [series]
#
# # Set files & series as instance properties
# self.files = files
# self.series = series
def __init__(self):
if self.id is None:
raise Exception("Featurizer has no ID.")
if self.name is None:
raise Exception(f"Featurizer {self.id} has no name.")
def getFeaturizeFunction(self, params):
# TODO(gus): Wrap iteratively in try/except?
print(f"Params pre-process: {params}")
preparedParams = self.prepareParams(params)
print(f"Params post-process: {preparedParams}")
return partial(self.featurize, params=preparedParams)
def getFields(self):
"""
Produces and returns a list of field dicts ready to be marshalled to JSON and supplied to Webix as a JavaScript
array representing the featurizer's form fields.
"""
return [field.getField() for field in self.parameters]
@abstractmethod
def featurize(self, data, params={}):
"""
This will be the function which, given a Pandas DataFrame and parameters dict, featurizes and returns the
feature's scalar value output.
:return: Scalar value output of featurization
"""
raise NotImplementedError("Error! Required featurizer method 'featurize' not implemented.")
def prepareParams(self, params):
"""
Returns a processed version of params given web form input.
"""
newParams = {}
for p in self.parameters:
try:
val = params[p.id]
if p.data_type == 'boolean':
if val is None or val=='':
val = p.default
elif not val or val==0 or val=='0' or val=='false' or val=='False':
val = False
else:
val = True
else:
if val is None or val=='':
val = p.default
newParams[p.id] = val
except:
newParams[p.id] = p.default
return newParams
class FeaturizerParameter():
def __init__(self,
id,
name,
description='',
data_type='float',
form_field_type='input',
options=[],
required=True,
default=None,
):
"""
:param id: Parameter ID
:param name: Name of the parameter
:param description: Description of the parameter with usage instructions for users
:param data_type: Indicates the data type of the parameter. Possible values are 'string', 'int', and 'float'.
:param form_field_type: Indicates the type of form field to present to the user. Possible values are 'input',
'dropdown', 'textarea', 'checkbox', 'radio', and 'slider'. Default value is 'input'. If data_type is 'boolean',
then form_field_type and options are ignored.
:param options: Dict (option value=>display value) of options available for user to choose from, if the
form_field_type is 'dropdown'. If data_type is 'boolean', then form_field_type and options are ignored.
:param required: Boolean indicating whether the parameter must be specified by the user.
:param default: Specifies the default value to use for the parameter if a value is not specified by the user.
:return: None
"""
# Validation dictionaries
valid_data_types = ['string', 'int', 'float', 'boolean']
valid_form_field_types = ['input', 'dropdown', 'textarea', 'checkbox', 'radio', 'slider']
# Validate properties
if data_type not in valid_data_types:
raise Exception(f"Invalid data_type provided: {data_type}")
if form_field_type not in valid_form_field_types:
raise Exception(f"Invalid form_field_type provided: {form_field_type}")
if not len(name) > 0:
raise Exception(f"Empty parameter name provided: {name}")
# Boolean-specific properties enforcement
if data_type == 'boolean':
if required:
form_field_type = 'checkbox'
options = []
else:
form_field_type = 'dropdown'
options = {'': '', 'true': 'True', 'false': 'False'}
if not default:
default = False
else:
default = True
# TODO: TEMP!!!!!
default = 'default'
# Set properties
self.id = id
self.name = name
self.description = description
self.data_type = data_type
self.form_field_type = form_field_type
self.options = options
self.required = required
self.default = default
def getField(self):
"""
This function produces and returns a dict ready to be marshalled to JSON and
then supplied to Webix as a JavaScript object representing a form field.
"""
field = {
'name': self.id,
'id': self.id,
#'labelWidth': 230,
'labelWidth': 'auto',
'labelAlign': 'left',
'inputAlign': 'right',
'label': self.name + ('*' if self.required else ''),
'tooltip': self.description + (f"\n\nDefault value: {self.default}" if self.default is not None else ''),
}
if self.form_field_type in ('radio', 'textarea', 'checkbox'):
field['view'] = self.form_field_type
if self.default is not None:
field['value'] = self.default
elif self.form_field_type == 'dropdown':
field['view'] = 'select'
elif self.form_field_type == 'input':
field['view'] = 'text'
elif self.form_field_type == 'slider':
raise NotImplementedError("Slider is not implemented.")
else:
raise Exception("Form field type not recognized.")
if self.form_field_type in ('dropdown', 'radio'):
field['options'] = [{'id': key, 'value': value} for key, value in self.options.items()]
return field
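# --- Illustrative sketch: a minimal concrete featurizer (hypothetical) ---
# SimpleFeaturizer subclasses are expected to define `id`, `name`, a list of
# FeaturizerParameter objects and a featurize() implementation. The class, its
# id and its single parameter below are made up for the example.
class MeanFeaturizer(SimpleFeaturizer):
    id = 'mean'
    name = 'Mean value'
    parameters = [
        FeaturizerParameter(id='column', name='Column', data_type='string',
                            form_field_type='input', default='value'),
    ]
    def featurize(self, data, params={}):
        # Return the mean of the configured DataFrame column as the scalar
        # feature value (data is expected to be a pandas DataFrame).
        return data[params.get('column', 'value')].mean()
# e.g. MeanFeaturizer().getFeaturizeFunction({'column': 'temperature'}) yields
# a partial ready to be applied to each DataFrame.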
|
91244
|
import socket
import struct
from threading import Thread
from python_qt_binding.QtCore import QMutex, QMutexLocker, QTimer
from qt_gui.plugin import Plugin
from pymavlink.mavutil import mavlink_connection
from pymavlink.dialects.v10.ardupilotmega \
import MAVLink_global_position_int_message
from pymavlink.dialects.v10.ardupilotmega \
import MAVLink_param_value_message
from uctf import GROUND_CONTROL_PORT_BLUE
from uctf import GROUND_CONTROL_PORT_GOLD
from uctf.widget import Widget
class View(Plugin):
def __init__(self, context):
super(View, self).__init__(context)
self.setObjectName('UCTFView')
self._widget = Widget()
if context.serial_number() > 1:
self._widget.setWindowTitle(
self._widget.windowTitle() +
(' (%d)' % context.serial_number()))
context.add_widget(self._widget)
self._update_queue = []
self._mutex = QMutex()
self.subscribers = {}
self._vehicle_types = {}
self._received_msg_types = []
self._subscribe('blue', GROUND_CONTROL_PORT_BLUE)
self._subscribe('gold', GROUND_CONTROL_PORT_GOLD)
self._timer = QTimer()
self._timer.timeout.connect(self._update_model)
self._timer.start(40)
def _subscribe(self, color, port):
device = 'udpin:localhost:%d' % port
print('listening to %s' % device)
conn = mavlink_connection(device)
def run():
while True:
try:
msg = conn.recv_match(blocking=True, timeout=1.0)
except socket.error:
return
if msg:
self._message_callback(color, msg)
thread = Thread(target=run, name=color)
thread.start()
self.subscribers[color] = (conn, thread)
def _message_callback(self, color, msg):
# MAVLink_altitude_message
# MAVLink_attitude_message
# MAVLink_attitude_quaternion_message
# MAVLink_attitude_target_message
# MAVLink_battery_status_message
# MAVLink_extended_sys_state_message
# MAVLink_global_position_int_message
# MAVLink_gps_raw_int_message
# MAVLink_heartbeat_message
# MAVLink_highres_imu_message
# MAVLink_home_position_message
# MAVLink_local_position_ned_message
# MAVLink_nav_controller_output_message
# MAVLink_sys_status_message
# MAVLink_vfr_hud_message
# MAVLink_vibration_message
# MAVLink_wind_cov_message
msg_type_name = type(msg).__name__
if msg_type_name not in self._received_msg_types:
# print(msg_type_name)
self._received_msg_types.append(msg_type_name)
if isinstance(msg, MAVLink_param_value_message):
if msg.param_id == 'MAV_TYPE':
src_system = msg.get_srcSystem()
mav_type = self._float_to_int(msg.param_value)
print('vehicle #%d is of type %s' % (src_system, mav_type))
self._vehicle_types[src_system] = mav_type
if isinstance(msg, MAVLink_global_position_int_message):
src_system = msg.get_srcSystem()
if src_system not in self._vehicle_types:
self._vehicle_types[src_system] = None
self._request_vehicle_type(color, src_system)
return
if self._vehicle_types[src_system]:
update = (
color,
src_system,
self._vehicle_types[src_system],
{
'lat': 1.0 * msg.lat / 10000000,
'lon': 1.0 * msg.lon / 10000000,
}
)
with QMutexLocker(self._mutex):
self._update_queue.append(update)
def _float_to_int(self, value):
p = struct.pack('<f', value)
        val = struct.unpack('<I', p)
return val[0]
def _request_vehicle_type(self, color, src_system):
print('requesting vehicle type of %s #%d' % (color, src_system))
(conn, _) = self.subscribers[color]
conn.mav.param_request_read_send(src_system, 1, 'MAV_TYPE', -1)
def _update_model(self):
with QMutexLocker(self._mutex):
updates = self._update_queue
self._update_queue = []
for update in updates:
self._widget.update_vehicle(*update)
def shutdown_plugin(self):
for (conn, thread) in self.subscribers.values():
conn.close()
thread.join(1)
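# Note (illustrative, not used by the plugin above): _float_to_int() assumes
# the autopilot byte-casts integer parameter values such as MAV_TYPE into the
# float param_value field, so the integer is recovered by reinterpreting the
# float's raw bytes. A quick round-trip check of that idea:
if __name__ == '__main__':
    raw = struct.pack('<I', 2)                                # the integer 2 ...
    as_float = struct.unpack('<f', raw)[0]                    # ... seen as a (denormal) float
    recovered = struct.unpack('<I', struct.pack('<f', as_float))[0]
    assert recovered == 2
    print('byte-cast round trip OK:', recovered)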
|
91261
|
import string
import random
def gen_rand_filename():
name = ""
for i in range(1, 10):
name += random.choice(list(string.ascii_uppercase + string.ascii_lowercase))
return name
def get_size(filename):
with open(filename, "rb") as file:
length = len(file.read())
return length
def clean_hex_output(hex_bytes):
raw_crypt_bytes = b""
for byte in hex_bytes.split():
byte = byte.replace("0x", '')
byte = byte.replace(",", '')
if len(byte) == 1:
byte = f"0{byte}"
try:
raw_crypt_bytes += bytes.fromhex(byte).encode('utf-8')
except AttributeError:
raw_crypt_bytes += bytes.fromhex(byte)
return raw_crypt_bytes
def prepare_pe_image(bytes_len, data):
pe_image = f"#define array_len {bytes_len}\n\n"
pe_image += "unsigned long long image_crypt[] = {\n"
pe_image += data
pe_image += "\n};"
return pe_image
def write_pe_image(path, pe_image):
with open(path, "w") as file:
file.write(pe_image)
def write_header_file(path, keys_used, jmp=False, runpe=False):
headerfile = ""
with open(path, "w") as file:
for key in keys_used:
headerfile += f"#define key{key} {hex(keys_used[key])}\n"
if jmp is True:
headerfile += "\nVOID FixImageIAT(PIMAGE_DOS_HEADER dos_header, PIMAGE_NT_HEADERS nt_header);\n"
headerfile += "LPVOID MapImageToMemory(LPVOID base_addr);\n"
if runpe is True:
headerfile += "void RunFromMemory(char* pImage, char* pPath);\n"
file.write(headerfile)
def write_decrypt(path, loops, enc_type="xor"):
first_run = False
to_write = ""
if enc_type == "xor":
while loops != 0:
loops -= 1
if first_run is False:
first_run = True
first_decrypt = """
for (i = 0; i < array_len; i++) {
decrypted_bytes[i] = key%s ^ image_crypt[i];
image_crypt[i] = '\\0';
}
""" % loops
to_write += first_decrypt
else:
decrypt = """
for (i = 0; i < array_len; i++) {
decrypted_bytes[i] = key%s ^ decrypted_bytes[i];
}\n
""" % loops
to_write += decrypt
    with open(path, "r") as file:
        data = file.readlines()
    data_backup = data
    safe = ''.join(data_backup)
    data.insert(120, to_write)
    outdata = ''.join(data)
    with open(path, "w") as file:
        file.write(outdata)
    return safe
def clean_up(path, clean):
    with open(path, "w") as file:
        file.write(clean)
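# Small illustrative demo of the helpers above (made-up input): turn a
# comma-separated "0x.." dump into raw bytes and wrap it into the C array
# template produced by prepare_pe_image().
if __name__ == '__main__':
    sample = "0x41, 0x42, 0x43"
    print(clean_hex_output(sample))            # b'ABC'
    print(prepare_pe_image(3, sample))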
|
91264
|
description = 'Actuators and feedback of the shutter, detector, and valves'
group = 'lowlevel'
excludes = ['IOcard']
devices = dict(
I1_pnCCD_Active = device('nicos.devices.generic.ManualSwitch',
description = 'high: Detector is turned on',
states = [0, 1],
),
I2_Shutter_safe = device('nicos.devices.generic.ManualSwitch',
description = 'high: Detector is turned on',
states = [0, 1],
),
I3_Det_chamber_vent_open = device('nicos.devices.generic.ManualSwitch',
description = 'high: Detector Chamber venting gauge open',
states = [0, 1],
),
I4_Exp_ch_vent_open = device('nicos.devices.generic.ManualSwitch',
description = 'high: Experiment Chamber venting gauge open',
states = [0, 1],
),
I5_Det_ch_pump_open = device('nicos.devices.generic.ManualSwitch',
description = 'high: Detector Chamber pumping gauge open',
states = [0, 1],
),
I6_Exp_ch_pump_open = device('nicos.devices.generic.ManualSwitch',
description = 'high: Experiment Chamber pumping gauge open',
states = [0, 1],
),
I7_Exp_ch_vent_gas_selection = device('nicos.devices.generic.ManualSwitch',
description = 'Venting either with air or nitrogen',
states = [0, 1],
),
I8_unused = device('nicos.devices.generic.ManualSwitch',
description = '1 Bit wide digital input starting at E8',
states = [0, 1],
),
O1_pnCCD_Trigger = device('nicos.devices.generic.ManualSwitch',
description = 'Send Trigger to detector to start collecting data',
states = [0, 1],
),
O2_Shutter_open = device('nicos.devices.generic.ManualSwitch',
description = 'Open the shutter from LMJ',
states = [0, 1],
),
O3_Det_ch_vent = device('nicos.devices.generic.ManualSwitch',
description = 'Vent Detector Chamber',
states = [0, 1],
),
    O4_Exp_ch_vent = device('nicos.devices.generic.ManualSwitch',
description = 'Vent Experiment Chamber',
states = [0, 1],
),
O5_Det_ch_pump = device('nicos.devices.generic.ManualSwitch',
description = 'Open gauge from pump to Detector Chamber',
states = [0, 1],
),
O6_Exp_ch_pump = device('nicos.devices.generic.ManualSwitch',
description = 'Open gauge from pump to Experiment Chamber',
states = [0, 1],
),
O7_Exp_ch_vent_gas = device('nicos.devices.generic.ManualSwitch',
description = 'Choose either air or Nitrogen for venting',
states = [0, 1],
),
O8_unused = device('nicos.devices.generic.ManualSwitch',
description = '1 Bit wide digital output starting at A8',
states = [0, 1],
),
)
|
91279
|
from unittest import TestCase
from mock import patch, Mock
from torch import nn
from torchbearer.callbacks.manifold_mixup import ManifoldMixup
import torchbearer
import torch
class TestModule(nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.conv = nn.Conv1d(1, 1, 1)
self.relu = nn.ReLU()
self.bn = nn.BatchNorm1d(1)
def forward(self, x):
x = self.conv(x.view(-1, 1, 1))
x = self.relu(x)
x = self.bn(x)
return x
class TestModule2(nn.Module):
def __init__(self):
super(TestModule2, self).__init__()
self.layer1 = TestModule()
def forward(self, x):
return self.layer1(x)
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.fc1 = nn.Linear(1, 1)
self.conv1 = nn.Conv1d(1, 1, 1)
self.relu = nn.ReLU()
self.layer1 = TestModule()
self.layer2 = TestModule2()
def forward(self, x):
x = self.fc1(x)
x = self.conv1(x.view(-1,1,1))
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
return x
class TestManifoldMixup(TestCase):
def setUp(self):
super(TestManifoldMixup, self).setUp()
self.model = TestModel()
def test_depth_none(self):
mm = ManifoldMixup().at_depth(None)
state = {torchbearer.MODEL: self.model}
mm.on_start(state)
self.assertTrue(len(mm._layers) == 12)
def test_depth_0(self):
mm = ManifoldMixup().at_depth(0)
state = {torchbearer.MODEL: self.model}
mm.on_start(state)
checks = [
self.model.fc1 in mm._layers,
self.model.conv1 in mm._layers,
self.model.relu in mm._layers,
self.model.layer1 in mm._layers,
self.model.layer2 in mm._layers,
]
self.assertTrue(all(checks)) # Top level modules in
self.assertFalse(self.model.layer1.conv in mm._layers) # Depth 1 modules not in
def test_depth_1(self):
mm = ManifoldMixup().at_depth(1)
state = {torchbearer.MODEL: self.model}
mm.on_start(state)
top_checks = [
self.model.fc1 in mm._layers,
self.model.conv1 in mm._layers,
self.model.relu in mm._layers,
self.model.layer1 in mm._layers,
self.model.layer2 in mm._layers,
]
first_checks = [
self.model.layer1.conv in mm._layers,
self.model.layer1.relu in mm._layers,
self.model.layer1.bn in mm._layers,
self.model.layer2.layer1 in mm._layers,
]
self.assertFalse(any(top_checks)) # Top level modules not in
self.assertTrue(all(first_checks)) # Depth 1 modules in
def test_depth_2(self):
mm = ManifoldMixup().at_depth(2)
state = {torchbearer.MODEL: self.model}
mm.on_start(state)
top_checks = [
self.model.fc1 in mm._layers,
self.model.conv1 in mm._layers,
self.model.relu in mm._layers,
self.model.layer1 in mm._layers,
self.model.layer2 in mm._layers,
]
first_checks = [
self.model.layer1.conv in mm._layers,
self.model.layer1.relu in mm._layers,
self.model.layer1.bn in mm._layers,
self.model.layer2.layer1 in mm._layers,
]
second_checks = [
self.model.layer2.layer1.conv in mm._layers,
self.model.layer2.layer1.relu in mm._layers,
self.model.layer2.layer1.bn in mm._layers,
]
self.assertFalse(any(top_checks)) # Top level modules not in
self.assertFalse(any(first_checks)) # Depth 1 modules not in
self.assertTrue(all(second_checks)) # Depth 2 modules in
def test_for_layers(self):
mm = ManifoldMixup().at_depth(None).for_layers(['conv1', 'layer1_conv', 'layer2_layer1_conv'])
state = {torchbearer.MODEL: self.model}
mm.on_start(state)
        self.assertTrue(self.model.conv1 in mm._layers and self.model.layer1.conv in mm._layers
                        and self.model.layer2.layer1.conv in mm._layers)
self.assertTrue(len(mm._layers) == 3)
def test_get_selected_layers(self):
mm = ManifoldMixup().at_depth(None).for_layers(['conv1', 'layer1_conv', 'layer2_layer1_conv'])
found_layers = mm.get_selected_layers(self.model)
self.assertTrue(len(found_layers) == 3)
self.assertTrue('conv1' in found_layers)
self.assertTrue('layer1_conv' in found_layers)
self.assertTrue('layer2_layer1_conv' in found_layers)
def test_layer_filter(self):
mm = ManifoldMixup().at_depth(None).with_layer_filter(['conv1', 'layer1_conv', 'layer2_layer1_conv'])
state = {torchbearer.MODEL: self.model}
mm.on_start(state)
        self.assertFalse(self.model.conv1 in mm._layers or self.model.layer1.conv in mm._layers
                         or self.model.layer2.layer1.conv in mm._layers)
self.assertTrue(len(mm._layers) == 12-3)
def test_layer_type_filter(self):
mm = ManifoldMixup().at_depth(None).with_layer_type_filter([nn.Conv1d])
state = {torchbearer.MODEL: self.model}
mm.on_start(state)
        self.assertFalse(self.model.conv1 in mm._layers or self.model.layer1.conv in mm._layers
                         or self.model.layer2.layer1.conv in mm._layers)
self.assertTrue(len(mm._layers) == 12-3)
def test_wrap(self):
mm = ManifoldMixup().at_depth(None).for_layers(['conv1', 'layer1_relu', 'layer2_layer1_conv'])
state = {torchbearer.MODEL: self.model}
mm.on_start(state)
self.model.conv1.mixup()
self.model.layer1.relu.mixup()
self.model.layer2.layer1.conv.mixup()
self.assertRaises(AttributeError, lambda: self.model.relu.mixup())
@patch('torchbearer.callbacks.manifold_mixup._mixup_inputs', side_effect=lambda x, _: x)
def test_call_mix(self, _):
mm = ManifoldMixup().at_depth(None).for_layers(['conv1', 'layer1_relu', 'layer2_layer1_conv'])
state = {torchbearer.MODEL: self.model}
mm.on_start(state)
self.model.conv1.mixup()
self.assertTrue(self.model.conv1.do_mixup)
self.model(torch.rand(3, 1))
self.assertFalse(self.model.conv1.do_mixup)
@patch('torchbearer.callbacks.manifold_mixup._mixup')
def test_on_sample(self, mix):
mm = ManifoldMixup().at_depth(None).for_layers(['conv1', 'layer1_relu', 'layer2_layer1_conv'])
state = {torchbearer.MODEL: self.model, torchbearer.X: torch.rand(3, 1), torchbearer.Y_TRUE: torch.rand(3, 1)}
mm.on_start(state)
mm.on_sample(state)
self.assertTrue(mix.call_count == 1)
self.assertTrue(torchbearer.MIXUP_PERMUTATION in state)
self.assertTrue(torchbearer.MIXUP_LAMBDA in state)
state = {torchbearer.MODEL: self.model, torchbearer.X: torch.rand(3, 1), torchbearer.Y_TRUE: torch.rand(3, 1)}
mm.on_sample(state)
self.assertTrue(mix.call_count == 2)
@patch('torchbearer.callbacks.manifold_mixup._mixup_inputs', side_effect=lambda x, _: x)
def test_eval(self, mix):
mm = ManifoldMixup().at_depth(None).for_layers(['conv1', 'layer1_relu', 'layer2_layer1_conv'])
self.model.eval()
state = {torchbearer.MODEL: self.model, torchbearer.X: torch.rand(3, 1), torchbearer.Y_TRUE: torch.rand(3, 1)}
mm.on_start(state)
mm.on_sample(state)
self.model(torch.rand(3, 1))
self.assertTrue(mix.call_count == 0)
state = {torchbearer.MODEL: self.model, torchbearer.X: torch.rand(3, 1), torchbearer.Y_TRUE: torch.rand(3, 1)}
mm.on_sample(state)
self.model = self.model.train()
self.model(torch.rand(3, 1))
self.assertTrue(mix.call_count == 1)
def test_mixup_inputs(self):
from torchbearer.callbacks.manifold_mixup import _mixup_inputs
x = torch.Tensor([[1, 2], [2, 3]])
perm = torch.Tensor([1, 0]).long()
lam = torch.Tensor([0.1])
state = {torchbearer.X: x, torchbearer.MIXUP_PERMUTATION: perm, torchbearer.MIXUP_LAMBDA: lam}
mixed = _mixup_inputs(x, state)
self.assertFalse((mixed - torch.Tensor([[1.9, 2.9], [1.1, 2.1]]) > 1e-6).any())
@patch('torchbearer.callbacks.manifold_mixup.Beta')
def test_sample_lam_random(self, beta):
mm = ManifoldMixup()
sl = mm._sample_lam
sl()
self.assertTrue(beta.mock_calls[0][1] == (1., 1.))
self.assertTrue(beta.mock_calls[1][0] == '().sample')
def test_sample_lam_negative(self):
mm = ManifoldMixup(alpha=-1)
sl = mm._sample_lam
lam = sl()
self.assertTrue(lam == 1.)
def test_sample_lam_fixed(self):
mm = ManifoldMixup(lam=2.)
sl = mm._sample_lam
lam = sl()
self.assertTrue(lam == 2.)
def test_single_to_list(self):
mm = ManifoldMixup()
sl = mm._single_to_list
item = 1.
self.assertTrue(sl(item) == [item, ])
|
91282
|
from wagtail.admin.edit_handlers import FieldPanel
from wagtail.contrib.modeladmin.views import CreateView, EditView
from .models import InvestmentCategorySettings
class CreateInvestmentView(CreateView):
def get_form_kwargs(self):
kwargs = super(CreateInvestmentView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def get_context_data(self):
context = super(CreateInvestmentView, self).get_context_data()
ics = InvestmentCategorySettings.for_request(self.request)
categories = ics.categories.all()
for category in categories:
field_name = category.name.lower().replace(' ', '_')
field_panel = FieldPanel(field_name).bind_to(
model=self.model,
instance=context['edit_handler'].instance,
request=context['edit_handler'].request,
form=context['form']
)
context['edit_handler'].children.append(field_panel)
return context
class EditInvestmentView(EditView):
def get_form_kwargs(self):
kwargs = super(EditInvestmentView, self).get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def get_context_data(self):
context = super(EditInvestmentView, self).get_context_data()
ics = InvestmentCategorySettings.for_request(self.request)
categories = ics.categories.all()
for category in categories:
field_name = category.name.lower().replace(' ', '_')
field_panel = FieldPanel(field_name).bind_to(
model=self.model,
instance=context['edit_handler'].instance,
request=context['edit_handler'].request,
form=context['form']
)
context['edit_handler'].children.append(field_panel)
return context
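# --- Illustrative wiring sketch (an assumption, not part of the module above) ---
# wagtail's modeladmin lets a ModelAdmin subclass point at custom create/edit
# view classes, which is the usual way views like these are attached. The
# Investment model and menu label below are hypothetical.
#
# from wagtail.contrib.modeladmin.options import ModelAdmin, modeladmin_register
# from .models import Investment
#
# @modeladmin_register
# class InvestmentAdmin(ModelAdmin):
#     model = Investment
#     menu_label = 'Investments'
#     create_view_class = CreateInvestmentView
#     edit_view_class = EditInvestmentView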
|
91299
|
import onmt
import numpy as np
import argparse
import torch
import codecs
import json
import sys
import csv
parser = argparse.ArgumentParser(description='preprocess.py')
##
## **Preprocess Options**
##
parser.add_argument('-config', help="Read options from this file")
parser.add_argument('-train_src', required=True,
help="Path to the training source data")
parser.add_argument('-valid_src', required=True,
help="Path to the validation source data")
parser.add_argument('-save_data', required=True,
help="Output file for the prepared data")
parser.add_argument('-src_vocab_size', type=int, default=20000,
help="Size of the source vocabulary")
parser.add_argument('-src_vocab', default=None,
help="Path to an existing source vocabulary")
parser.add_argument('-src_embedding', default=None,
help="Path to an existing source embedding matrix")
parser.add_argument('-seq_length', type=int, default=100,
help="Maximum sequence length")
parser.add_argument('-shuffle', type=int, default=1,
help="Shuffle data")
parser.add_argument('-seed', type=int, default=3435,
help="Random seed")
parser.add_argument('-lower', action='store_true', help='lowercase data')
parser.add_argument('-report_every', type=int, default=100000,
help="Report status every this many sentences")
opt = parser.parse_args()
print(opt)
torch.manual_seed(opt.seed)
def makeVocabulary(filename, size):
vocab = onmt.Dict(
[onmt.Constants.PAD_WORD, onmt.Constants.UNK_WORD, \
onmt.Constants.BOS_WORD, onmt.Constants.EOS_WORD], lower=opt.lower, seq_len=opt.seq_length)
count = 0
with codecs.open(filename, "r", "utf-8") as f:
tsv_reader = csv.reader(f, delimiter='\t')
for line in tsv_reader:
sent = line[1]
for word in sent.split():
vocab.add(word)
sent = line[2]
for word in sent.split():
vocab.add(word)
count += 1
with codecs.open(opt.valid_src, "r", "utf-8") as f:
tsv_reader = csv.reader(f, delimiter='\t')
for line in tsv_reader:
sent = line[1]
for word in sent.split():
vocab.add(word)
sent = line[2]
for word in sent.split():
vocab.add(word)
fname = opt.valid_src.split('.tsv')[0][:-3] + 'test.tsv'
with codecs.open(fname, "r", "utf-8") as f:
tsv_reader = csv.reader(f, delimiter='\t')
for line in tsv_reader:
sent = line[1]
for word in sent.split():
vocab.add(word)
sent = line[2]
for word in sent.split():
vocab.add(word)
originalSize = vocab.size()
vocab = vocab.prune(size)
print('Created dictionary of size %d (pruned from %d)' %
(vocab.size(), originalSize))
return vocab
def initVocabulary(name, dataFile, vocabFile, vocabSize):
vocab = None
if vocabFile is not None:
# If given, load existing word dictionary.
print('Reading '+name+' vocabulary from \''+vocabFile + '\'...')
vocab = onmt.Dict()
vocab.lower = opt.lower
vocab.seq_length = opt.seq_length
vocab.loadFile(vocabFile)
print('Loaded ' + str(vocab.size()) + ' ' + name + ' words')
if vocab is None:
# If a dictionary is still missing, generate it.
print('Building ' + name + ' vocabulary...')
genWordVocab = makeVocabulary(dataFile, vocabSize)
vocab = genWordVocab
print()
return vocab
def saveVocabulary(name, vocab, file):
print('Saving ' + name + ' vocabulary to \'' + file + '\'...')
vocab.writeFile(file)
def createEmbedMatrix(srcDicts):
print('Creating Embed matrix ...')
src_embed = torch.FloatTensor(torch.randn(srcDicts.size(), 300))
found = 0
f = codecs.open(opt.src_embedding, 'rb', 'utf-8')
for line in f:
splitLine = line.split(' ')
word = splitLine[0]
embedding = np.array([float(val) for val in splitLine[1:]])
idx = srcDicts.lookup(word)
if idx:
src_embed[idx] = torch.from_numpy(embedding)
found += 1
print('No of words from the vocab in the Glove: ' + str(found))
return src_embed
def makeData(srcFile, srcDicts):
src1, src2, tgt = [], [], []
sizes1, sizes2 = [], []
count, ignored = 0, 0
print('Processing %s ...' % (srcFile))
with codecs.open(srcFile, "r", "utf-8") as srcF:
tsv_reader = csv.reader(srcF, delimiter='\t')
for line in tsv_reader:
sent1 = line[1]
sent2 = line[2]
label = line[3]
src1Words = sent1.split()
src2Words = sent2.split()
if len(src1Words) > opt.seq_length:
src1Words = src1Words[:opt.seq_length]
if len(src2Words) > opt.seq_length:
src2Words = src2Words[:opt.seq_length]
src1 += [srcDicts.convertToIdx(src1Words,
onmt.Constants.UNK_WORD, padding=True)]
src2 += [srcDicts.convertToIdx(src2Words,
onmt.Constants.UNK_WORD, padding=True)]
tgt += [torch.LongTensor([int(label)])]
sizes1 += [len(src1Words)]
sizes2 += [len(src2Words)]
count += 1
if count % opt.report_every == 0:
print('... %d sentences prepared' % count)
if opt.shuffle == 1:
print('... shuffling sentences')
perm = torch.randperm(len(src1))
src1 = [src1[idx] for idx in perm]
src2 = [src2[idx] for idx in perm]
tgt = [tgt[idx] for idx in perm]
sizes1 = [sizes1[idx] for idx in perm]
sizes2 = [sizes2[idx] for idx in perm]
print('Prepared %d sentences (%d ignored due to length == 0 or > %d)' %
(len(src1), ignored, opt.seq_length))
return src1, src2, tgt
def main():
dicts = {}
print('Preparing source vocab ....')
dicts['src'] = initVocabulary(
'source',
opt.train_src,
opt.src_vocab,
opt.src_vocab_size
)
if opt.src_embedding:
embedding = createEmbedMatrix(dicts['src'])
torch.save(embedding, opt.save_data + '.embed.pt')
print('Preparing training ...')
train = {}
train['src1'], train['src2'], train['tgt'] = makeData(
opt.train_src,
dicts['src']
)
print('Preparing validation ...')
valid = {}
valid['src1'], valid['src2'], valid['tgt'] = makeData(
opt.valid_src,
dicts['src']
)
if opt.src_vocab is None:
saveVocabulary(
'source',
dicts['src'],
opt.save_data +
'.src.dict'
)
print('Saving data to \'' + opt.save_data + '.train.pt\'...')
save_data = {'dicts': dicts,
'train': train,
'valid': valid,
}
torch.save(save_data, opt.save_data + '.train.pt')
if __name__ == "__main__":
main()
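# Example invocation (hypothetical file names). The TSV inputs are expected to
# carry the two sentences in columns 1 and 2 and the label in column 3, as read
# by makeVocabulary() and makeData() above; a sibling test file derived from
# -valid_src (here data/test.tsv) is also read when building the vocabulary.
#
#   python preprocess.py -train_src data/train.tsv -valid_src data/dev.tsv \
#       -save_data data/prepared -src_vocab_size 30000 -seq_length 50 \
#       -src_embedding glove.840B.300d.txt -lower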
|
91306
|
import argparse
from collections import Counter, OrderedDict
from prenlp.tokenizer import *
TOKENIZER = {'nltk_moses': NLTKMosesTokenizer(),
'mecab' : Mecab()}
class Vocab:
"""Defines a vocabulary object that will be used to numericalize text.
Args:
vocab_size (int) : the maximum size of the vocabulary
pad_token (str) : token that indicates 'padding'
unk_token (str) : token that indicates 'unknown word'
bos_token (str) : token that indicates 'beginning of sentence'
eos_token (str) : token that indicates 'end of sentence'
"""
def __init__(self, vocab_size: int = 16000, pad_token: str = '[PAD]', unk_token: str = '[UNK]',
bos_token: str = '[BOS]', eos_token: str = '[EOS]'):
self.vocab_size = vocab_size
self.pad_token = pad_token
self.unk_token = unk_token
self.bos_token = bos_token
self.eos_token = eos_token
self.special_tokens = [pad_token, unk_token, bos_token, eos_token]
self.freqs = Counter()
self.vocab = OrderedDict()
# Initialize vocabulary with special tokens
for special_token in self.special_tokens:
self.vocab[special_token] = len(self.vocab)
def build(self, corpus, tokenizer, max_sentence_length=100000):
"""Build vocabulary with given corpus and tokenizer.
"""
with open(corpus, 'r', encoding='utf-8') as reader:
for i, line in enumerate(reader.readlines()):
if len(line) >= max_sentence_length:
line = line[:max_sentence_length]
tokens = tokenizer.tokenize(line.strip())
self.freqs.update(tokens)
for token, freq in self.freqs.most_common(self.vocab_size-len(self.special_tokens)):
self.vocab[token] = len(self.vocab)
def save(self, path, postfix='.vocab'):
"""Save vocabulary.
"""
with open(path+postfix, 'w', encoding='utf-8') as writer:
for token, id in self.vocab.items():
writer.write('{token}\t{id}\n'.format(token=token, id=id))
def __len__(self):
return len(self.vocab)
def build(args):
if args.tokenizer == 'sentencepiece':
tokenizer = SentencePiece.train(input = args.corpus, model_prefix = args.prefix,
vocab_size = args.vocab_size,
model_type = args.model_type,
character_coverage = args.character_coverage,
max_sentence_length = args.max_sentence_length,
pad_token = args.pad_token,
unk_token = args.unk_token,
bos_token = args.bos_token,
eos_token = args.eos_token)
else:
tokenizer = TOKENIZER[args.tokenizer]
vocab = Vocab(vocab_size = args.vocab_size,
pad_token = args.pad_token,
unk_token = args.unk_token,
bos_token = args.bos_token,
eos_token = args.eos_token)
vocab.build(args.corpus, tokenizer, args.max_sentence_length)
vocab.save(args.prefix)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--corpus', required=True, type=str, help='one-sentence-per-line corpus file')
parser.add_argument('--prefix', required=True, type=str, help='output vocab(or sentencepiece model) name prefix')
parser.add_argument('--tokenizer', default='sentencepiece', type=str, help='tokenizer to tokenize input corpus. available: sentencepiece, '+', '.join(TOKENIZER.keys()))
parser.add_argument('--vocab_size', default=16000, type=int, help='the maximum size of the vocabulary')
parser.add_argument('--character_coverage', default=1.0, type=float,
help='amount of characters covered by the model, good defaults are: 0.9995 for languages with rich character set\
                        like Japanese or Chinese and 1.0 for other languages with small character set')
parser.add_argument('--model_type', default='bpe', type=str, help='sentencepiece model type. Choose from unigram, bpe, char, or word')
parser.add_argument('--max_sentence_length', default=100000, type=int, help='The maximum input sequence length')
parser.add_argument('--pad_token', default='[PAD]', type=str, help='token that indicates padding')
parser.add_argument('--unk_token', default='[UNK]', type=str, help='token that indicates unknown word')
parser.add_argument('--bos_token', default='[BOS]', type=str, help='token that indicates beginning of sentence')
parser.add_argument('--eos_token', default='[EOS]', type=str, help='token that indicates end of sentence')
args = parser.parse_args()
build(args)
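# Example invocations (hypothetical script, corpus and prefix names). With
# --tokenizer sentencepiece a sentencepiece model is trained under the given
# prefix; with any other tokenizer a frequency-based vocabulary is built and
# written to <prefix>.vocab as one "token<TAB>id" line per entry.
#
#   python vocab_builder.py --corpus corpus.txt --prefix my_vocab \
#       --tokenizer sentencepiece --model_type bpe --vocab_size 16000
#
#   python vocab_builder.py --corpus corpus.txt --prefix my_vocab \
#       --tokenizer nltk_moses --vocab_size 16000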
|
91359
|
import pytest
from rest_framework import serializers
from .models import Album, Track
from drf_jsonschema import to_jsonschema
from jsonschema import validate
@pytest.mark.django_db
def test_string_related_field():
album = Album.objects.create(
album_name="Collected Stories", artist="<NAME>")
    track1 = Track.objects.create(
album=album,
order=1,
title="The Dunwich Horror",
duration=10)
Track.objects.create(
album=album,
order=2,
title="The Call of Cthulhu",
duration=15)
Track.objects.create(
album=album,
order=3,
title="At the Mountains of Madness",
duration=20)
class AlbumSerializer(serializers.ModelSerializer):
class Meta:
model = Album
fields = ('album_name', 'artist', 'tracks')
class TrackSerializer(serializers.ModelSerializer):
class Meta:
model = Track
fields = ('order', 'title', 'duration')
json_schema = to_jsonschema(TrackSerializer())
valid = {'title': "The Dunwich Horror", "order": 1, "duration": 11}
validate(valid, json_schema)
serializer = TrackSerializer(track1, data=valid)
assert serializer.is_valid()
serializer.save()
assert track1.duration == 11
invalid = {'title': "The Dunwich Horror", "order": 1}
serializer = TrackSerializer(track1, data=invalid)
assert not serializer.is_valid()
album_data = AlbumSerializer(album).data
assert album_data == {
'album_name': 'Collected Stories',
'artist': '<NAME>',
'tracks': [1, 2, 3]
}
album_serializer = AlbumSerializer(album, album_data)
json_schema = to_jsonschema(album_serializer)
validate(album_data, json_schema)
|
91367
|
from ..exceptions import RedisError
from ..utils import b
from ..utils import nativestr
def parse_georadius_generic(response, **options):
if options['store'] or options['store_dist']:
        # `store` and `store_dist` can't be combined
# with other command arguments.
return response
if not isinstance(response, list):
response_list = [response]
else:
response_list = response
if (not options['withdist'] and not options['withcoord']
and not options['withhash']):
# just a bunch of places
return [nativestr(r) for r in response_list]
cast = {
'withdist': float,
'withcoord': lambda ll: (float(ll[0]), float(ll[1])),
'withhash': int,
}
    # zip all output results with each casting function to get
# the properly native Python value.
f = [nativestr]
f += [cast[o] for o in ['withdist', 'withhash', 'withcoord'] if options[o]]
return [
list(map(lambda fv: fv[0](fv[1]), zip(f, r))) for r in response_list
]
class GeoCommandMixin:
RESPONSE_CALLBACKS = {
'GEOPOS': lambda r: list(map(lambda ll: (float(ll[0]),
float(ll[1]))
if ll is not None else None, r)),
'GEOHASH': list,
'GEORADIUS': parse_georadius_generic,
'GEORADIUSBYMEMBER': parse_georadius_generic,
'GEODIST': float,
'GEOADD': int,
}
# GEO COMMANDS
async def geoadd(self, name, *values):
"""
Add the specified geospatial items to the specified key identified
by the ``name`` argument. The Geospatial items are given as ordered
members of the ``values`` argument, each item or place is formed by
        the triad longitude, latitude and name.
"""
if len(values) % 3 != 0:
raise RedisError('GEOADD requires places with lon, lat and name'
' values')
return await self.execute_command('GEOADD', name, *values)
async def geodist(self, name, place1, place2, unit=None):
"""
Return the distance between ``place1`` and ``place2`` members of the
``name`` key.
        The units must be one of the following: m, km, mi, ft. By default
        meters are used.
"""
pieces = [name, place1, place2]
if unit and unit not in ('m', 'km', 'mi', 'ft'):
raise RedisError('GEODIST invalid unit')
if unit:
pieces.append(unit)
return await self.execute_command('GEODIST', *pieces)
async def geohash(self, name, *values):
"""
Return the geo hash string for each item of ``values`` members of
        the specified key identified by the ``name`` argument.
"""
return await self.execute_command('GEOHASH', name, *values)
async def geopos(self, name, *values):
"""
Return the positions of each item of ``values`` as members of
        the specified key identified by the ``name`` argument. Each position
is represented by the pairs lon and lat.
"""
return await self.execute_command('GEOPOS', name, *values)
async def georadius(self, name, longitude, latitude, radius, unit=None,
withdist=False, withcoord=False, withhash=False, count=None,
sort=None, store=None, store_dist=None):
"""
Return the members of the specified key identified by the
``name`` argument which are within the borders of the area specified
with the ``latitude`` and ``longitude`` location and the maximum
distance from the center specified by the ``radius`` value.
        The units must be one of the following: m, km, mi, ft. By default
        meters are used.
``withdist`` indicates to return the distances of each place.
``withcoord`` indicates to return the latitude and longitude of
each place.
``withhash`` indicates to return the geohash string of each place.
``count`` indicates to return the number of elements up to N.
``sort`` indicates to return the places in a sorted way, ASC for
        nearest to farthest and DESC for farthest to nearest.
``store`` indicates to save the places names in a sorted set named
with a specific key, each element of the destination sorted set is
populated with the score got from the original geo sorted set.
``store_dist`` indicates to save the places names in a sorted set
named with a specific key, instead of ``store`` the sorted set
destination score is set with the distance.
"""
return await self._georadiusgeneric('GEORADIUS',
name, longitude, latitude, radius,
unit=unit, withdist=withdist,
withcoord=withcoord, withhash=withhash,
count=count, sort=sort, store=store,
store_dist=store_dist)
async def georadiusbymember(self, name, member, radius, unit=None,
withdist=False, withcoord=False, withhash=False,
count=None, sort=None, store=None, store_dist=None):
"""
This command is exactly like ``georadius`` with the sole difference
that instead of taking, as the center of the area to query, a longitude
and latitude value, it takes the name of a member already existing
inside the geospatial index represented by the sorted set.
"""
return await self._georadiusgeneric('GEORADIUSBYMEMBER',
name, member, radius, unit=unit,
withdist=withdist, withcoord=withcoord,
withhash=withhash, count=count,
sort=sort, store=store,
store_dist=store_dist)
async def _georadiusgeneric(self, command, *args, **kwargs):
pieces = list(args)
if kwargs['unit'] and kwargs['unit'] not in ('m', 'km', 'mi', 'ft'):
raise RedisError('GEORADIUS invalid unit')
if kwargs['unit']:
pieces.append(kwargs['unit'])
else:
pieces.append('m')
for token in ('withdist', 'withcoord', 'withhash'):
if kwargs[token]:
pieces.append(b(token.upper()))
if kwargs['count']:
pieces.extend([b('COUNT'), kwargs['count']])
if kwargs['sort'] and kwargs['sort'] not in ('ASC', 'DESC'):
raise RedisError('GEORADIUS invalid sort')
if kwargs['sort']:
pieces.append(b(kwargs['sort']))
if kwargs['store'] and kwargs['store_dist']:
            raise RedisError('GEORADIUS store and store_dist cannot be set'
' together')
if kwargs['store']:
pieces.extend([b('STORE'), kwargs['store']])
if kwargs['store_dist']:
pieces.extend([b('STOREDIST'), kwargs['store_dist']])
return await self.execute_command(command, *pieces, **kwargs)
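# --- Usage sketch (illustrative addition, not part of the original module) ---
# Assuming a client class that mixes in GeoCommandMixin and implements
# ``execute_command``, the geo commands above could be exercised roughly as
# follows; the key name 'cities' and the coordinates are placeholder values.
async def _geo_usage_example(client):
    # GEOADD expects flat (longitude, latitude, name) triples.
    await client.geoadd('cities',
                        13.361389, 38.115556, 'Palermo',
                        15.087269, 37.502669, 'Catania')
    # Distance between two members, in kilometres.
    km = await client.geodist('cities', 'Palermo', 'Catania', unit='km')
    # Members within 200 km of a point, nearest first, with their distances.
    nearby = await client.georadius('cities', 15.0, 37.0, 200, unit='km',
                                    withdist=True, sort='ASC')
    return km, nearby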
|
91390
|
from datetime import datetime
from airflow import DAG
from airflow.operators.valohai import ValohaiSubmitExecutionOperator, ValohaiDownloadExecutionOutputsOperator
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime(2018, 1, 1),
'email': ['<EMAIL>'],
'email_on_failure': False,
'email_on_retry': False
}
dag = DAG(
'example_valohai_dag',
default_args=default_args,
schedule_interval=None,
catchup=False
)
train_model = ValohaiSubmitExecutionOperator(
task_id='train_model',
project_name='tensorflow-example',
step='Train model (MNIST)',
dag=dag,
inputs={
'test-set-images': 'https://valohai-mnist.s3.amazonaws.com/t10k-images-idx3-ubyte.gz',
'test-set-labels': 'https://valohai-mnist.s3.amazonaws.com/t10k-labels-idx1-ubyte.gz',
'training-set-images': 'https://valohai-mnist.s3.amazonaws.com/train-images-idx3-ubyte.gz',
'training-set-labels': 'https://valohai-mnist.s3.amazonaws.com/train-labels-idx1-ubyte.gz'
},
parameters={
'dropout': 0.9,
'learning_rate': 0.001,
'max_steps': 300,
'batch_size': 200,
}
)
download_model = ValohaiDownloadExecutionOutputsOperator(
task_id='download_model',
output_task=train_model,
output_name='model.pb',
dag=dag
)
train_model >> download_model
|
91395
|
import argparse
import torch
def load_model(model_path):
try:
ckpt = torch.load(model_path)
except RuntimeError:
ckpt = torch.load(model_path, map_location="cpu")
if "model" in ckpt.keys():
return ckpt["model"]
return ckpt
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--novel-model", type=str,
required=True,
help="path to the final trained model")
parser.add_argument("--base-model", type=str,
default="/path/to/your/model.pth",
help="path to the base model")
parser.add_argument("--save-model", type=str,
default="model_redetect.pth",
help="path to the saved model")
return parser.parse_args()
def combine_and_save(
base_model="/path/to/your/model.pth",
novel_model="log/model_final.pth",
save_model="log/model_redetect.pth"
):
base_model = load_model(base_model)
novel_model = load_model(novel_model)
# ensemble rpn
rpn_cls_keys = [
'proposal_generator.rpn_head.objectness_logits.weight',
'proposal_generator.rpn_head.objectness_logits.bias'
]
for k in rpn_cls_keys:
novel_model[k.replace(".objectness", ".finetuned_objectness")] = \
novel_model[k]
novel_model[k] = base_model[k]
rpn_keys = [k for k in base_model.keys() if "rpn" in k]
for k in rpn_keys:
assert torch.all(base_model[k] == novel_model[k]), f"{k} not equal!"
torch.save(novel_model, save_model)
return save_model
if __name__ == "__main__":
args = parse_args()
combine_and_save(args.base_model, args.novel_model, args.save_model)
|
91401
|
import traceback
import pymel.core as pm
import mgear
from mgear.vendor.Qt import QtCore
from mgear.core.anim_utils import *
# =============================================================================
# constants
# =============================================================================
SYNOPTIC_WIDGET_NAME = "synoptic_view"
##################################################
#
##################################################
def getSynopticWidget(widget, max_iter=20):
"""Return the widget where the synoptic panel is attach
Arguments:
widget (QWidget): The widget to get the parent
max_iter (int, optional): Iteration limit to find the paretn widget
Returns:
widget: The Parent widget
"""
parent = widget.parentWidget()
for i in range(max_iter):
if parent.objectName() == SYNOPTIC_WIDGET_NAME:
return parent
parent = parent.parentWidget()
return False
def getModel(widget):
"""Get the model Name
Args:
widget (QWidget): Synoptic widget
Returns:
PyNode: The rig model name
"""
syn_widget = getSynopticWidget(widget, max_iter=20)
model_name = syn_widget.model_list.currentText()
if not pm.ls(model_name):
return None
try:
model = pm.PyNode(model_name)
except pm.general.MayaNodeError:
mes = traceback.format_exc()
mes = "Can't find model {0} for widget: {1}\n{2}".format(
model_name, widget, mes)
mgear.log(mes, mgear.sev_error)
return None
return model
##################################################
# SELECT
##################################################
# ================================================
def selectObj(model, object_names, mouse_button, key_modifier):
"""Select an object
Args:
model (PyNode): The rig top node
object_names (list): The names of the objects to select
mouse_button (QtSignal): Clicked mouse button signal
key_modifier (QtSignal): Modifier button signal
Returns:
None
"""
if not model:
return
nameSpace = getNamespace(model)
with pm.UndoChunk():
nodes = []
for name in object_names:
if nameSpace:
node = getNode(nameSpace + ":" + name)
else:
node = getNode(name)
            if not node:
                if nameSpace:
                    mgear.log("Can't find object : %s:%s" % (nameSpace, name),
                              mgear.sev_error)
                else:
                    mgear.log("Can't find object : %s" % (name),
                              mgear.sev_error)
                continue
            nodes.append(node)
if not nodes:
return
if mouse_button == QtCore.Qt.RightButton:
mirrorPose(False, nodes)
return
if mouse_button == QtCore.Qt.MiddleButton:
mirrorPose(True, nodes)
return
# Key pressed
if key_modifier is None:
pm.select(nodes)
elif key_modifier == QtCore.Qt.NoModifier: # No Key
pm.select(nodes)
elif key_modifier == QtCore.Qt.ControlModifier: # ctrl
pm.select(nodes, deselect=True)
elif key_modifier == QtCore.Qt.ShiftModifier: # shift
pm.select(nodes, toggle=True)
elif int(key_modifier) == (QtCore.Qt.ControlModifier
| QtCore.Qt.ShiftModifier): # ctrl + shift
pm.select(nodes, add=True)
elif key_modifier == QtCore.Qt.AltModifier: # alt
pm.select(nodes)
elif int(key_modifier) == (QtCore.Qt.ControlModifier
| QtCore.Qt.AltModifier): # ctrl + alt
pm.select(nodes, deselect=True)
elif int(key_modifier) == (QtCore.Qt.ShiftModifier
| QtCore.Qt.AltModifier): # shift + alt
pm.select(nodes, toggle=True)
# Ctrl + alt + shift
elif int(key_modifier) == (QtCore.Qt.ControlModifier
| QtCore.Qt.AltModifier
| QtCore.Qt.ShiftModifier):
pm.select(nodes, add=True)
else:
pm.select(nodes)
|
91404
|
import torch.nn as nn
import torch.nn.functional as F
class PadLayer(nn.Module):
    # Pads (pad > 0) or crops (pad < 0) the last two dimensions of the feature map
    # by ``pad`` on every side, e.g. pad = -1 removes the first and last rows and columns.
def __init__(self, pad):
super(PadLayer, self).__init__()
self.pad = pad
def forward(self, input):
        return F.pad(input, [self.pad] * 4)
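# --- Usage sketch (illustrative addition, not part of the original module) ---
# A negative ``pad`` crops the last two dimensions while a positive value
# zero-pads them; the tensor shape below is just an example.
if __name__ == "__main__":
    import torch
    x = torch.randn(1, 3, 8, 8)         # (batch, channels, height, width)
    cropped = PadLayer(-1)(x)           # removes one row/column on every side
    padded = PadLayer(1)(x)             # zero-pads one row/column on every side
    print(cropped.shape, padded.shape)  # -> (1, 3, 6, 6) and (1, 3, 10, 10)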
|
91409
|
import socket
from sockets.broadcast_socket import BroadcastSocket
import logger
log = logger.getLogger(__name__)
class BroadcastDiscoverer(BroadcastSocket):
def __init__(self, port):
super(BroadcastDiscoverer, self).__init__()
        self._sock.bind(('0.0.0.0', port))
def __del__(self):
"Shutdown and close the underlying socket."
self._sock.close()
@property
def timeout(self):
'Receive timeout'
return self._sock.gettimeout()
@timeout.setter
def timeout(self, value):
self._sock.settimeout(value)
def recv(self, size):
"Receive a broadcast through the underlying socket."
return self._sock.recvfrom(size)
|
91416
|
answer1 = widget_inputs["radio1"]
answer2 = widget_inputs["radio2"]
answer3 = widget_inputs["radio3"]
answer4 = widget_inputs["radio4"]
is_correct = False
comments = []
def commentizer(new):
if new not in comments:
comments.append(new)
if answer2 == True:
is_correct = True
else:
    is_correct = False
commentizer("Open the site and try changing `cover` to `contain` in DevTools to see the difference.")
commentizer("Check the first one.")
if answer3 == True:
is_correct = is_correct and True
else:
    is_correct = False
commentizer("Open the site and try changing `cover` to `contain` in DevTools to see the difference.")
commentizer("Check the second one.")
if is_correct:
commentizer("Great job! You're starting to learn how to decide between raster and vector options.")
grade_result["comment"] = "\n\n".join(comments)
grade_result["correct"] = is_correct
|
91417
|
import logging
from typing import Any, List
from absl import flags
from injector import Module, inject, singleton
from rep0st.framework import app
from rep0st.framework.scheduler import Scheduler
from rep0st.service.tag_service import TagService, TagServiceModule
log = logging.getLogger(__name__)
FLAGS = flags.FLAGS
flags.DEFINE_string(
'rep0st_update_tags_job_schedule', '*/1 * * * *',
'Schedule in crontab format for running the tag update job.')
class UpdateTagsJobModule(Module):
def configure(self, binder):
binder.install(TagServiceModule)
binder.bind(UpdateTagsJob)
@singleton
class UpdateTagsJob:
tag_service: TagService
@inject
def __init__(self, tag_service: TagService, scheduler: Scheduler):
self.tag_service = tag_service
scheduler.schedule(FLAGS.rep0st_update_tags_job_schedule,
self.update_tags_job)
def update_tags_job(self):
self.tag_service.update_tags()
def modules() -> List[Any]:
return [UpdateTagsJobModule]
if __name__ == "__main__":
app.run(modules)
|
91435
|
import sys
from hashlib import md5
from management_database import User
def hash_password(email, password):
return md5("%s:%s" % (email, password)).hexdigest()
def main(email, password):
try:
user = User.get_by_email(email)
user.passwd = hash_password(email, password)
user.save()
except Exception as e:
print "Exception: %s" % e
print "Success."
if __name__ == '__main__':
    if len(sys.argv) < 3:
        print "Usage: python change_password.py <email> <new_password>"
        sys.exit(1)
email = str(sys.argv[1])
password = str(sys.argv[2])
main(email, password)
|
91440
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
SAASU_ERRORS = {
'no_key': 'Please set your SAASU_WSACCESS_KEY setting.',
'no_uid': 'Please set your SAASU_FILE_UID setting.',
'disabled': 'Disabled in demo mode'
}
SAASU_WSACCESS_KEY = getattr(settings, 'SAASU_WSACCESS_KEY', None)
if SAASU_WSACCESS_KEY is None:
raise ImproperlyConfigured(SAASU_ERRORS['no_key'])
SAASU_FILE_UID = getattr(settings, 'SAASU_FILE_UID', None)
if SAASU_FILE_UID is None:
raise ImproperlyConfigured(SAASU_ERRORS['no_uid'])
|
91443
|
import traceback
def error_str(func_name, exception):
    return func_name + " failed with exception: " + str(exception) + "\n" + traceback.format_exc()
def print_dict(dictn):
for elem in dictn:
print(elem, repr(dictn[elem]))
def dump_attrs(obj):
for attr in dir(obj):
print("obj.%s" % attr)
|
91459
|
from django_messages.api import ReceivedMessageResource, SentMessageResource, TrashMessageResource
from friendship.api import FollowerResource, FollowingResource
from tastypie.api import Api
from plan.api import PlanResource
from traveller.api import TravellerResource
from notifications.api import AllNotificationResource, UnreadNotificationResource
from city.api import ProvinceResource
from django.conf.urls import patterns, url, include
__author__ = 'danielqiu'
v1_api = Api(api_name='v1')
v1_api.register(PlanResource())
v1_api.register(TravellerResource())
v1_api.register(AllNotificationResource())
v1_api.register(UnreadNotificationResource())
v1_api.register(FollowerResource())
v1_api.register(FollowingResource())
v1_api.register(ReceivedMessageResource())
v1_api.register(SentMessageResource())
v1_api.register(TrashMessageResource())
v1_api.register(ProvinceResource())
urlpatterns = patterns('',
url(r'^', include(v1_api.urls)),
)
|
91467
|
import tkinter as tk
from tkinter import ttk
from collections import deque
class Timer(ttk.Frame):
"""parent is the frame which contains the timer frame self is the object whose properties are being created
and controller is the class whose properties are inherited....tk.Frame properties are also inherited"""
def __init__(self, parent, controller, show_settings):
super().__init__(parent)
self['style'] = 'Background.TFrame'
# setting the object as the controller
self.controller = controller
pomodoro_time = int(controller.pomodoro.get())
# variable to hold the current time with default value
self.current_time = tk.StringVar(value=f'{pomodoro_time:02d}:00')
# variable to hold the current phase of the timer_Schedule
self.current_timer_label = tk.StringVar(value=controller.timer_schedule[0])
# timer_running variable with boolean value false as timer is initially off
# it will start after clicking start button
self.timer_running = False
# private variable to stop the execution of after method in decrement method
self._timer_decrement_job = None
# label showing the current phase
timer_description = ttk.Label(
self,
textvariable=self.current_timer_label,
style='LightText.TLabel'
)
timer_description.grid(row=0, column=0, sticky='W', padx=(10, 0), pady=(10, 0))
        # button to switch from the timer frame to the settings frame
settings_button = ttk.Button(
self,
text='Settings',
command=show_settings,
style='PomodoroButton.TButton',
cursor='hand2'
)
settings_button.grid(row=0, column=1, sticky='E', padx=10, pady=10)
timer_frame = ttk.Frame(self, height='100', style='Timer.TFrame')
timer_frame.grid(row=1, column=0, columnspan=2, pady=(10, 0), sticky='NSEW')
# counter label in timer_frame
timer_counter = ttk.Label(timer_frame,
textvariable=self.current_time,
style='TimerText.TLabel',
)
timer_counter.place(relx=0.5, rely=0.5, anchor='center') # positioning method like grid
# Button containing frame
button_container = ttk.Frame(self, padding=100, style='Background.TFrame')
button_container.grid(row=2, column=0, columnspan=2, sticky='EW')
button_container.columnconfigure((0, 1, 2), weight=1)
self.start_button = ttk.Button(
button_container,
text='Start',
command=self.start_timer,
style='PomodoroButton.TButton',
cursor='hand2' # change the appearance of cursor on the button
)
self.start_button.grid(row=0, column=0, sticky='EW')
self.stop_button = ttk.Button(
button_container,
text='Stop',
state='disabled', # initially off
command=self.stop_timer,
style='PomodoroButton.TButton',
cursor='hand2'
)
self.stop_button.grid(row=0, column=1, sticky='EW', padx=5)
"""self not used with reset_button and rest_timer because we don't want to use them out of this class"""
reset_button = ttk.Button(
button_container,
text='Reset',
command=self.reset_timer,
style='PomodoroButton.TButton',
cursor='hand2'
)
reset_button.grid(row=0, column=2, sticky='EW')
def start_timer(self):
self.timer_running = True # setting the timer status on after clicking start
self.start_button['state'] = 'disabled' # disables the start button after start of timer
        self.stop_button['state'] = 'enabled'  # enable the stop button (initially disabled) once the timer starts
self.decrement_time()
def stop_timer(self):
self.timer_running = False # on click of stop ,off the timer
self.stop_button['state'] = 'disabled' # disables the stop button after the click
        self.start_button['state'] = 'enabled'  # re-enable the start button once the timer stops
if self._timer_decrement_job: # when the _timer_decrement_job found
self.after_cancel(self._timer_decrement_job) # cancel the further execution
self._timer_decrement_job = None # set the value of the _timer_decrement_job to None
def reset_timer(self):
self.stop_timer()
pomodoro_time = int(self.controller.pomodoro.get()) # getting value of pomodoro time from pomodoro class
        self.current_time.set(f'{pomodoro_time:02d}:00')  # reset the displayed time to the configured pomodoro length
self.controller.timer_schedule = deque(self.controller.timer_order) # change timer schedule to initial state
self.current_timer_label.set(self.controller.timer_schedule[0]) # update timer label with first value of queue
def decrement_time(self):
"""This function reducing or updating the label every second"""
current_time = self.current_time.get()
if self.timer_running and current_time != '00:00': # timer is running
minutes, seconds = current_time.split(':') # splitting the string values into two variables
if int(seconds) > 0: # never let seconds be negative
seconds = int(seconds)-1
minutes = int(minutes)
            else:  # seconds roll over to 59 and one minute is borrowed
seconds = 59
minutes = int(minutes)-1
# setting the label value
self.current_time.set(f'{minutes:02d}:{seconds:02d}')
# calling the decrement function repeatedly after a second
self._timer_decrement_job = self.after(1000, self.decrement_time)
elif self.timer_running and current_time == '00:00':
            self.controller.timer_schedule.rotate(-1)  # rotate left so the next phase moves to the front
            next_up = self.controller.timer_schedule[0]  # the upcoming phase is now the first element
# variable constantly updating the phase of scheduler after each phase changes
self.current_timer_label.set(next_up)
# checking which element is now at first position in task_order
# setting the current time accordingly
if next_up == 'Pomodoro':
pomodoro_time = int(self.controller.pomodoro.get())
self.current_time.set(f'{pomodoro_time:02d}:00')
elif next_up == 'Short Break':
short_break_time = int(self.controller.short_break.get())
self.current_time.set(f'{short_break_time:02d}:00')
elif next_up == 'Long Break':
long_break_time = int(self.controller.long_break.get())
self.current_time.set(f'{long_break_time:02d}:00')
self._timer_decrement_job = self.after(1000, self.decrement_time)
|
91556
|
r"""
Utility functions for building Sage
"""
# ****************************************************************************
# Copyright (C) 2017 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
def stable_uniq(L):
"""
Given an iterable L, remove duplicate items from L by keeping only
the last occurrence of any item.
The items must be hashable.
EXAMPLES::
sage: from sage_setup.util import stable_uniq
sage: stable_uniq( (1, 2, 3, 4, 5, 6, 3, 7, 5, 1, 5, 9) )
[2, 4, 6, 3, 7, 1, 5, 9]
"""
D = {}
for pos, item in enumerate(L):
D[item] = pos # Store the last position where an item appears
return sorted(D, key=lambda item: D[item])
def have_module(name):
"""
Check whether a Python module named ``name`` can be imported.
This is done by trying to import that module and returning ``True``
if that import succeeded. So, as a side effect, the module is
actually imported if possible.
EXAMPLES::
sage: from sage_setup.util import have_module
sage: have_module("itertools")
True
sage: have_module("sage.rings.integer")
True
sage: have_module("no.such.module")
False
"""
try:
__import__(name, {}, {}, [], 0)
return True
except ImportError:
return False
|
91581
|
from datetime import datetime
import pytz
def convert_to_utc_date_time(date):
"""Convert date into utc date time."""
if date is None:
return
return datetime.combine(date, datetime.min.time(), tzinfo=pytz.UTC)
|
91590
|
import pytest
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.datasets import load_diabetes
from tests.utils import resample_data
from deeprob.spn.structure.node import Sum, Product
from deeprob.spn.structure.leaf import Bernoulli, Gaussian
from deeprob.spn.learning.wrappers import learn_estimator
from deeprob.spn.algorithms.inference import log_likelihood
from deeprob.spn.learning.em import expectation_maximization
@pytest.fixture
def data():
    data, _ = load_diabetes(return_X_y=True)
return (data < np.median(data, axis=0)).astype(np.float32)
@pytest.fixture
def evi_data(data):
return resample_data(data, 1000, np.random.RandomState(42))
@pytest.fixture
def blobs_data():
blobs_data, _ = make_blobs(
n_samples=1000, n_features=2, random_state=1337,
centers=[[-1.0, 1.0], [1.0, -1.0]], cluster_std=0.25
)
return blobs_data
@pytest.fixture
def gaussian_spn():
g0a, g1a = Gaussian(0, -0.5, 0.5), Gaussian(1, 0.5, 0.5)
g0b, g1b = Gaussian(0, 0.5, 0.5), Gaussian(1, -0.5, 0.5)
p0 = Product(children=[g0a, g1a])
p1 = Product(children=[g0b, g1b])
p2 = Product(children=[g0a, g1b])
s0 = Sum(children=[p0, p1, p2], weights=[0.3, 0.5, 0.2])
s0.id, p0.id, p1.id, p2.id = 0, 1, 2, 3
g0a.id, g1a.id, g0b.id, g1b.id = 4, 5, 6, 7
return s0
@pytest.fixture
def spn_mle(evi_data):
return learn_estimator(
evi_data, [Bernoulli] * 10, [[0, 1]] * 10,
learn_leaf='mle', split_rows='gmm', split_cols='gvs', min_rows_slice=512,
random_state=42, verbose=False
)
@pytest.fixture
def spn_clt(evi_data):
return learn_estimator(
evi_data, [Bernoulli] * 10, [[0, 1]] * 10,
learn_leaf='binary-clt', split_rows='kmeans', split_cols='gvs', min_rows_slice=512,
learn_leaf_kwargs={'to_pc': False},
random_state=42, verbose=False
)
def test_spn_binary(spn_mle, evi_data):
expectation_maximization(
spn_mle, evi_data, num_iter=100, batch_perc=0.1, step_size=0.5,
random_init=False, random_state=42, verbose=False
)
ll = log_likelihood(spn_mle, evi_data).mean()
assert np.isclose(ll, -5.3, atol=5e-2)
def test_clt_binary(spn_clt, evi_data):
expectation_maximization(
spn_clt, evi_data, num_iter=100, batch_perc=0.1, step_size=0.5,
random_init=True, random_state=42, verbose=False
)
ll = log_likelihood(spn_clt, evi_data).mean()
assert np.isclose(ll, -5.1, atol=5e-2)
def test_spn_gaussian(gaussian_spn, blobs_data):
expectation_maximization(
gaussian_spn, blobs_data, num_iter=25, batch_perc=0.1, step_size=0.5,
random_init=True, random_state=42, verbose=False
)
ll = log_likelihood(gaussian_spn, blobs_data).mean()
assert np.isclose(ll, -0.7, atol=5e-2)
|
91597
|
from .dosed1 import DOSED1
from .dosed2 import DOSED2
from .dosed3 import DOSED3
__all__ = [
"DOSED1",
"DOSED2",
"DOSED3",
]
|
91634
|
from sklearn.gaussian_process.kernels import Kernel, Hyperparameter
from sklearn.gaussian_process.kernels import GenericKernelMixin
from sklearn.gaussian_process.kernels import StationaryKernelMixin
import numpy as np
from sklearn.base import clone
class MiniSeqKernel(GenericKernelMixin, StationaryKernelMixin, Kernel):
"""
A minimal (but valid) convolutional kernel for sequences of variable
length.
"""
def __init__(self, baseline_similarity=0.5, baseline_similarity_bounds=(1e-5, 1)):
self.baseline_similarity = baseline_similarity
self.baseline_similarity_bounds = baseline_similarity_bounds
@property
def hyperparameter_baseline_similarity(self):
return Hyperparameter(
"baseline_similarity", "numeric", self.baseline_similarity_bounds
)
def _f(self, s1, s2):
return sum(
[1.0 if c1 == c2 else self.baseline_similarity for c1 in s1 for c2 in s2]
)
def _g(self, s1, s2):
return sum([0.0 if c1 == c2 else 1.0 for c1 in s1 for c2 in s2])
def __call__(self, X, Y=None, eval_gradient=False):
if Y is None:
Y = X
if eval_gradient:
return (
np.array([[self._f(x, y) for y in Y] for x in X]),
np.array([[[self._g(x, y)] for y in Y] for x in X]),
)
else:
return np.array([[self._f(x, y) for y in Y] for x in X])
def diag(self, X):
return np.array([self._f(x, x) for x in X])
def clone_with_theta(self, theta):
cloned = clone(self)
cloned.theta = theta
return cloned
|
91685
|
import json
import numpy as np
learning_map = {
0 : 0, # "unlabeled"
1 : 0, # "outlier" mapped to "unlabeled" --------------------------mapped
10: 1, # "car"
11: 2, # "bicycle"
13: 5, # "bus" mapped to "other-vehicle" --------------------------mapped
15: 3, # "motorcycle"
16: 5, # "on-rails" mapped to "other-vehicle" ---------------------mapped
18: 4, # "truck"
20: 5, # "other-vehicle"
30: 6, # "person"
31: 7, # "bicyclist"
32: 8, # "motorcyclist"
40: 9, # "road"
44: 10, # "parking"
48: 11, # "sidewalk"
49: 12, # "other-ground"
50: 13, # "building"
51: 14, # "fence"
52: 0, # "other-structure" mapped to "unlabeled" ------------------mapped
60: 9, # "lane-marking" to "road" ---------------------------------mapped
70: 15, # "vegetation"
71: 16, # "trunk"
72: 17, # "terrain"
80: 18, # "pole"
81: 19, # "traffic-sign"
99: 0, # "other-object" to "unlabeled" ----------------------------mapped
252: 1, # "moving-car"
253: 7, # "moving-bicyclist"
254: 6, # "moving-person"
255: 8, # "moving-motorcyclist"
256: 5, # "moving-on-rails" mapped to "moving-other-vehicle" ------mapped
257: 5, # "moving-bus" mapped to "moving-other-vehicle" -----------mapped
258: 4, # "moving-truck"
259: 5, # "moving-other-vehicle"
}
def class_contents(labels_files, lbl_count):
for file_ in labels_files['labels']:
labels = np.fromfile('../' + file_, dtype=np.uint32)
labels = labels.reshape((-1))
labels = labels & 0xFFFF
#remap labels to learning values
labels = np.vectorize(learning_map.get)(labels)
classes, counts = np.unique(labels, return_counts=True)
for class_, count in zip(classes, counts):
lbl_count[class_] += count
return lbl_count
splits = None
with open('percentiles_split.json', 'r') as f:
splits = json.load(f)
for percentile in splits:
print(f'PERCENT: {percentile}')
lbl_count = [ 0 for _ in range(20) ]
for seq in splits[percentile]:
lbl_count = class_contents(splits[percentile][seq], lbl_count)
lbl_count = np.array(lbl_count)
class_dist = lbl_count / np.sum(lbl_count)
for class_ in range(20):
print(f'{class_}: {round(class_dist[class_],5)}')
print(f'\t- CLASS DIST: {class_dist}')
|
91688
|
import MinkowskiEngine as ME
import MinkowskiEngine.MinkowskiFunctional as MEF
import torch
import torch.nn as nn
from src.models.common import conv, conv_tr, get_nonlinearity, get_norm
class BasicBlockBase(nn.Module):
expansion = 1
NORM_TYPE = "BN"
def __init__(
self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
bn_momentum=0.1,
region_type=0,
D=3,
):
super(BasicBlockBase, self).__init__()
self.conv1 = conv(
inplanes,
planes,
kernel_size=3,
stride=stride,
dilation=dilation,
region_type=region_type,
dimension=D,
)
self.norm1 = get_norm(
self.NORM_TYPE, planes, bn_momentum=bn_momentum, dimension=D
)
self.conv2 = conv(
planes,
planes,
kernel_size=3,
stride=1,
dilation=dilation,
region_type=region_type,
dimension=D,
)
self.norm2 = get_norm(
self.NORM_TYPE, planes, bn_momentum=bn_momentum, dimension=D
)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = MEF.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = MEF.relu(out)
return out
class BasicBlockBN(BasicBlockBase):
NORM_TYPE = "BN"
class BasicBlockIN(BasicBlockBase):
NORM_TYPE = "IN"
class BasicBlockINBN(BasicBlockBase):
NORM_TYPE = "INBN"
def get_block(
norm_type,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
bn_momentum=0.1,
region_type=0,
dimension=3,
):
if norm_type == "BN":
Block = BasicBlockBN
elif norm_type == "IN":
Block = BasicBlockIN
elif norm_type == "INBN":
Block = BasicBlockINBN
elif norm_type == "SE":
Block = SEBlock
else:
raise ValueError(f"Type {norm_type}, not defined")
return Block(
inplanes,
planes,
stride,
dilation,
downsample,
bn_momentum,
region_type,
dimension,
)
def conv_norm_non(
inc,
outc,
kernel_size,
stride,
dimension,
bn_momentum=0.05,
region_type=ME.RegionType.HYPER_CUBE,
norm_type="BN",
nonlinearity="ELU",
):
return nn.Sequential(
conv(
in_channels=inc,
out_channels=outc,
kernel_size=kernel_size,
stride=stride,
dilation=1,
bias=False,
region_type=region_type,
dimension=dimension,
),
get_norm(norm_type, outc, bn_momentum=bn_momentum, dimension=dimension),
get_nonlinearity(nonlinearity),
)
class SEBlock(nn.Module):
expansion = 1
NORM_TYPE = "BN"
def __init__(
self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
bn_momentum=0.1,
region_type=0,
D=3,
):
super(SEBlock, self).__init__()
self.conv1 = conv(
inplanes,
planes,
kernel_size=3,
stride=stride,
dilation=dilation,
region_type=region_type,
dimension=D,
)
self.norm1 = get_norm(
self.NORM_TYPE, planes, bn_momentum=bn_momentum, dimension=D
)
self.squeeze = ME.MinkowskiGlobalSumPooling()
self.fc1 = ME.MinkowskiConvolution(
in_channels=planes,
out_channels=int(planes / 4),
kernel_size=1,
stride=1,
dilation=1,
bias=False,
dimension=D,
)
self.fc2 = ME.MinkowskiConvolution(
in_channels=int(planes / 4),
out_channels=planes,
kernel_size=1,
stride=1,
dilation=1,
bias=False,
dimension=D,
)
self.conv2 = conv(
planes,
planes,
kernel_size=3,
stride=1,
dilation=dilation,
region_type=region_type,
dimension=D,
)
self.norm2 = get_norm(
self.NORM_TYPE, planes, bn_momentum=bn_momentum, dimension=D
)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = MEF.relu(out)
se = self.squeeze(out)
se = self.fc1(se)
se = MEF.relu(se)
se = self.fc2(se)
se = MEF.sigmoid(se)
feats = []
batch_size = len(se.decomposed_features)
for i in range(batch_size):
F = out.features_at(i)
scale = se.features_at(i)
feats.append(F * scale)
feats = torch.cat(feats, 0)
out = ME.SparseTensor(
feats,
coordinate_map_key=out.coordinate_map_key,
coordinate_manager=out.coordinate_manager,
)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = MEF.relu(out)
return out
|
91714
|
from pyradioconfig.calculator_model_framework.interfaces.icalculator import ICalculator
from enum import Enum
from pycalcmodel.core.variable import ModelVariableFormat, CreateModelVariableEnum
class Calc_AoX_Bobcat(ICalculator):
###AoX Calculations###
def buildVariables(self, model):
var = self._addModelVariable(model, 'aox_enable', Enum, ModelVariableFormat.DECIMAL, units='',
desc='Enables AoX settings')
member_data = [
['DISABLED', 0, 'AoX Disabled'],
['ENABLED', 1, 'AoX Enabled'],
]
var.var_enum = CreateModelVariableEnum(
'AoxEnableEnum',
'AoX Enable/Disable Selection',
member_data)
def calc_aox_enable(self, model):
#Disable by default
model.vars.aox_enable.value = model.vars.aox_enable.var_enum.DISABLED
def calc_timeperiod_reg(self, model):
TIMEPERIOD_FRACTIONAL_BITS = 24
xtal_frequency_hz = model.vars.xtal_frequency.value
timeperiod = int(2**TIMEPERIOD_FRACTIONAL_BITS / (xtal_frequency_hz / 1e6))
self._reg_write(model.vars.MODEM_ANTSWCTRL1_TIMEPERIOD, timeperiod)
def calc_aox_misc(self, model):
aox_enable = True if model.vars.aox_enable.value == model.vars.aox_enable.var_enum.ENABLED else False
if aox_enable:
chfswsel = 2 # CHFSWTRIG
disafcsupp = 1 # disable AFC during the CTE
chfswtrig = 1 # clk cycles to trigger after ets_set_mux, must be non-zero
else:
chfswsel = 0
disafcsupp = 0
chfswtrig = 0
self._reg_write(model.vars.MODEM_CHFCTRL_CHFSWSEL, chfswsel)
self._reg_write(model.vars.MODEM_CHFSWCTRL_CHFSWTIME, chfswtrig)
self._reg_write(model.vars.MODEM_AFC_DISAFCCTE, disafcsupp)
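# --- Worked example (illustrative addition, not part of the original calculator) ---
# TIMEPERIOD encodes the crystal period in microseconds as a fixed-point value
# with 24 fractional bits. For an assumed 38.4 MHz crystal:
#   2**24 / (38.4e6 / 1e6) = 16777216 / 38.4 ~= 436906.67 -> register value 436906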
|
91717
|
from typing import Union, Optional, Dict
import torch
from falkon import sparse
from falkon.kernels.diff_kernel import DiffKernel
from falkon.la_helpers.square_norm_fn import square_norm_diff
from falkon.options import FalkonOptions
from falkon.sparse import SparseTensor
SQRT3 = 1.7320508075688772
SQRT5 = 2.23606797749979
def validate_sigma(sigma: Union[float, torch.Tensor]) -> torch.Tensor:
if isinstance(sigma, torch.Tensor):
# Sigma is a 1-item tensor ('single')
try:
sigma.item()
return sigma
except ValueError:
pass
# Sigma is a vector ('diag')
if sigma.dim() == 1 or sigma.shape[1] == 1:
return sigma.reshape(-1)
else:
# TODO: Better error
raise ValueError("sigma must be a scalar or a vector.")
else:
try:
return torch.tensor([float(sigma)], dtype=torch.float64)
except TypeError:
raise TypeError("Sigma must be a scalar or a tensor.")
def _sq_dist(mat1, mat2, norm_mat1, norm_mat2, out: Optional[torch.Tensor]) -> torch.Tensor:
if mat1.dim() == 3:
if out is None:
out = torch.baddbmm(norm_mat1, mat1, mat2.transpose(-2, -1), alpha=-2, beta=1) # b*n*m
else:
out = torch.baddbmm(norm_mat1, mat1, mat2.transpose(-2, -1), alpha=-2, beta=1,
out=out) # b*n*m
else:
if out is None:
out = torch.addmm(norm_mat1, mat1, mat2.transpose(-2, -1), alpha=-2, beta=1) # n*m
else:
out = torch.addmm(norm_mat1, mat1, mat2.transpose(-2, -1), alpha=-2, beta=1,
out=out) # n*m
out.add_(norm_mat2.transpose(-2, -1))
out.clamp_min_(1e-20)
return out
def _sparse_sq_dist(X1_csr: SparseTensor, X2_csr: SparseTensor,
X1: SparseTensor, X2: SparseTensor,
out: torch.Tensor) -> torch.Tensor:
sq1 = torch.empty(X1_csr.size(0), dtype=X1_csr.dtype, device=X1_csr.device)
sparse.sparse_square_norm(X1_csr, sq1) # TODO: This must be implemented for CUDA tensors
sq1 = sq1.reshape(-1, 1)
sq2 = torch.empty(X2_csr.size(0), dtype=X2_csr.dtype, device=X2_csr.device)
sparse.sparse_square_norm(X2_csr, sq2)
sq2 = sq2.reshape(-1, 1)
sparse.sparse_matmul(X1, X2, out)
out.mul_(-2.0)
out.add_(sq1.to(device=X1.device))
out.add_(sq2.to(device=X2.device).t())
out.clamp_min_(1e-20)
return out
def rbf_core(mat1, mat2, out: Optional[torch.Tensor], sigma):
"""
Note 1: if out is None, then this function will be differentiable wrt all three remaining inputs.
Note 2: this function can deal with batched inputs
Parameters
----------
    sigma
        The kernel length-scale (scalar or per-dimension vector).
    mat1
        First input data matrix (optionally batched).
    mat2
        Second input data matrix (optionally batched).
    out
        Optional pre-allocated output buffer; when None the result is newly
        allocated (and differentiable, see Note 1).
    Returns
    -------
    The Gaussian kernel matrix between ``mat1`` and ``mat2``.
    """
mat1_div_sig = mat1 / sigma
mat2_div_sig = mat2 / sigma
norm_sq_mat1 = square_norm_diff(mat1_div_sig, -1, True) # b*n*1 or n*1
norm_sq_mat2 = square_norm_diff(mat2_div_sig, -1, True) # b*m*1 or m*1
out = _sq_dist(mat1_div_sig, mat2_div_sig, norm_sq_mat1, norm_sq_mat2, out)
out.mul_(-0.5)
out.exp_()
return out
def rbf_core_sparse(mat1: SparseTensor, mat2: SparseTensor,
mat1_csr: SparseTensor, mat2_csr: SparseTensor,
out: torch.Tensor, sigma) -> torch.Tensor:
gamma = 0.5 / (sigma ** 2)
out = _sparse_sq_dist(X1_csr=mat1_csr, X2_csr=mat2_csr, X1=mat1, X2=mat2, out=out)
out.mul_(-gamma)
out.exp_()
return out
def laplacian_core(mat1, mat2, out: Optional[torch.Tensor], sigma):
mat1_div_sig = mat1 / sigma
mat2_div_sig = mat2 / sigma
norm_sq_mat1 = square_norm_diff(mat1_div_sig, -1, True) # b*n*1
norm_sq_mat2 = square_norm_diff(mat2_div_sig, -1, True) # b*m*1
orig_out = out
out = _sq_dist(mat1_div_sig, mat2_div_sig, norm_sq_mat1, norm_sq_mat2, out)
out.sqrt_() # Laplacian: sqrt of squared-difference
# The gradient calculation needs the output of sqrt_
if orig_out is None: # TODO: We could be more explicit in the parameters about whether the gradient is or isn't needed
out = out.neg()
else:
out.neg_()
out.exp_()
return out
def laplacian_core_sparse(mat1: SparseTensor, mat2: SparseTensor,
mat1_csr: SparseTensor, mat2_csr: SparseTensor,
out: torch.Tensor, sigma) -> torch.Tensor:
gamma = 1 / sigma
out = _sparse_sq_dist(X1_csr=mat1_csr, X2_csr=mat2_csr, X1=mat1, X2=mat2, out=out)
out.sqrt_()
out.mul_(-gamma)
out.exp_()
return out
def matern_core(mat1, mat2, out: Optional[torch.Tensor], sigma, nu):
if nu == 0.5:
return laplacian_core(mat1, mat2, out, sigma)
elif nu == float('inf'):
return rbf_core(mat1, mat2, out, sigma)
orig_out = out
mat1_div_sig = mat1 / sigma
mat2_div_sig = mat2 / sigma
norm_sq_mat1 = square_norm_diff(mat1_div_sig, -1, True) # b*n*1
norm_sq_mat2 = square_norm_diff(mat2_div_sig, -1, True) # b*m*1
out = _sq_dist(mat1_div_sig, mat2_div_sig, norm_sq_mat1, norm_sq_mat2, out)
if nu == 1.5:
        # (1 + sqrt(3)*D) * exp(-sqrt(3)*D)
out.sqrt_()
if orig_out is None: # TODO: We could be more explicit in the parameters about whether the gradient is or isn't needed
out = out.mul(SQRT3)
else:
out.mul_(SQRT3)
out_neg = torch.neg(out) # extra n*m block
out_neg.exp_()
out.add_(1.0).mul_(out_neg)
elif nu == 2.5:
# (1 + sqrt(5)*D + (sqrt(5)*D)^2 / 3 ) * exp(-sqrt(5)*D)
out_sqrt = torch.sqrt(out)
if orig_out is None: # TODO: We could be more explicit in the parameters about whether the gradient is or isn't needed
out_sqrt = out_sqrt.mul(SQRT5)
else:
out_sqrt.mul_(SQRT5)
out.mul_(5.0 / 3.0).add_(out_sqrt).add_(1.0)
out_sqrt.neg_().exp_()
out.mul_(out_sqrt)
return out
def matern_core_sparse(mat1: SparseTensor, mat2: SparseTensor,
mat1_csr: SparseTensor, mat2_csr: SparseTensor,
out: torch.Tensor, sigma, nu) -> torch.Tensor:
if nu == 0.5:
return laplacian_core_sparse(mat1, mat2, mat1_csr, mat2_csr, out, sigma)
elif nu == float('inf'):
return rbf_core_sparse(mat1, mat2, mat1_csr, mat2_csr, out, sigma)
gamma = 1 / (sigma ** 2)
out = _sparse_sq_dist(X1_csr=mat1_csr, X2_csr=mat2_csr, X1=mat1, X2=mat2, out=out)
out.mul_(gamma)
# For certain nu = 1.5, 2.5 we will need an extra n*m block
if nu == 1.5:
        # (1 + sqrt(3)*D) * exp(-sqrt(3)*D)
out.sqrt_()
out.mul_(SQRT3)
out_neg = torch.neg(out)
out_neg.exp_()
out.add_(1.0).mul_(out_neg)
elif nu == 2.5:
# (1 + sqrt(5)*D + (sqrt(5)*D)^2 / 3 ) * exp(-sqrt(5)*D)
out_sqrt = torch.sqrt(out)
out_sqrt.mul_(SQRT5)
out.mul_(5.0 / 3.0).add_(out_sqrt).add_(1.0)
out_sqrt.neg_().exp_()
out.mul_(out_sqrt)
return out
class GaussianKernel(DiffKernel):
r"""Class for computing the Gaussian kernel and related kernel-vector products
The Gaussian kernel is one of the most common and effective kernel embeddings
since it is infinite dimensional, and governed by a single parameter. The kernel length-scale
determines the width of the Gaussian distribution which is placed on top of each point.
A larger sigma corresponds to a wide Gaussian, so that the relative influence of far away
points will be high for computing the kernel at a given datum.
On the opposite side of the spectrum, a small sigma means that only nearby points will
influence the kernel.
Parameters
-----------
sigma
The length-scale of the kernel.
This can be a scalar, and then it corresponds to the standard deviation
of the Gaussian distribution from which the kernel is derived.
If `sigma` is a vector of size `d` (where `d` is the dimensionality of the data), it is
interpreted as the diagonal standard deviation of the Gaussian distribution.
        It can also be a matrix of size `d*d`, in which case sigma will be the precision
matrix (inverse covariance).
opt
Additional options to be forwarded to the matrix-vector multiplication
routines.
Examples
--------
Creating a Gaussian kernel with a single length-scale. Operations on this kernel will not
use KeOps.
>>> K = GaussianKernel(sigma=3.0, opt=FalkonOptions(keops_active="no"))
Creating a Gaussian kernel with a different length-scale per dimension
>>> K = GaussianKernel(sigma=torch.tensor([1.0, 3.5, 7.0]))
Creating a Gaussian kernel object with full covariance matrix (randomly chosen)
>>> mat = torch.randn(3, 3, dtype=torch.float64)
>>> sym_mat = mat @ mat.T
>>> K = GaussianKernel(sigma=sym_mat)
>>> K
GaussianKernel(sigma=tensor([[ 2.0909, 0.0253, -0.2490],
[ 0.0253, 0.3399, -0.5158],
[-0.2490, -0.5158, 4.4922]], dtype=torch.float64)) #random
Notes
-----
The Gaussian kernel with a single length-scale follows
.. math::
k(x, x') = \exp{-\dfrac{\lVert x - x' \rVert^2}{2\sigma^2}}
When the length-scales are specified as a matrix, the RBF kernel is determined by
.. math::
k(x, x') = \exp{-\dfrac{1}{2}x\Sigma x'}
In both cases, the actual computation follows a different path, working on the expanded
norm.
"""
kernel_name = "gaussian"
core_fn = rbf_core
def __init__(self, sigma: Union[float, torch.Tensor], opt: Optional[FalkonOptions] = None):
self.sigma = validate_sigma(sigma)
super().__init__(self.kernel_name, opt, core_fn=GaussianKernel.core_fn, sigma=self.sigma)
def _keops_mmv_impl(self, X1, X2, v, kernel, out, opt: FalkonOptions):
formula = 'Exp(SqDist(x1 / g, x2 / g) * IntInv(-2)) * v'
aliases = [
'x1 = Vi(%d)' % (X1.shape[1]),
'x2 = Vj(%d)' % (X2.shape[1]),
'v = Vj(%d)' % (v.shape[1]),
'g = Pm(%d)' % (self.sigma.shape[0])
]
other_vars = [self.sigma.to(device=X1.device, dtype=X1.dtype)]
return self.keops_mmv(X1, X2, v, out, formula, aliases, other_vars, opt)
def extra_mem(self) -> Dict[str, float]:
return {
# Data-matrix / sigma in prepare + Data-matrix / sigma in apply
'nd': 2,
'md': 1,
# Norm results in prepare
'm': 1,
'n': 1,
}
def detach(self) -> 'GaussianKernel':
detached_params = self._detach_params()
return GaussianKernel(detached_params["sigma"], opt=self.params)
# noinspection PyMethodOverriding
def compute_sparse(self, X1: SparseTensor, X2: SparseTensor, out: torch.Tensor,
X1_csr: SparseTensor, X2_csr: SparseTensor) -> torch.Tensor:
if len(self.sigma) > 1:
raise NotImplementedError("Sparse kernel is only implemented for scalar sigmas.")
dev_kernel_tensor_params = self._move_kernel_params(X1)
return rbf_core_sparse(X1, X2, X1_csr, X2_csr, out, dev_kernel_tensor_params["sigma"])
def __repr__(self):
return f"GaussianKernel(sigma={self.sigma})"
def __str__(self):
return f"Gaussian kernel<{self.sigma}>"
class LaplacianKernel(DiffKernel):
r"""Class for computing the Laplacian kernel, and related kernel-vector products.
The Laplacian kernel is similar to the Gaussian kernel, but less sensitive to changes
in the parameter `sigma`.
Parameters
----------
sigma
The length-scale of the Laplacian kernel
Notes
-----
The Laplacian kernel is determined by the following formula
.. math::
k(x, x') = \exp{-\frac{\lVert x - x' \rVert}{\sigma}}
"""
kernel_name = "laplacian"
def __init__(self, sigma: Union[float, torch.Tensor], opt: Optional[FalkonOptions] = None):
self.sigma = validate_sigma(sigma)
super().__init__(self.kernel_name, opt, core_fn=laplacian_core, sigma=self.sigma)
def _keops_mmv_impl(self, X1, X2, v, kernel, out, opt: FalkonOptions):
formula = 'Exp(-Sqrt(SqDist(x1 / g, x2 / g))) * v'
aliases = [
'x1 = Vi(%d)' % (X1.shape[1]),
'x2 = Vj(%d)' % (X2.shape[1]),
'v = Vj(%d)' % (v.shape[1]),
'g = Pm(%d)' % (self.sigma.shape[0])
]
other_vars = [self.sigma.to(device=X1.device, dtype=X1.dtype)]
return self.keops_mmv(X1, X2, v, out, formula, aliases, other_vars, opt)
def extra_mem(self) -> Dict[str, float]:
return {
# Data-matrix / sigma in prepare + Data-matrix / sigma in apply
'nd': 2,
'md': 1,
# Norm results in prepare
'm': 1,
'n': 1,
}
def detach(self) -> 'LaplacianKernel':
detached_params = self._detach_params()
return LaplacianKernel(detached_params["sigma"], opt=self.params)
# noinspection PyMethodOverriding
def compute_sparse(self, X1: SparseTensor, X2: SparseTensor, out: torch.Tensor,
X1_csr: SparseTensor, X2_csr: SparseTensor) -> torch.Tensor:
if len(self.sigma) > 1:
raise NotImplementedError("Sparse kernel is only implemented for scalar sigmas.")
dev_kernel_tensor_params = self._move_kernel_params(X1)
return laplacian_core_sparse(X1, X2, X1_csr, X2_csr, out, dev_kernel_tensor_params["sigma"])
def __repr__(self):
return f"LaplacianKernel(sigma={self.sigma})"
def __str__(self):
return f"Laplaciankernel<{self.sigma}>"
class MaternKernel(DiffKernel):
r"""Class for computing the Matern kernel, and related kernel-vector products.
The Matern kernels define a generic class of kernel functions which includes the
Laplacian and Gaussian kernels. The class is parametrized by 'nu'. When `nu = 0.5`
this kernel is equivalent to the Laplacian kernel, when `nu = float('inf')`, the
Matern kernel is equivalent to the Gaussian kernel.
This class implements the Matern kernel only for the values of nu which have a closed
form solution, which are 0.5, 1.5, 2.5, and infinity.
Parameters
----------
sigma
The length-scale of the Matern kernel. The length-scale can be either a scalar
or a vector. Matrix-valued length-scales are not allowed for the Matern kernel.
nu
The parameter of the Matern kernel. It should be one of `0.5`, `1.5`, `2.5` or
`inf`.
Notes
-----
While for `nu = float('inf')` this kernel is equivalent to the :class:`GaussianKernel`,
the implementation is more general and using the :class:`GaussianKernel` directly
may be computationally more efficient.
"""
_valid_nu_values = frozenset({0.5, 1.5, 2.5, float('inf')})
def __init__(self,
sigma: Union[float, torch.Tensor],
nu: Union[float, torch.Tensor],
opt: Optional[FalkonOptions] = None):
self.sigma = validate_sigma(sigma)
self.nu = self.validate_nu(nu)
self.kernel_name = f"{self.nu:.1f}-matern"
super().__init__(self.kernel_name, opt, core_fn=matern_core, sigma=self.sigma, nu=self.nu)
def _keops_mmv_impl(self, X1, X2, v, kernel, out, opt: FalkonOptions):
if self.nu == 0.5:
formula = 'Exp(-Norm2(x1 / s - x2 / s)) * v'
elif self.nu == 1.5:
formula = ('(IntCst(1) + Sqrt(IntCst(3)) * Norm2(x1 / s - x2 / s)) * '
'(Exp(-Sqrt(IntCst(3)) * Norm2(x1 / s - x2 / s)) * v)')
elif self.nu == 2.5:
formula = ('(IntCst(1) + Sqrt(IntCst(5)) * Norm2(x1 / s - x2 / s) + '
'(IntInv(3) * IntCst(5)) * SqNorm2(x1 / s - x2 / s)) * '
'(Exp(-Sqrt(IntCst(5)) * Norm2(x1 / s - x2 / s)) * v)')
elif self.nu == float('inf'):
formula = 'Exp(IntInv(-2) * SqDist(x1 / s, x2 / s)) * v'
else:
raise RuntimeError(f"Unrecognized value of nu ({self.nu}). "
f"The onnly allowed values are 0.5, 1.5, 2.5, inf.")
aliases = [
'x1 = Vi(%d)' % (X1.shape[1]),
'x2 = Vj(%d)' % (X2.shape[1]),
'v = Vj(%d)' % (v.shape[1]),
's = Pm(%d)' % (self.sigma.shape[0])
]
other_vars = [self.sigma.to(device=X1.device, dtype=X1.dtype)]
return self.keops_mmv(X1, X2, v, out, formula, aliases, other_vars, opt)
def extra_mem(self) -> Dict[str, float]:
extra_mem = {
# Data-matrix / sigma
'nd': 1,
'md': 1,
# Norm results in prepare
'm': 1,
'n': 1,
}
if self.nu in {1.5, 2.5}:
# Extra kernel block in transform
extra_mem['nm'] = 1
return extra_mem
def detach(self) -> 'MaternKernel':
detached_params = self._detach_params()
return MaternKernel(detached_params["sigma"], detached_params["nu"], opt=self.params)
@staticmethod
def validate_nu(nu: Union[torch.Tensor, float]) -> float:
if isinstance(nu, torch.Tensor):
if nu.requires_grad:
raise ValueError("The nu parameter of the Matern kernel is not differentiable, "
"and must not require gradients.")
try:
out_nu = round(nu.item(), ndigits=2)
except ValueError:
raise ValueError("nu=%s is not convertible to a scalar." % (nu))
elif isinstance(nu, float):
out_nu = round(nu, ndigits=2)
else:
raise TypeError(f"nu must be a float or a tensor, not a {type(nu)}")
if out_nu not in MaternKernel._valid_nu_values:
raise ValueError(f"The given value of nu = {out_nu} can only take "
f"values {MaternKernel._valid_nu_values}.")
return out_nu
# noinspection PyMethodOverriding
def compute_sparse(self, X1: SparseTensor, X2: SparseTensor, out: torch.Tensor,
X1_csr: SparseTensor, X2_csr: SparseTensor) -> torch.Tensor:
if len(self.sigma) > 1:
raise NotImplementedError("Sparse kernel is only implemented for scalar sigmas.")
dev_kernel_tensor_params = self._move_kernel_params(X1)
return matern_core_sparse(X1, X2, X1_csr, X2_csr, out, dev_kernel_tensor_params["sigma"],
self.nu)
def __repr__(self):
return f"MaternKernel(sigma={self.sigma}, nu={self.nu:.1f})"
def __str__(self):
return f"Matern kernel<{self.sigma}, {self.nu:.1f}>"
|
91755
|
import factory
from user_accounts import models
from .organization_factory import FakeOrganizationFactory
from .user_factory import UserFactory, user_with_name_and_email_from_org_slug
class UserProfileFactory(factory.DjangoModelFactory):
name = factory.Sequence(lambda n: 'Fake User {}'.format(n))
user = factory.SubFactory(UserFactory)
organization = factory.SubFactory(FakeOrganizationFactory)
should_get_notifications = True
class Meta:
model = models.UserProfile
def fake_app_reviewer(**kwargs):
user = UserFactory(group_names=['application_reviewers'], **kwargs)
return UserProfileFactory(user=user)
def profile_for_org_and_group_names(
org, group_names=None, should_get_notifications=True, **user_kwargs):
"""Creates a user and user profile based on the org slug
For example, if org.slug is 'yolo_pubdef':
user.username == 'yolo_pubdef'
user.first_name == 'Fake'
user.last_name == '<NAME>'
profile.name == '<NAME>'
email = <EMAIL>
    If a list of strings is passed through group_names, each corresponding
group will be added.
"""
if not group_names:
group_names = []
user = user_with_name_and_email_from_org_slug(
org.slug, group_names=group_names, **user_kwargs)
return UserProfileFactory(
user=user, organization=org,
name=' '.join([user.first_name, user.last_name]),
should_get_notifications=should_get_notifications)
def profile_for_slug_in_groups(org_slug, group_names=None, **kwargs):
return profile_for_org_and_group_names(
models.Organization.objects.get(slug=org_slug),
group_names=group_names, **kwargs)
def app_reviewer(org_slug=None, **kwargs):
if org_slug:
org = models.Organization.objects.get(slug=org_slug)
else:
org = FakeOrganizationFactory()
return profile_for_org_and_group_names(
org, ['application_reviewers'], **kwargs)
def followup_user(**kwargs):
return profile_for_slug_in_groups(
'cfa', group_names=['followup_staff'], is_staff=True, **kwargs)
def monitor_user(**kwargs):
return profile_for_slug_in_groups(
'cfa', group_names=['performance_monitors'], **kwargs)
|
91759
|
from __future__ import absolute_import, division, print_function
import cmath
import math
from six.moves import zip
class least_squares:
def __init__(self, obs, calc):
self.obs = obs
self.calc = calc
a, b = self.calc.real, self.calc.imag
self.abs_calc = math.sqrt(a**2 + b**2)
self.delta = self.obs - self.abs_calc
def f(self):
"Mathematica: f=(obs-Sqrt[a^2+b^2])^2"
return self.delta**2
def da(self):
"Mathematica: D[f,a]"
if (self.abs_calc == 0): return 0
return -2 * self.delta * self.calc.real / self.abs_calc
def db(self):
"Mathematica: D[f,b]"
if (self.abs_calc == 0): return 0
return -2 * self.delta * self.calc.imag / self.abs_calc
def daa(self):
"Mathematica: FortranForm[FullSimplify[D[f,a,a]]]"
ac = self.abs_calc
if (ac == 0):
if (self.obs == 0): return 2
return -1.e160
return 2 - (2*self.calc.imag**2*self.obs)/ac/ac/ac
def dbb(self):
"Mathematica: FortranForm[FullSimplify[D[f,b,b]]]"
ac = self.abs_calc
if (ac == 0):
if (self.obs == 0): return 2
return -1.e160
return 2 - (2*self.calc.real**2*self.obs)/ac/ac/ac
def dab(self):
"Mathematica: FortranForm[FullSimplify[D[f,a,b]]]"
ac = self.abs_calc
if (ac == 0):
if (self.obs == 0): return 0
return 1.e160
return (2*self.calc.real*self.calc.imag*self.obs)/ac/ac/ac
class exp_i_alpha_sum:
def __init__(self, alphas):
self.alphas = alphas
def f(self):
"Mathematica: f=Exp[I alpha]"
result = 0
for alpha in self.alphas:
result += cmath.exp(1j*alpha)
return result
def d_alphas(self):
"Mathematica: D[f,alpha]"
return [1j*cmath.exp(1j*alpha) for alpha in self.alphas]
def d2_alphas(self):
"Mathematica: D[f,alpha,alpha]"
return [-cmath.exp(1j*alpha) for alpha in self.alphas]
def d_target_d_alphas(self, target):
"Rule for derivatives of sum of roots of unity."
da, db = target.da(), target.db()
return [da * d.real + db * d.imag for d in self.d_alphas()]
def d2_target_d_alphas(self, target):
"Product rule applied to da * d.real + db * d.imag."
result = []
da, db = target.da(), target.db()
daa, dbb, dab = target.daa(), target.dbb(), target.dab()
d = self.d_alphas()
d2 = self.d2_alphas()
for di,d2i in zip(d, d2):
row = []
for dj in d:
sum = daa * di.real * dj.real \
+ dbb * di.imag * dj.imag \
+ dab * (di.real * dj.imag + di.imag * dj.real)
if (di is dj):
sum += da * d2i.real + db * d2i.imag
row.append(sum)
result.append(row)
return result
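# --- Verification sketch (illustrative addition, not part of the original module) ---
# The analytic first derivatives above can be spot-checked against central
# finite differences; the observation value and alphas below are arbitrary.
def _finite_difference_check(obs=2.0, alphas=(0.1, 0.7, 1.3), eps=1.e-6):
    structure = exp_i_alpha_sum(list(alphas))
    target = least_squares(obs, structure.f())
    analytic = structure.d_target_d_alphas(target)
    numeric = []
    for i in range(len(alphas)):
        shifted = list(alphas)
        shifted[i] += eps
        f_plus = least_squares(obs, exp_i_alpha_sum(shifted).f()).f()
        shifted[i] -= 2 * eps
        f_minus = least_squares(obs, exp_i_alpha_sum(shifted).f()).f()
        numeric.append((f_plus - f_minus) / (2 * eps))
    return analytic, numeric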
|
91777
|
import os
import requests
import tarfile
import urllib.request
import zipfile
from tqdm import tqdm
def maybe_download_from_url(url, download_dir):
"""
Download the data from url, unless it's already here.
Args:
download_dir: string, path to download directory
url: url to download from
Returns:
Path to the downloaded file
"""
filename = url.split('/')[-1]
filepath = os.path.join(download_dir, filename)
os.makedirs(download_dir, exist_ok=True)
if not os.path.isfile(filepath):
print('Downloading: "{}"'.format(filepath))
urllib.request.urlretrieve(url, filepath)
size = os.path.getsize(filepath)
print('Download complete ({} bytes)'.format(size))
else:
print('File already exists: "{}"'.format(filepath))
return filepath
def maybe_download_from_google_drive(id, filepath):
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, filepath, chunk_size=32 * 1024):
total_size = int(response.headers.get('content-length', 0))
with open(filepath, "wb") as f:
for chunk in tqdm(response.iter_content(chunk_size), total=total_size,
unit='B', unit_scale=True, desc=filepath):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
if not os.path.isfile(filepath):
print('Downloading: "{}"'.format(filepath))
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={'id': id}, stream=True)
token = get_confirm_token(response)
if token:
params = {'id': id, 'confirm': token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, filepath)
size = os.path.getsize(filepath)
print('Download complete ({} bytes)'.format(size))
else:
print('File already exists: "{}"'.format(filepath))
return filepath
def maybe_extract(compressed_filepath, train_dir, test_dir):
def is_image(filepath):
extensions = ('.jpg', '.jpeg', '.png', '.gif')
return any(filepath.endswith(ext) for ext in extensions)
os.makedirs(train_dir, exist_ok=True)
os.makedirs(test_dir, exist_ok=True)
print('Extracting: "{}"'.format(compressed_filepath))
if zipfile.is_zipfile(compressed_filepath):
with zipfile.ZipFile(compressed_filepath) as zf:
files = [member for member in zf.infolist() if is_image(member.filename)]
count = len(files)
train_test_boundary = int(count * 0.99)
for i in range(count):
if i < train_test_boundary:
extract_dir = train_dir
else:
extract_dir = test_dir
if not os.path.exists(os.path.join(extract_dir, files[i].filename)):
zf.extract(files[i], extract_dir)
elif tarfile.is_tarfile(compressed_filepath):
with tarfile.open(compressed_filepath) as tar:
files = [member for member in tar if is_image(member.name)]
count = len(files)
train_test_boundary = int(count * 0.99)
for i in range(count):
if i < train_test_boundary:
extract_dir = train_dir
else:
extract_dir = test_dir
if not os.path.exists(os.path.join(extract_dir, files[i].name)):
tar.extract(files[i], extract_dir)
else:
        raise NotImplementedError('Unsupported archive format: "{}"'.format(compressed_filepath))
print('Extraction complete')
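# --- Usage sketch (illustrative addition, not part of the original module) ---
# Typical flow: download an archive, then split its images into train/test
# directories. The URL and directory names below are placeholders.
if __name__ == '__main__':
    archive = maybe_download_from_url('https://example.com/images.zip',
                                      download_dir='downloads')
    maybe_extract(archive, train_dir='data/train', test_dir='data/test')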
|
91791
|
import sys
import unittest
from packaging import version
from unittest import mock
import tensorflow as tf
import larq_zoo as lqz
from tensorflow.python.eager import context
sys.modules["importlib.metadata"] = mock.MagicMock()
sys.modules["importlib_metadata"] = mock.MagicMock()
sys.modules["larq_compute_engine.mlir._tf_tfl_flatbuffer"] = mock.MagicMock()
sys.modules[
"larq_compute_engine.tflite.python.interpreter_wrapper_lite"
] = mock.MagicMock()
sys.modules["larq_compute_engine.mlir.python.tflite_schema"] = mock.MagicMock()
from larq_compute_engine.mlir.python.converter import convert_keras_model
from larq_compute_engine.mlir._tf_tfl_flatbuffer import (
convert_graphdef_to_tflite_flatbuffer as mocked_graphdef_converter,
convert_saved_model_to_tflite_flatbuffer as mocked_saved_model_converter,
)
class TestConverter(unittest.TestCase):
def test_larq_zoo_models(self):
with context.eager_mode():
model = lqz.sota.QuickNet(weights=None)
convert_keras_model(model)
if version.parse(tf.__version__) < version.parse("2.2"):
mocked_graphdef_converter.assert_called_once_with(
mock.ANY,
["input_1"],
["DT_FLOAT"],
[[1, 224, 224, 3]],
["Identity"],
False,
"arm",
None,
False,
)
else:
mocked_saved_model_converter.assert_called_once_with(
mock.ANY, ["serve"], ["serving_default"], 1, "arm", None, False
)
def test_wrong_arg(self):
with self.assertRaises(ValueError):
convert_keras_model("./model.h5")
def test_target_arg(self):
with context.eager_mode():
model = lqz.sota.QuickNet(weights=None)
# These should work
convert_keras_model(model, target="arm")
convert_keras_model(model, target="xcore")
# Anything else shouldn't
with self.assertRaises(
ValueError, msg='Expected `target` to be "arm" or "xcore"'
):
convert_keras_model(model, target="x86")
if __name__ == "__main__":
unittest.main()
|
91824
|
import CTL.funcs.xplib as xplib
def setXP(newXP):
'''
    Set the *py (e.g. numpy, cupy) library for CTL.
newXP : object, default numpy
The numpy-like library for numeric functions.
'''
xplib.xp = newXP
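# Usage sketch (illustrative, not part of the original module):
#     import numpy
#     setXP(numpy)   # numpy is the default backend
#     # on a machine with cupy installed, pass cupy instead:
#     # import cupy
#     # setXP(cupy)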
|
91830
|
import numpy as np
import random
from scipy.misc import imresize
from PIL import Image
import math
import os
# from torchvision import transforms
import torch
class MultiRescale(object):
"""MultiScale the input image in a sample by given scales.
Args:
scales_list (tuple or int): Desired output scale list.
"""
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self, sample):
# height, width = sample['size'][0], sample['size'][1]
rescale = lambda x: Image.fromarray(imresize(x, self.output_size))
# if (height%8 is not 0) or (width%8 is not 0):
for k, v in sample.items():
            if k != 'name' and k != 'size':
sample[k] = rescale(v)
return sample
class RandomCrop(object):
"""Crop the images randomly in a sample.
Args:
output_size (tuple or int): Desired output size. If int, square crop is made.
"""
random_crop_list = [320, 480, 640]
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self, sample):
random_crop_size = random.choice(self.random_crop_list)
trimap = sample['trimap']
trimap_ = np.asarray(trimap)
if (min(trimap_.shape) < random_crop_size):
h_start = w_start = 0
h_end = w_end = min(trimap_.shape)
else:
h_start, h_end, w_start, w_end = validUnknownRegion(trimap_, random_crop_size)
crop = lambda x: Image.fromarray(imresize(np.asarray(x)[h_start:h_end, w_start:w_end], self.output_size))
cropped_sample = {}
cropped_sample['name'] = sample['name']
for k, v in sample.items():
            if k != 'name':
cropped_sample[k] = crop(v)
return cropped_sample
class MultiToTensor(object):
def __call__(self, sample):
mean_ = (114., 121., 134.,)
def trans(x):
if x.mode == 'RGB':
x = torch.from_numpy(
np.transpose(np.asarray(x)-mean_, (2,0,1)) / 255.
).type(torch.FloatTensor)
else:
x = torch.from_numpy(
np.expand_dims(np.asarray(x), axis=0) / 255.
).type(torch.FloatTensor)
return x
sample_ = {}
sample_['name'] = sample['name']
sample_['size'] = sample['size']
for k, v in sample.items():
            if k != 'name' and k != 'size':
sample_[k] = trans(v)
return sample_
def generate_gradient_map(grad, area=3):
    """Generate a gradient map by averaging the gradient over a small square neighborhood.
    Parameters:
        grad: a gradient matrix
        area: edge length of the square neighborhood to average over
              (the unrolled sum below assumes area == 3)
    Returns:
        grad_map: an array of the same shape as grad
    """
num_pixel = int(area / 2)
col_ = grad.shape[1]
row_ = grad.shape[0] + 2*num_pixel
new_row = np.zeros([num_pixel, col_], dtype=np.float32)
new_col = np.zeros([row_, num_pixel], dtype=np.float32)
result = np.zeros_like(grad)
_tmp = np.r_[new_row, grad, new_row]
_tmp = np.c_[new_col, _tmp, new_col]
for i in range(grad.shape[0]):
for j in range(grad.shape[1]):
area_count = _tmp[i][j] + _tmp[i][j+1] + _tmp[i][j+2] +\
_tmp[i+1][j] + _tmp[i+1][j+1] + _tmp[i+1][j+2] +\
_tmp[i+2][j] + _tmp[i+2][j+1] + _tmp[i+2][j+2]
result[i][j] = area_count / (area **2)
return result
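# Small illustrative check (not part of the original module): averaging an
# all-ones gradient over a 3x3 neighborhood gives 1.0 in the interior and
# smaller values at the border, where the zero padding contributes to the sum.
#     g = np.ones((4, 4), dtype=np.float32)
#     m = generate_gradient_map(g, area=3)
#     # m[1, 1] == 1.0 (full neighborhood), m[0, 0] ~= 4/9 (corner)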
def getFileList(base, sub):
"""
Get file list of a directory:
Param:
base: base directory
sub: sub-directory name
Return:
a list of image file name
"""
path = os.path.join(base, sub)
files = os.listdir(path)
fileList = []
for f in files:
if (os.path.isfile(path + '/' + f)):
path_ = './' + sub
path_ = os.path.join(path_, f)
# add image file into list
fileList.append(path_)
return fileList
def candidateUnknownRegion(img):
'''
    Randomly propose a candidate unknown-region center within the unknown area of img.
param:
img: trimap image
return:
an index for unknown region
'''
index = np.where(img == 128)
idx = random.choice([j for j in range(len(index[0]))])
return np.array(index)[:, idx]
def validUnknownRegion(img, output_size):
"""
    Check whether the candidate unknown region is valid and return the index.
param:
img: trimap image
output_size: the desired output image size
return:
output the crop start and end index along h and w respectively.
"""
h_start = h_end = w_start = w_end = 0
cand = candidateUnknownRegion(img)
shape_ = img.shape
if (output_size == 320):
h_start = max(cand[0]-159, 0)
w_start = max(cand[1]-159, 0)
if (h_start+320 > shape_[0]):
h_start = shape_[0] - 320
if (w_start+320 > shape_[1]):
w_start = shape_[1] - 320
h_end = h_start + 320
w_end = w_start + 320
return h_start, h_end, w_start, w_end
elif (output_size == 480):
h_start = max(cand[0]-239, 0)
w_start = max(cand[1]-239, 0)
if (h_start+480 > shape_[0]):
h_start = shape_[0] - 480
if (w_start+480 > shape_[1]):
w_start = shape_[1] - 480
h_end = h_start + 480
w_end = w_start + 480
elif (output_size == 640):
h_start = max(cand[0]-319, 0)
w_start = max(cand[1]-319, 0)
if (h_start+640 > shape_[0]):
h_start = shape_[0] - 640
if (w_start+640 > shape_[1]):
w_start = shape_[1] - 640
h_end = h_start + 640
w_end = w_start + 640
return h_start, h_end, w_start, w_end
|
91834
|
from torch import nn
from rcnn.modeling import registry
from rcnn.core.config import cfg
@registry.MASKIOU_OUTPUTS.register("linear_output")
class MaskIoU_output(nn.Module):
def __init__(self, dim_in):
super(MaskIoU_output, self).__init__()
num_classes = cfg.MODEL.NUM_CLASSES
self.maskiou = nn.Linear(dim_in, num_classes)
nn.init.normal_(self.maskiou.weight, mean=0, std=0.01)
nn.init.constant_(self.maskiou.bias, 0)
def forward(self, x):
maskiou = self.maskiou(x)
return maskiou
|
91840
|
import torch
from tqdm import tqdm
import os
import numpy as np
train_features=torch.load('train_features.pt')
n = len(list(np.load('/facebook/data/images/train_imlist.npy')))
print(n)
os.makedirs('/siim/sim_pt_256', exist_ok=True)
for i in tqdm(range(n)):
a=torch.mm(train_features[i:i+1],train_features.t())
torch.save(torch.tensor(np.argpartition(np.array(a),-256)[0][-256:]),os.path.join('/siim/sim_pt_256',f'{i}_sim256.pt'))
os.makedirs('/siim/sim_pt', exist_ok=True)
for i in tqdm(range(n)):
a=torch.mm(train_features[i:i+1],train_features.t())
torch.save(torch.tensor(np.argpartition(np.array(a),-512)[0][-512:]),os.path.join('/siim/sim_pt',f'{i}_sim512.pt'))
os.makedirs('/storage1/sim_pt',exist_ok=True)
if n < 65746:
for i in tqdm(range(n)):
a=torch.mm(train_features[i:i+1],train_features.t())
torch.save(torch.argsort(a,descending=True)[0][:300],os.path.join('/storage1/sim_pt', f'{i}_sim2000.pt'))
else:
for i in tqdm(range(65746)):
a=torch.mm(train_features[i:i+1],train_features.t())
torch.save(torch.argsort(a,descending=True)[0][:300],os.path.join('/storage1/sim_pt', f'{i}_sim2000.pt'))
for i in tqdm(range(65746,1000000)):
a=torch.mm(train_features[i:i+1],train_features.t())
torch.save(torch.tensor(np.argpartition(np.array(a),-24)[0][-24:]), os.path.join('/storage1/sim_pt',f'{i}_sim2000.pt'))
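# Note on the indexing trick above (illustrative, not part of the original script):
# np.argpartition(x, -k) places the indices of the k largest values in the last k
# positions (in no particular order), which is cheaper than a full argsort.
#     x = np.array([[0.1, 0.9, 0.3, 0.7]])
#     np.argpartition(x, -2)[0][-2:]   # -> indices 1 and 3, order not guaranteed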
|
91848
|
def kb_ids2known_facts(kb_ids):
"""
:param kb_ids: a knowledge base of facts that are already mapped to ids
:return: a set of all known facts (used later for negative sampling)
"""
facts = set()
for struct in kb_ids:
arrays = kb_ids[struct][0]
num_facts = len(arrays[0])
for i in range(num_facts):
fact = [x[i] for x in arrays]
facts.add(tuple(fact))
return facts
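# Illustrative usage sketch (assumed input structure, not part of the original
# module): kb_ids maps a structure key to a list whose first element is a list
# of aligned id arrays, e.g. [subject_ids, relation_ids, object_ids]. The
# returned set can then be used to reject known triples during negative sampling.
#     kb_ids = {'rel-arity-2': [[[0, 1], [5, 5], [2, 3]]]}
#     known = kb_ids2known_facts(kb_ids)   # {(0, 5, 2), (1, 5, 3)}
#     (0, 5, 3) in known                   # False -> usable as a negative sample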
|
91952
|
from secret_sdk.core import Coins
from secret_sdk.client.localsecret import LocalSecret, main_net_chain_id
api = LocalSecret(chain_id=main_net_chain_id)
fee = api.tx.estimate_fee(
gas=250_000
)
print(fee)
fee = api.tx.estimate_fee(
gas=200_000,
gas_prices=Coins.from_data([{"amount": 0.25, "denom": "uscrt"}])
)
print(fee)
fee = api.tx.estimate_fee(
gas=200_000,
gas_prices=Coins.from_data([{"amount": 0.25, "denom": "uscrt"}]),
gas_adjustment=1.2,
fee_denoms=["uscrt"]
)
print(fee)
fee = api.tx.estimate_fee(
gas=200_000,
gas_prices=Coins.from_data([{"amount": 0.25, "denom": "uscrt"}]),
fee_denoms=["ukrw"]
)
print(fee)
|
92009
|
from fairing.backend.kubeflow import KubeflowBackend
from fairing.utils import get_image_full
class BasicArchitecture():
def add_jobs(self, svc, count, repository, image_name, image_tag, volumes, volume_mounts):
full_image_name = get_image_full(repository, image_name, image_tag)
tfjobs = []
for ix in range(count):
tfjobs.append({
"name": "{}-{}-{}".format(image_name, image_tag, ix),
"replicaSpecs": [{
"replicaType": "MASTER",
"replicas": 1,
"containers": [
{
"image": full_image_name,
"volumeMounts": volume_mounts
}
],
"volumes": volumes
}]
})
svc["tfJobs"] = tfjobs
return svc
def get_associated_backend(self):
return KubeflowBackend()
|
92011
|
from collections import Counter
from collections import defaultdict
# [3] https://leetcode.com/problems/longest-substring-without-repeating-characters/
# Given a string, find the length of the longest substring without repeating characters.
#
# variation with no pattern
def lengthOfLongestSubstring(s):
# create a default dict to maintain state
counter = defaultdict(int)
count, start, end, res = 0, 0, 0, 0
while end < len(s):
counter[s[end]] += 1
if counter[s[end]] > 1:
count += 1
end += 1
while count > 0:
counter[s[start]] -= 1
if counter[s[start]] > 0:
count -= 1
start += 1
res = max(res, end - start)
return res
# [76] https://leetcode.com/problems/minimum-window-substring/
# Given a string S and a string T, find the minimum window in S which will contain all the characters in T
#
# variation with finding minimum
def minWindow(s: str, t: str) -> str:
counter = Counter(t)
count, start, end, res = len(t), 0, 0, [float('inf'), 0]
while end < len(s):
counter[s[end]] -= 1
# consider duplicate char in t
if counter[s[end]] >= 0:
count -= 1
end += 1
        # the window is valid while count == 0; shrink from the left to find the minimum
while count == 0:
# update minimum here, inner while loop
if end - start < res[0]:
res = (end - start, start)
counter[s[start]] += 1
if counter[s[start]] > 0:
count += 1
start += 1
return s[res[1]:res[0] + res[1]] if res[0] != float('inf') else ''
# [904] https://leetcode.com/problems/fruit-into-baskets/
# You have two baskets, and each basket can carry any quantity of fruit, but you want each basket to only carry one type of fruit each.
# What is the total amount of fruit you can collect with this procedure?
#
# variation with list
def totalFruit(tree: 'List[int]') -> int:
counter = defaultdict(int)
count, start, end, res = 0, 0, 0, 0
while end < len(tree):
counter[tree[end]] += 1
if counter[tree[end]] == 1:
count += 1
end += 1
while count > 2:
counter[tree[start]] -= 1
if counter[tree[start]] == 0:
count -= 1
start += 1
res = max(res, end - start)
return res
# [438] https://leetcode.com/problems/find-all-anagrams-in-a-string/
# Given a string s and a non-empty string p, find all the start indices of p's anagrams in s.
#
# variation with restrict between start and end
def findAnagrams(s: str, p: str) -> 'List[int]':
len_p, len_s = len(p), len(s)
if len_p > len_s:
return []
counter = Counter(p)
count, start, end, res = len_p, 0, 0, []
while end < len_s:
        # the counter goes negative for chars not in p; count only drops for chars still needed
counter[s[end]] -= 1
if counter[s[end]] >= 0:
count -= 1
end += 1
if count == 0:
res.append(start)
        # no inner while loop needed: the window length is fixed at len_p
if end - start == len_p:
counter[s[start]] += 1
            # chars not in p stay negative, so they never push count back up
if counter[s[start]] > 0:
count += 1
start += 1
return res
# [30] https://leetcode.com/problems/substring-with-concatenation-of-all-words/
# Find all starting indices of substring(s) in s that is a concatenation of each word in words exactly once and without any intervening characters.
#
# variation with complex match policy
def findSubstring(s: str, words: 'List[str]') -> 'List[int]':
if not words:
return []
word_len, res = len(words[0]), []
# start offset from 0 to word_len, and step is word_len
for i in range(word_len):
# reset state every epoch
counter = Counter(words)
start, end, count = i, i, len(words)
while end < len(s):
cur_word = s[end:end + word_len]
# check is not necessary here, just for performance
if cur_word in counter:
counter[cur_word] -= 1
if counter[cur_word] >= 0:
count -= 1
end += word_len
if count == 0:
res.append(start)
# ensure consecutive words
if end - start == word_len * len(words):
cur_word = s[start:start + word_len]
if cur_word in counter:
counter[cur_word] += 1
if counter[cur_word] > 0:
count += 1
start += word_len
# the order is not necessary here
return res
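# Illustrative generalization of the sliding-window pattern shared by the
# snippets above (not one of the original problems): longest substring with at
# most k distinct characters; totalFruit above is the special case k == 2.
def lengthOfLongestSubstringKDistinct(s: str, k: int) -> int:
    counter = defaultdict(int)
    count, start, res = 0, 0, 0
    for end in range(len(s)):
        counter[s[end]] += 1
        if counter[s[end]] == 1:
            count += 1              # a new distinct character entered the window
        while count > k:
            counter[s[start]] -= 1
            if counter[s[start]] == 0:
                count -= 1          # a distinct character left the window
            start += 1
        res = max(res, end - start + 1)
    return res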
|
92018
|
from mltoolkit.mldp.pipeline import Pipeline
from mltoolkit.mldp.steps.readers import CsvReader
from mltoolkit.mldp.steps.transformers.nlp import TokenProcessor,\
VocabMapper, Padder
from mltoolkit.mldp.steps.transformers.field import FieldSelector
from mltoolkit.mldp.utils.helpers.nlp.token_cleaning import twitter_text_cleaner
from mltoolkit.mldp.utils.tools import Vocabulary
from mltoolkit.mldp.utils.tools.vocabulary import PAD
from mltoolkit.mldp.tutorials.steps import TwitterFilesPreprocessor,\
FeaturesLabelsFormatter
from mltoolkit.mldp.tutorials.model import ISentiLSTM
import unittest
from nltk.tokenize import TweetTokenizer
import os
class TestTutorials(unittest.TestCase):
def setUp(self):
self.tutorials_path = "mltoolkit.mldp/tutorials/"
def test_how_to_apply_run(self):
data_path = os.path.join(self.tutorials_path,
"data/tweets.csv")
# paths where vocabs will be saved and later loaded from
words_vocab_file_path = os.path.join(self.tutorials_path,
"data/vocabs/words.txt")
labels_vocab_file_path = os.path.join(self.tutorials_path,
'data/vocabs/labels.txt')
# creating step objects
twitter_tokenizer = TweetTokenizer()
preprocessor = TwitterFilesPreprocessor(input_cols_number=3,
tweets_indx=2,
add_header=['ids', 'labels',
'tweets'])
csv_reader = CsvReader(sep='\t', chunk_size=30)
fields_selector = FieldSelector(fnames=["tweets", "labels"])
token_processor = TokenProcessor(fnames="tweets",
tok_func=twitter_tokenizer.tokenize,
tok_cleaning_func=twitter_text_cleaner,
lowercase=True)
# data pipeline for vocabularies creation
vocab_data_pipeline = Pipeline(reader=csv_reader,
preprocessor=preprocessor,
worker_processes_num=0,
name_prefix="vocabs")
vocab_data_pipeline.add_step(fields_selector)
vocab_data_pipeline.add_step(token_processor)
# creating or loading vocabs
words_vocab = Vocabulary(vocab_data_pipeline, name_prefix="words")
words_vocab.load_or_create(words_vocab_file_path,
data_source={"data_path": data_path},
data_fnames="tweets")
labels_vocab = Vocabulary(vocab_data_pipeline,
name_prefix="labels")
labels_vocab.load_or_create(labels_vocab_file_path,
data_source={"data_path": data_path},
data_fnames="labels")
print(words_vocab)
print(labels_vocab)
print(vocab_data_pipeline)
# extra steps for training and evaluation
mapper = VocabMapper(field_names_to_vocabs={"tweets": words_vocab,
"labels": labels_vocab})
padder = Padder(fname="tweets", new_mask_fname="tweets_mask",
pad_symbol=words_vocab[PAD].id)
formatter = FeaturesLabelsFormatter(features_field_name="tweets",
labels_field_name="labels",
classes_number=len(labels_vocab))
# building the actual pipeline
dev_data_pipeline = Pipeline(reader=csv_reader, preprocessor=preprocessor,
worker_processes_num=1, name_prefix="dev")
dev_data_pipeline.add_step(fields_selector)
dev_data_pipeline.add_step(token_processor)
dev_data_pipeline.add_step(mapper)
dev_data_pipeline.add_step(padder)
dev_data_pipeline.add_step(formatter)
print(dev_data_pipeline)
epochs = 2
i_model = ISentiLSTM(dev_data_pipeline)
i_model.init_model(words_vocab_size=len(words_vocab), input_dim=50,
lstm_hidden_dim=120,
number_of_classes=len(labels_vocab),
mask_symbol=words_vocab[PAD].id)
# print("testing before training")
# i_model.test(data_path=data_path)
# print("training the model")
# for epoch in range(1, epochs + 1):
# print "epoch %d" % epoch
# i_model.train(data_path=data_path)
# i_model.test(data_path=data_path)
if __name__ == '__main__':
unittest.main()
|
92020
|
import unittest
from tginviter import generate_invite_link, get_random_token, \
generate_joinchat_link
class TestLinksGeneration(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.bot_name = "test_bot"
cls.token = get_random_token()
def test_keywords_param_only(self):
with self.assertRaises(TypeError):
generate_invite_link(self.bot_name, self.token, 2)
generate_invite_link(self.bot_name, max_uses=2)
def test_proto_exist(self):
with self.assertRaises(ValueError):
generate_invite_link(self.bot_name, proto="qwe")
generate_invite_link(self.bot_name, token=self.token, proto="tg")
generate_invite_link(self.bot_name, token=self.token, proto="http")
generate_invite_link(self.bot_name, token=self.token, proto="https")
def test_return_token(self):
link, token = generate_invite_link(self.bot_name)
self.assertTrue(link.endswith(token))
def test_generate_full_deeplink(self):
http = f"http://telegram.me/{self.bot_name}?start={self.token}"
https = f"https://telegram.me/{self.bot_name}?start={self.token}"
link, _ = generate_invite_link(self.bot_name, token=self.token, short=False)
self.assertEqual(link, https)
link, _ = generate_invite_link(
self.bot_name, token=self.token, short=False, proto="https"
)
self.assertEqual(link, https)
link, _ = generate_invite_link(
self.bot_name, token=self.token, short=False, proto="http"
)
self.assertEqual(link, http)
def test_generate_short_deeplink(self):
http = f"http://t.me/{self.bot_name}?start={self.token}"
https = f"https://t.me/{self.bot_name}?start={self.token}"
link, _ = generate_invite_link(self.bot_name, token=self.token)
self.assertEqual(link, https)
link, _ = generate_invite_link(self.bot_name, token=self.token, short=True)
self.assertEqual(link, https)
link, _ = generate_invite_link(self.bot_name, token=self.token, proto="http")
self.assertEqual(link, http)
link, _ = generate_invite_link(
self.bot_name, token=self.token, proto="https"
)
self.assertEqual(link, https)
def test_random_tokens(self):
token1 = get_random_token()
token2 = get_random_token()
self.assertNotEqual(token1, token2)
def test_random_deeplinks(self):
https = f"https://t.me/{self.bot_name}?start="
link1, token1 = generate_invite_link(self.bot_name)
self.assertTrue(link1.startswith(https))
link2, token2 = generate_invite_link(self.bot_name)
self.assertTrue(link1.startswith(https))
self.assertNotEqual(token1, token2)
self.assertNotEqual(link1, link2)
def test_generate_tg_proto_deeplink(self):
tg = f"tg://resolve?domain={self.bot_name}&start={self.token}"
link, _ = generate_invite_link(self.bot_name, token=self.token, proto="tg")
self.assertEqual(link, tg)
link, _ = generate_invite_link(
self.bot_name, token=self.token, proto="tg", short=True
)
self.assertEqual(link, tg)
link, _ = generate_invite_link(
self.bot_name, token=self.token, proto="tg", short=False
)
self.assertEqual(link, tg)
def test_joinchat_short_link(self):
https = f"https://t.me/joinchat/{self.token}"
http = f"http://t.me/joinchat/{self.token}"
link = generate_joinchat_link(self.token)
self.assertEqual(link, https)
link = generate_joinchat_link(self.token, short=True)
self.assertEqual(link, https)
def test_joinchat_full_link(self):
https = f"https://telegram.me/joinchat/{self.token}"
http = f"http://telegram.me/joinchat/{self.token}"
link = generate_joinchat_link(self.token, short=False)
self.assertEqual(link, https)
|
92026
|
import json
import os
import re
import subprocess
from functools import cached_property
import requests
import yaml
# Changelog types
PULL_REQUEST = 'pull_request'
COMMIT = 'commit_message'
class ChangelogCIBase:
"""Base Class for Changelog CI"""
github_api_url = 'https://api.github.com'
def __init__(
self,
repository,
event_path,
config,
pull_request_branch,
filename='CHANGELOG.md',
token=None
):
self.repository = repository
self.filename = filename
self.config = config
self.pull_request_branch = pull_request_branch
self.token = token
title, number = self._get_pull_request_title_and_number(event_path)
self.pull_request_title = title
self.pull_request_number = number
@staticmethod
def _get_pull_request_title_and_number(event_path):
"""Gets pull request title from `GITHUB_EVENT_PATH`"""
with open(event_path, 'r') as json_file:
# This is just a webhook payload available to the Action
data = json.load(json_file)
title = data["pull_request"]['title']
number = data['number']
return title, number
@cached_property
def _get_request_headers(self):
"""Get headers for GitHub API request"""
headers = {
'Accept': 'application/vnd.github.v3+json'
}
# if the user adds `GITHUB_TOKEN` add it to API Request
# required for `private` repositories
if self.token:
headers.update({
'authorization': 'Bearer {token}'.format(token=self.token)
})
return headers
    def get_changes_after_last_release(self):
        raise NotImplementedError
    def parse_changelog(self, version, changes):
        raise NotImplementedError
def _validate_pull_request(self):
"""Check if changelog should be generated for this pull request"""
pattern = re.compile(self.config.pull_request_title_regex)
match = pattern.search(self.pull_request_title)
if match:
return True
return
def _get_version_number(self):
"""Get version number from the pull request title"""
pattern = re.compile(self.config.version_regex)
match = pattern.search(self.pull_request_title)
if match:
return match.group()
return
def _get_file_mode(self):
"""Gets the mode that the changelog file should be opened in"""
if os.path.exists(self.filename):
# if the changelog file exists
# opens it in read-write mode
file_mode = 'r+'
else:
            # if the changelog file does not exist,
            # open it in read-write mode
            # and create the file first
file_mode = 'w+'
return file_mode
def _get_latest_release_date(self):
"""Using GitHub API gets latest release date"""
url = (
'{base_url}/repos/{repo_name}/releases/latest'
).format(
base_url=self.github_api_url,
repo_name=self.repository
)
response = requests.get(url, headers=self._get_request_headers)
published_date = ''
if response.status_code == 200:
response_data = response.json()
# get the published date of the latest release
published_date = response_data['published_at']
else:
# if there is no previous release API will return 404 Not Found
msg = (
f'Could not find any previous release for '
f'{self.repository}, status code: {response.status_code}'
)
print_message(msg, message_type='warning')
return published_date
def _commit_changelog(self, string_data):
"""Write changelog to the changelog file"""
file_mode = self._get_file_mode()
with open(self.filename, file_mode) as f:
# read the existing data and store it in a variable
body = f.read()
# write at the top of the file
f.seek(0, 0)
f.write(string_data)
if body:
# re-write the existing data
f.write('\n\n')
f.write(body)
subprocess.run(['git', 'add', self.filename])
subprocess.run(
['git', 'commit', '-m', '(Changelog CI) Added Changelog']
)
subprocess.run(
['git', 'push', '-u', 'origin', self.pull_request_branch]
)
def _comment_changelog(self, string_data):
"""Comments Changelog to the pull request"""
if not self.token:
# Token is required by the GitHub API to create a Comment
# if not provided exit with error message
msg = (
"Could not add a comment. "
"`GITHUB_TOKEN` is required for this operation. "
"If you want to enable Changelog comment, please add "
"`GITHUB_TOKEN` to your workflow yaml file. "
"Look at Changelog CI's documentation for more information."
)
print_message(msg, message_type='error')
return
owner, repo = self.repository.split('/')
payload = {
'owner': owner,
'repo': repo,
'issue_number': self.pull_request_number,
'body': string_data
}
url = (
'{base_url}/repos/{repo}/issues/{number}/comments'
).format(
base_url=self.github_api_url,
repo=self.repository,
number=self.pull_request_number
)
response = requests.post(
url, headers=self._get_request_headers, json=payload
)
if response.status_code != 201:
# API should return 201, otherwise show error message
msg = (
f'Error while trying to create a comment. '
f'GitHub API returned error response for '
f'{self.repository}, status code: {response.status_code}'
)
print_message(msg, message_type='error')
def run(self):
"""Entrypoint to the Changelog CI"""
if (
not self.config.commit_changelog and
not self.config.comment_changelog
):
# if both commit_changelog and comment_changelog is set to false
# then exit with warning and don't generate Changelog
msg = (
'Skipping Changelog generation as both `commit_changelog` '
                'and `comment_changelog` are set to False. '
'If you did not intend to do this please set '
'one or both of them to True.'
)
print_message(msg, message_type='error')
return
is_valid_pull_request = self._validate_pull_request()
if not is_valid_pull_request:
# if pull request regex doesn't match then exit
# and don't generate changelog
msg = (
f'The title of the pull request did not match. '
f'Regex tried: "{self.config.pull_request_title_regex}", '
f'Aborting Changelog Generation.'
)
print_message(msg, message_type='error')
return
version = self._get_version_number()
if not version:
            # if no version number is found in the pull request title, exit.
            # This can happen if the pull request is not meant to be a release
            # or the title is not formatted as expected.
msg = (
f'Could not find matching version number. '
f'Regex tried: {self.config.version_regex} '
f'Aborting Changelog Generation'
)
print_message(msg, message_type='error')
return
changes = self.get_changes_after_last_release()
# exit the method if there is no changes found
if not changes:
return
string_data = self.parse_changelog(version, changes)
if self.config.commit_changelog:
print_message('Commit Changelog', message_type='group')
self._commit_changelog(string_data)
print_message('', message_type='endgroup')
# Not needed in our Case
#if self.config.comment_changelog:
#print_message('Comment Changelog', message_type='group')
#self._comment_changelog(string_data)
#print_message('', message_type='endgroup')
class ChangelogCIPullRequest(ChangelogCIBase):
"""The class that generates, commits and/or comments changelog using pull requests"""
github_api_url = 'https://api.github.com'
@staticmethod
def _get_changelog_line(item):
"""Generate each line of changelog"""
return "* [#{number}]({url}): {title}\n".format(
number=item['number'],
url=item['url'],
title=item['title']
)
def get_changes_after_last_release(self):
"""Get all the merged pull request after latest release"""
previous_release_date = self._get_latest_release_date()
if previous_release_date:
merged_date_filter = 'merged:>=' + previous_release_date
else:
# if there is no release for the repo then
# do not filter by merged date
merged_date_filter = ''
url = (
'{base_url}/search/issues'
'?q=repo:{repo_name}+'
'is:pr+'
'is:merged+'
'sort:author-date-asc+'
'{merged_date_filter}'
'&sort=merged'
).format(
base_url=self.github_api_url,
repo_name=self.repository,
merged_date_filter=merged_date_filter
)
items = []
response = requests.get(url, headers=self._get_request_headers)
if response.status_code == 200:
response_data = response.json()
# `total_count` represents the number of
# pull requests returned by the API call
if response_data['total_count'] > 0:
for item in response_data['items']:
data = {
'title': item['title'],
'number': item['number'],
'url': item['html_url'],
'labels': [label['name'] for label in item['labels']]
}
items.append(data)
else:
msg = (
f'There was no pull request '
f'made on {self.repository} after last release.'
)
print_message(msg, message_type='error')
else:
msg = (
f'Could not get pull requests for '
f'{self.repository} from GitHub API. '
f'response status code: {response.status_code}'
)
print_message(msg, message_type='error')
return items
def parse_changelog(self, version, changes):
"""Parse the pull requests data and return a string"""
string_data = (
'# ' + self.config.header_prefix + ' ' + version + '\n\n'
)
group_config = self.config.group_config
if group_config:
for config in group_config:
if len(changes) == 0:
break
items_string = ''
                # iterate over a copy so that removing items below doesn't skip entries
                for pull_request in list(changes):
# check if the pull request label matches with
# any label of the config
if (
any(
label in pull_request['labels']
for label in config['labels']
)
):
items_string += self._get_changelog_line(pull_request)
# remove the item so that one item
# does not match multiple groups
changes.remove(pull_request)
if items_string:
string_data += '\n#### ' + config['title'] + '\n\n'
string_data += '\n' + items_string
else:
            # If group config does not exist then append the changes without any groups
string_data += ''.join(
map(self._get_changelog_line, changes)
)
return string_data
class ChangelogCICommitMessage(ChangelogCIBase):
"""The class that generates, commits and/or comments changelog using commit messages"""
@staticmethod
def _get_changelog_line(item):
"""Generate each line of changelog"""
return "* [{sha}]({url}): {message}\n".format(
sha=item['sha'][:6],
url=item['url'],
message=item['message']
)
def get_changes_after_last_release(self):
"""Get all the merged pull request after latest release"""
previous_release_date = self._get_latest_release_date()
url = '{base_url}/repos/{repo_name}/commits?since={date}'.format(
base_url=self.github_api_url,
repo_name=self.repository,
date=previous_release_date or ''
)
items = []
response = requests.get(url, headers=self._get_request_headers)
if response.status_code == 200:
response_data = response.json()
if len(response_data) > 0:
for item in response_data:
message = item['commit']['message']
# Exclude merge commit
if not (
message.startswith('Merge pull request #') or
message.startswith('Merge branch')
):
data = {
'sha': item['sha'],
'message': message,
'url': item['html_url']
}
items.append(data)
else:
print_message(f'Skipping Merge Commit "{message}"')
else:
msg = (
f'There was no commit '
f'made on {self.repository} after last release.'
)
print_message(msg, message_type='error')
else:
msg = (
f'Could not get commits for '
f'{self.repository} from GitHub API. '
f'response status code: {response.status_code}'
)
print_message(msg, message_type='error')
return items
def parse_changelog(self, version, changes):
"""Parse the commit data and return a string"""
string_data = (
'# ' + self.config.header_prefix + ' ' + version + '\n\n'
)
string_data += ''.join(map(self._get_changelog_line, changes))
return string_data
class ChangelogCIConfiguration:
"""Configuration class for Changelog CI"""
# The regular expression used to extract semantic versioning is a
# slightly less restrictive modification of the following regular expression
# https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string
DEFAULT_SEMVER_REGEX = (
r"v?(0|[1-9]\d*)\.(0|[1-9]\d*)\.?(0|[1-9]\d*)?(?:-(("
r"?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|["
r"1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(["
r"0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?"
)
DEFAULT_PULL_REQUEST_TITLE_REGEX = r"^(?i:release)"
DEFAULT_VERSION_PREFIX = "Version:"
DEFAULT_GROUP_CONFIG = []
COMMIT_CHANGELOG = True
COMMENT_CHANGELOG = False
def __init__(self, config_file):
# Initialize with default configuration
self.header_prefix = self.DEFAULT_VERSION_PREFIX
self.commit_changelog = self.COMMIT_CHANGELOG
self.comment_changelog = self.COMMENT_CHANGELOG
self.pull_request_title_regex = self.DEFAULT_PULL_REQUEST_TITLE_REGEX
self.version_regex = self.DEFAULT_SEMVER_REGEX
self.changelog_type = PULL_REQUEST
        # copy so that appending validated groups does not mutate the shared class default
        self.group_config = list(self.DEFAULT_GROUP_CONFIG)
self.user_raw_config = self.get_user_config(config_file)
self.validate_configuration()
@staticmethod
def get_user_config(config_file):
"""Read user provided configuration file and return user configuration"""
if not config_file:
print_message(
'No Configuration file found, '
'falling back to default configuration to parse changelog',
message_type='warning'
)
return
try:
# parse config files with the extension .yml and .yaml
# using YAML syntax
if config_file.endswith('yml') or config_file.endswith('yaml'):
loader = yaml.safe_load
# parse config files with the extension .json
# using JSON syntax
elif config_file.endswith('json'):
loader = json.load
else:
print_message(
'We only support `JSON` or `YAML` file for configuration '
'falling back to default configuration to parse changelog',
message_type='error'
)
return
with open(config_file, 'r') as file:
config = loader(file)
return config
except Exception as e:
msg = (
f'Invalid Configuration file, error: {e}, '
'falling back to default configuration to parse changelog'
)
print_message(msg, message_type='error')
return
def validate_configuration(self):
"""Validate all the configuration options and update configuration attributes"""
if not self.user_raw_config:
return
if not isinstance(self.user_raw_config, dict):
print_message(
'Configuration does not contain required mapping '
'falling back to default configuration to parse changelog',
message_type='error'
)
return
self.validate_header_prefix()
self.validate_commit_changelog()
self.validate_comment_changelog()
self.validate_pull_request_title_regex()
self.validate_version_regex()
self.validate_changelog_type()
self.validate_group_config()
def validate_header_prefix(self):
"""Validate and set header_prefix configuration option"""
header_prefix = self.user_raw_config.get('header_prefix')
if not header_prefix or not isinstance(header_prefix, str):
msg = (
'`header_prefix` was not provided or not valid, '
f'falling back to `{self.header_prefix}`.'
)
print_message(msg, message_type='warning')
else:
self.header_prefix = header_prefix
def validate_commit_changelog(self):
"""Validate and set commit_changelog configuration option"""
commit_changelog = self.user_raw_config.get('commit_changelog')
if commit_changelog not in [0, 1, False, True]:
msg = (
'`commit_changelog` was not provided or not valid, '
f'falling back to `{self.commit_changelog}`.'
)
print_message(msg, message_type='warning')
else:
self.commit_changelog = bool(commit_changelog)
def validate_comment_changelog(self):
"""Validate and set comment_changelog configuration option"""
comment_changelog = self.user_raw_config.get('comment_changelog')
if comment_changelog not in [0, 1, False, True]:
msg = (
'`comment_changelog` was not provided or not valid, '
f'falling back to `{self.comment_changelog}`.'
)
print_message(msg, message_type='warning')
else:
self.comment_changelog = bool(comment_changelog)
def validate_pull_request_title_regex(self):
"""Validate and set pull_request_title_regex configuration option"""
pull_request_title_regex = self.user_raw_config.get('pull_request_title_regex')
if not pull_request_title_regex:
msg = (
'`pull_request_title_regex` is not provided, '
f'Falling back to {self.pull_request_title_regex}.'
)
print_message(msg, message_type='warning')
return
try:
# This will raise an error if the provided regex is not valid
re.compile(pull_request_title_regex)
self.pull_request_title_regex = pull_request_title_regex
except Exception:
msg = (
'`pull_request_title_regex` is not valid, '
f'Falling back to {self.pull_request_title_regex}.'
)
print_message(msg, message_type='error')
def validate_version_regex(self):
"""Validate and set validate_version_regex configuration option"""
version_regex = self.user_raw_config.get('version_regex')
if not version_regex:
msg = (
'`version_regex` is not provided, '
f'Falling back to {self.version_regex}.'
)
print_message(msg, message_type='warning')
return
try:
# This will raise an error if the provided regex is not valid
re.compile(version_regex)
self.version_regex = version_regex
except Exception:
msg = (
'`version_regex` is not valid, '
f'Falling back to {self.version_regex}.'
)
print_message(msg, message_type='warning')
def validate_changelog_type(self):
"""Validate and set changelog_type configuration option"""
changelog_type = self.user_raw_config.get('changelog_type')
if not (
changelog_type and
isinstance(changelog_type, str) and
changelog_type in [PULL_REQUEST, COMMIT]
):
msg = (
'`changelog_type` was not provided or not valid, '
f'the options are "{PULL_REQUEST}" or "{COMMIT}", '
f'falling back to default value of "{self.changelog_type}".'
)
print_message(msg, message_type='warning')
else:
self.changelog_type = changelog_type
def validate_group_config(self):
"""Validate and set group_config configuration option"""
group_config = self.user_raw_config.get('group_config')
if not group_config:
msg = '`group_config` was not provided'
print_message(msg, message_type='warning')
return
if not isinstance(group_config, list):
msg = '`group_config` is not valid, It must be an Array/List.'
print_message(msg, message_type='error')
return
for item in group_config:
self.validate_group_config_item(item)
def validate_group_config_item(self, item):
"""Validate and set group_config item configuration option"""
if not isinstance(item, dict):
msg = (
'`group_config` items must have key, '
'value pairs of `title` and `labels`'
)
print_message(msg, message_type='error')
return
title = item.get('title')
labels = item.get('labels')
if not title or not isinstance(title, str):
msg = (
'`group_config` item must contain string title, '
f'but got `{title}`'
)
print_message(msg, message_type='error')
return
if not labels or not isinstance(labels, list):
msg = (
'`group_config` item must contain array of labels, '
f'but got `{labels}`'
)
print_message(msg, message_type='error')
return
if not all(isinstance(label, str) for label in labels):
msg = (
'`group_config` labels array must be string type, '
f'but got `{labels}`'
)
print_message(msg, message_type='error')
return
self.group_config.append(item)
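# Illustrative configuration file accepted by the validation above (a sketch,
# not a file shipped with this action). All keys are optional; invalid values
# fall back to the defaults set in ChangelogCIConfiguration.__init__:
#
#     header_prefix: "Version:"
#     commit_changelog: true
#     comment_changelog: false
#     pull_request_title_regex: "^(?i:release)"
#     changelog_type: "pull_request"   # or "commit_message"
#     group_config:
#       - title: "Bug Fixes"
#         labels: ["bug", "bugfix"]
#       - title: "Features"
#         labels: ["enhancement", "feature"]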
def print_message(message, message_type=None):
"""Helper function to print colorful outputs in GitHub Actions shell"""
# docs: https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions
if not message_type:
return subprocess.run(['echo', f'{message}'])
if message_type == 'endgroup':
return subprocess.run(['echo', '::endgroup::'])
return subprocess.run(['echo', f'::{message_type}::{message}'])
CI_CLASSES = {
PULL_REQUEST: ChangelogCIPullRequest,
COMMIT: ChangelogCICommitMessage
}
if __name__ == '__main__':
# Default environment variable from GitHub
# https://docs.github.com/en/actions/configuring-and-managing-workflows/using-environment-variables
event_path = os.environ['GITHUB_EVENT_PATH']
repository = os.environ['GITHUB_REPOSITORY']
pull_request_branch = os.environ['GITHUB_HEAD_REF']
# User inputs from workflow
filename = os.environ['INPUT_CHANGELOG_FILENAME']
config_file = os.environ['INPUT_CONFIG_FILE']
# Token provided from the workflow
token = os.environ.get('GITHUB_TOKEN')
# Committer username and email address
username = os.environ['INPUT_COMMITTER_USERNAME']
email = os.environ['INPUT_COMMITTER_EMAIL']
# Group: Checkout git repository
print_message('Checkout git repository', message_type='group')
subprocess.run(['git', 'fetch', '--prune', '--unshallow', 'origin', pull_request_branch])
subprocess.run(['git', 'checkout', pull_request_branch])
print_message('', message_type='endgroup')
# Group: Configure Git
print_message('Configure Git', message_type='group')
subprocess.run(['git', 'config', 'user.name', username])
subprocess.run(['git', 'config', 'user.email', email])
print_message('', message_type='endgroup')
print_message('Parse Configuration', message_type='group')
config = ChangelogCIConfiguration(config_file)
print_message('', message_type='endgroup')
# Group: Generate Changelog
print_message('Generate Changelog', message_type='group')
# Get CI class using configuration
changelog_ci_class = CI_CLASSES.get(
config.changelog_type
)
# Initialize the Changelog CI
ci = changelog_ci_class(
repository,
event_path,
config,
pull_request_branch,
filename=filename,
token=token
)
# Run Changelog CI
ci.run()
print_message('', message_type='endgroup')
|
92047
|
from unittest import TestCase
from django_jsonform.utils import normalize_schema
class TestNormalizeSchemaFunction(TestCase):
"""Tests for utils.normalize_schema function"""
def test_normalized_schema_is_same(self):
"""Normalized schema must be the same as input schema
if there are no python objects in the schema.
"""
schema = {
'type': 'dict',
'keys': {
'name': {
'type': 'string',
},
'wishlist': {
'type': 'array',
'items': {
'type': 'string',
}
}
}
}
self.assertEqual(schema, normalize_schema(schema))
|
92095
|
import sys
import logging
def get_logger(name: str):
logger = logging.getLogger(name)
logformat = "[%(asctime)s] %(levelname)s:%(name)s: %(message)s"
logging.basicConfig(level=logging.INFO, stream=sys.stdout,
format=logformat, datefmt="%Y-%m-%d %H:%M:%S")
return logger
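# Usage sketch (illustrative, not part of the original module). Note that
# logging.basicConfig only configures the root logger on the first call, so
# repeated get_logger calls reuse the initial format and level.
#     log = get_logger(__name__)
#     log.info("pipeline started")
#     # [2021-01-01 00:00:00] INFO:__main__: pipeline started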
|
92135
|
from __future__ import absolute_import, division, print_function
from iotbx.kriber import strudat
from cctbx import geometry_restraints
from cctbx import crystal
from cctbx.array_family import flex
import scitbx.math
from scitbx import matrix
from libtbx.test_utils import approx_equal, show_diff
from libtbx.utils import format_cpu_times
import libtbx.load_env
from libtbx import dict_with_default_0
from six.moves import cStringIO as StringIO
import math
import sys, os
from six.moves import range
from six.moves import zip
def exercise_icosahedron(max_level=2, verbose=0):
for level in range(0,max_level+1):
if (0 or verbose):
print("level:", level)
icosahedron = scitbx.math.icosahedron(level=level)
try:
distance_cutoff = icosahedron.next_neighbors_distance()*(1+1.e-3)
estimated_distance_cutoff = False
except RuntimeError as e:
assert str(e) == "next_neighbors_distance not known."
distance_cutoff = 0.4/(2**(level-1))
estimated_distance_cutoff = True
asu_mappings = crystal.direct_space_asu.non_crystallographic_asu_mappings(
sites_cart=icosahedron.sites)
pair_asu_table = crystal.pair_asu_table(asu_mappings=asu_mappings)
pair_asu_table.add_all_pairs(distance_cutoff=distance_cutoff)
if (0 or verbose):
ps = pair_asu_table.show_distances(sites_cart=icosahedron.sites) \
.distances_info
print("level", level, "min", flex.min(ps.distances))
print(" ", " ", "max", flex.max(ps.distances))
assert ps.pair_counts.all_eq(pair_asu_table.pair_counts())
if (level == 0):
for d in ps.distances:
assert approx_equal(d, 1.0514622242382672)
elif (level < 2):
s = StringIO()
ps = pair_asu_table.show_distances(sites_cart=icosahedron.sites, out=s) \
.distances_info
assert ps.pair_counts.all_eq(pair_asu_table.pair_counts())
assert len(s.getvalue().splitlines()) == [72,320][level]
del s
if (level == 0):
assert pair_asu_table.pair_counts().all_eq(5)
else:
assert pair_asu_table.pair_counts().all_eq(3)
del pair_asu_table
max_distance = crystal.neighbors_fast_pair_generator(
asu_mappings=asu_mappings,
distance_cutoff=distance_cutoff).max_distance_sq()**.5
if (0 or verbose):
print("max_distance:", max_distance)
if (not estimated_distance_cutoff):
assert approx_equal(max_distance, icosahedron.next_neighbors_distance())
assert approx_equal(max_distance/icosahedron.next_neighbors_distance(),1)
def is_sym_equiv_interaction_simple(unit_cell,
i_seq,
site_frac_i,
j_seq,
site_frac_j,
special_op_j,
rt_mx_ji_1,
rt_mx_ji_2):
f = unit_cell.shortest_vector_sq()**.5*.1
trial_shifts = [f*x for x in [math.sqrt(2),math.sqrt(3),math.sqrt(5)]]
frac = unit_cell.fractionalize
orth = unit_cell.orthogonalize
dist = unit_cell.distance
for shifts in [[0,0,0], trial_shifts]:
site_j_mod = special_op_j * frac([x+s
for x,s in zip(orth(site_frac_j),shifts)])
if (shifts == [0,0,0] or j_seq != i_seq):
site_i_mod = site_frac_i
else:
site_i_mod = site_j_mod
d1 = dist(rt_mx_ji_1 * site_j_mod, site_i_mod)
d2 = dist(rt_mx_ji_2 * site_j_mod, site_i_mod)
if (shifts == [0,0,0]):
if (abs(d1-d2) >= 1.e-3):
return False
return abs(d1-d2) < 1.e-3
def check_sym_equiv(structure, bond_asu_table, weak=False):
unit_cell = structure.unit_cell()
asu_mappings = bond_asu_table.asu_mappings()
sites_frac = structure.scatterers().extract_sites()
for i_seq,records in enumerate(bond_asu_table.table()):
rt_mx_i_inv = asu_mappings.get_rt_mx(i_seq, 0).inverse()
for j_seq,j_sym_groups in records.items():
i_group_rt_mx_jis = []
for i_group,j_sym_group in enumerate(j_sym_groups):
for j_sym in j_sym_group:
rt_mx_ji = rt_mx_i_inv.multiply(asu_mappings.get_rt_mx(j_seq, j_sym))
i_group_rt_mx_jis.append((i_group,rt_mx_ji))
for gi,ri in i_group_rt_mx_jis:
for gj,rj in i_group_rt_mx_jis:
is_sym_equiv = is_sym_equiv_interaction_simple(
unit_cell=unit_cell,
i_seq=i_seq,
site_frac_i=sites_frac[i_seq],
j_seq=j_seq,
site_frac_j=sites_frac[j_seq],
special_op_j=asu_mappings.special_op(j_seq),
rt_mx_ji_1=ri,
rt_mx_ji_2=rj)
if (is_sym_equiv):
if (not weak): assert gi == gj
else:
assert gi != gj
def check_connectivities(bond_asu_table, connectivities, verbose=0):
n_mismatches = 0
for records,connectivity in zip(bond_asu_table.table(), connectivities):
n = 0
for j_seq,j_sym_groups in records.items():
for j_sym_group in j_sym_groups:
n += len(j_sym_group)
if (0 or verbose):
print("n, connectivity:", n, connectivity)
assert n == connectivity
def exercise_incremental_pairs(
structure,
distance_cutoff,
reference_pair_asu_table):
ip = structure.incremental_pairs(distance_cutoff=distance_cutoff)
for site_frac in structure.sites_frac():
ip.process_site_frac(original_site=site_frac)
assert ip.pair_asu_table().pair_counts().all_eq(
reference_pair_asu_table.pair_counts())
assert ip.pair_asu_table() == reference_pair_asu_table
def exercise_site_cluster_analysis(
structure,
distance_cutoff,
reference_pair_asu_table):
pat_selection = flex.size_t()
pat_keep = []
for i_seq,pair_asu_dict in enumerate(reference_pair_asu_table.table()):
for j_seq,pair_asu_j_sym_groups in pair_asu_dict.items():
if (j_seq == i_seq):
for j_sym_group in pair_asu_j_sym_groups:
assert 0 not in j_sym_group
pat_keep.append(False)
break
if (j_seq < i_seq and pat_keep[j_seq]):
pat_keep.append(False)
break
else:
pat_keep.append(True)
pat_selection.append(i_seq)
assert reference_pair_asu_table.cluster_pivot_selection().all_eq(
pat_selection)
assert reference_pair_asu_table.cluster_pivot_selection(
max_clusters=3).all_eq(pat_selection[:3])
#
sca = structure.site_cluster_analysis(min_distance=distance_cutoff)
sca_selection = flex.size_t()
for i_seq,site_frac in enumerate(structure.sites_frac()):
if (sca.process_site_frac(original_site=site_frac)):
sca_selection.append(i_seq)
assert sca_selection.all_eq(pat_selection)
#
sca = structure.site_cluster_analysis(min_distance=distance_cutoff)
sca_selection = sca.process_sites_frac(
original_sites=structure.sites_frac(),
site_symmetry_table=structure.site_symmetry_table())
assert sca_selection.all_eq(pat_selection)
#
sca = structure.site_cluster_analysis(min_distance=distance_cutoff)
sca_selection = sca.process_sites_frac(
original_sites=structure.sites_frac(),
site_symmetry_table=structure.site_symmetry_table(),
max_clusters=3)
assert sca_selection.all_eq(pat_selection[:3])
#
sca = structure.site_cluster_analysis(min_distance=distance_cutoff)
sca_selection = sca.process_sites_frac(
original_sites=structure.sites_frac())
assert sca_selection.all_eq(pat_selection)
#
sca = structure.site_cluster_analysis(min_distance=distance_cutoff)
sca_selection = sca.process_sites_frac(
original_sites=structure.sites_frac(),
max_clusters=3)
assert sca_selection.all_eq(pat_selection[:3])
#
sca = structure.site_cluster_analysis(min_distance=distance_cutoff)
sca_selection = sca.process_sites_cart(
original_sites=structure.sites_cart(),
site_symmetry_table=structure.site_symmetry_table())
assert sca_selection.all_eq(pat_selection)
#
sca = structure.site_cluster_analysis(min_distance=distance_cutoff)
sca_selection = sca.process_sites_cart(
original_sites=structure.sites_cart(),
site_symmetry_table=structure.site_symmetry_table(),
max_clusters=3)
assert sca_selection.all_eq(pat_selection[:3])
#
sca = structure.site_cluster_analysis(min_distance=distance_cutoff)
sca_selection = sca.process_sites_cart(
original_sites=structure.sites_cart())
assert sca_selection.all_eq(pat_selection)
#
sca = structure.site_cluster_analysis(min_distance=distance_cutoff)
sca_selection = sca.process_sites_cart(
original_sites=structure.sites_cart(),
max_clusters=3)
assert sca_selection.all_eq(pat_selection[:3])
#
sca = structure.site_cluster_analysis(
min_distance=distance_cutoff,
general_positions_only=True)
sca_selection = sca.process_sites_frac(
original_sites=structure.sites_frac(),
site_symmetry_table=structure.site_symmetry_table())
pat_selection = reference_pair_asu_table.cluster_pivot_selection(
general_positions_only=True)
assert sca_selection.all_eq(pat_selection)
def exercise(
structure,
distance_cutoff,
connectivities=None,
weak_check_sym_equiv=False,
verbose=0):
if (0 or verbose):
print("distance_cutoff:", distance_cutoff)
asu_mappings = structure.asu_mappings(buffer_thickness=distance_cutoff)
for i_pass in range(2):
if (i_pass == 0):
bond_asu_table = crystal.pair_asu_table(asu_mappings=asu_mappings)
bond_asu_table.add_all_pairs(
distance_cutoff=distance_cutoff)
exercise_incremental_pairs(
structure=structure,
distance_cutoff=distance_cutoff,
reference_pair_asu_table=bond_asu_table)
exercise_site_cluster_analysis(
structure=structure,
distance_cutoff=distance_cutoff,
reference_pair_asu_table=bond_asu_table)
else:
bond_sym_table = bond_asu_table.extract_pair_sym_table()
bond_asu_table = crystal.pair_asu_table(asu_mappings=asu_mappings)
bond_asu_table.add_pair_sym_table(
sym_table=bond_sym_table)
def exercise_symmetry_equivalent_pair_interactions():
asu_mappings = bond_asu_table.asu_mappings()
for i_seq, j_seq_dict in enumerate(bond_asu_table.table()):
rt_mx_i = asu_mappings.get_rt_mx(i_seq, 0)
rt_mx_i_inv = rt_mx_i.inverse()
for j_seq,j_sym_group in j_seq_dict.items():
scs = structure.scatterers()
def get_coords(symops):
result = []
for s in symops:
result.append(numstr(s * scs[j_seq].site))
result.sort()
return result
prev_equiv_rt_mx_ji = None
for j_syms in j_sym_group:
equiv_rt_mx_ji = []
for j_sym in j_syms:
rt_mx_ji = rt_mx_i_inv.multiply(
asu_mappings.get_rt_mx(j_seq, j_sym))
equiv_rt_mx_ji.append(rt_mx_ji)
old_coords = get_coords(equiv_rt_mx_ji)
all_sepi = set()
for rt_mx_ji in equiv_rt_mx_ji:
_ = asu_mappings.site_symmetry_table()
sepi_obj = _.symmetry_equivalent_pair_interactions(
i_seq=i_seq, j_seq=j_seq, rt_mx_ji=rt_mx_ji)
sepi = sepi_obj.get()
new_coords = get_coords(sepi)
assert new_coords == old_coords
all_sepi.add(";".join([str(_) for _ in sepi]))
for _ in equiv_rt_mx_ji:
assert sepi_obj.is_equivalent(rt_mx_ji=_)
if (prev_equiv_rt_mx_ji is not None):
for _ in prev_equiv_rt_mx_ji:
assert not sepi_obj.is_equivalent(rt_mx_ji=_)
assert len(all_sepi) == 1
prev_equiv_rt_mx_ji = equiv_rt_mx_ji
exercise_symmetry_equivalent_pair_interactions()
def exercise_pair_sym_table_tidy_and_full_connectivity():
def check_one_way(pst):
for sym_pair in pst.iterator():
i_seq, j_seq = sym_pair.i_seqs()
assert i_seq <= j_seq
assert len(pst[i_seq][j_seq]) > 0
if (i_seq != j_seq):
assert i_seq not in pst[j_seq]
def check_two_way(pst):
for sym_pair in pst.iterator():
i_seq, j_seq = sym_pair.i_seqs()
assert len(pst[i_seq][j_seq]) > 0
assert len(pst[j_seq][i_seq]) > 0
pst_extracted = bond_sym_table.tidy(
site_symmetry_table=structure.site_symmetry_table())
check_one_way(pst_extracted)
sio_extracted = StringIO()
structure.pair_sym_table_show(pst_extracted, out=sio_extracted)
pst = pst_extracted.tidy(
site_symmetry_table=structure.site_symmetry_table())
check_one_way(pst)
sio = StringIO()
structure.pair_sym_table_show(pst, out=sio)
assert not show_diff(sio.getvalue(), sio_extracted.getvalue())
pst = pst_extracted.full_connectivity()
check_two_way(pst)
pst_full = pst_extracted.full_connectivity(
site_symmetry_table=structure.site_symmetry_table())
check_two_way(pst_full)
sio = StringIO()
structure.pair_sym_table_show(
pst_full, is_full_connectivity=True, out=sio)
assert sio.getvalue().find("sym. equiv.") < 0
pst = pst_full.tidy(
site_symmetry_table=structure.site_symmetry_table())
check_one_way(pst)
sio = StringIO()
structure.pair_sym_table_show(pst, out=sio)
assert not show_diff(sio.getvalue(), sio_extracted.getvalue())
pst_full2 = pst_full.full_connectivity(
site_symmetry_table=structure.site_symmetry_table())
check_two_way(pst_full2)
pst = pst_full2.tidy(
site_symmetry_table=structure.site_symmetry_table())
check_one_way(pst)
sio = StringIO()
structure.pair_sym_table_show(pst, out=sio)
assert not show_diff(sio.getvalue(), sio_extracted.getvalue())
exercise_pair_sym_table_tidy_and_full_connectivity()
if (connectivities is not None):
check_connectivities(bond_asu_table, connectivities, verbose)
check_sym_equiv(
structure=structure,
bond_asu_table=bond_asu_table,
weak=weak_check_sym_equiv)
def exercise_bond_sorted_asu_proxies(
structure,
distance_cutoff):
asu_mappings = structure.asu_mappings(buffer_thickness=distance_cutoff)
bond_asu_table = crystal.pair_asu_table(asu_mappings=asu_mappings)
bond_asu_table.add_all_pairs(distance_cutoff=distance_cutoff)
bond_sym_table = bond_asu_table.extract_pair_sym_table()
el = bond_sym_table.simple_edge_list()
es = bond_sym_table.full_simple_connectivity()
assert es.size() == bond_sym_table.size()
for i,j in el:
assert j in es[i]
assert i in es[j]
npis = bond_sym_table.number_of_pairs_involving_symmetry()
assert len(list(bond_sym_table.iterator())) == len(el) + npis
bond_params_table = geometry_restraints.bond_params_table(
structure.scatterers().size())
for i_seq,bond_sym_dict in enumerate(bond_sym_table):
for j_seq in bond_sym_dict.keys():
if (i_seq > j_seq):
j_seq,i_seq = i_seq,j_seq
bond_params_table[i_seq][j_seq] = geometry_restraints.bond_params(
distance_ideal=3.1, weight=1)
proxies_fast = geometry_restraints.bond_sorted_asu_proxies(
bond_params_table=bond_params_table,
bond_asu_table=bond_asu_table)
proxies_conservative = geometry_restraints.bond_sorted_asu_proxies(
pair_asu_table=bond_asu_table)
pair_generator = crystal.neighbors_simple_pair_generator(
asu_mappings=asu_mappings,
distance_cutoff=distance_cutoff,
minimal=False)
proxies_slow = geometry_restraints.bond_sorted_asu_proxies(
asu_mappings=asu_mappings)
for pair in pair_generator:
proxies_slow.process(geometry_restraints.bond_asu_proxy(
pair=pair,
distance_ideal=3.1,
weight=1))
def compare_proxies(proxies_1, proxies_2):
assert proxies_1.simple.size() == proxies_2.simple.size()
assert proxies_1.asu.size() == proxies_2.asu.size()
ctrl = {}
for proxy in proxies_1.simple:
assert proxy.i_seqs not in ctrl
ctrl[proxy.i_seqs] = 0
for proxy in proxies_2.simple:
assert proxy.i_seqs in ctrl
ctrl[proxy.i_seqs] += 1
assert list(ctrl.values()) == [1]*len(ctrl)
ctrl = {}
for proxy in proxies_1.asu:
key = proxy.i_seq,proxy.j_seq,proxy.j_sym
assert key not in ctrl
ctrl[key] = 0
for proxy in proxies_2.asu:
key = proxy.i_seq,proxy.j_seq,proxy.j_sym
assert key in ctrl
ctrl[key] += 1
assert list(ctrl.values()) == [1]*len(ctrl)
compare_proxies(proxies_1=proxies_fast, proxies_2=proxies_conservative)
compare_proxies(proxies_1=proxies_fast, proxies_2=proxies_slow)
sites_cart = structure.sites_cart()
for proxy in proxies_conservative.simple:
i,j = proxy.i_seqs
assert approx_equal(
abs(matrix.col(sites_cart[i]) - matrix.col(sites_cart[j])),
proxy.distance_ideal)
assert proxy.weight == 1
distance = proxies_conservative.asu_mappings().unit_cell().distance
get_rt_mx_ji = proxies_conservative.asu_mappings().get_rt_mx_ji
sites_frac = structure.sites_frac()
for proxy in proxies_conservative.asu:
assert approx_equal(
distance(
sites_frac[proxy.i_seq],
get_rt_mx_ji(pair=proxy) * sites_frac[proxy.j_seq]),
proxy.distance_ideal)
assert proxy.weight == 1
def py_pair_asu_table_angle_pair_asu_table(self):
asu_mappings = self.asu_mappings()
result = crystal.pair_asu_table(asu_mappings=asu_mappings)
for i_seq,asu_dict in enumerate(self.table()):
pair_list = []
for j_seq,j_sym_groups in asu_dict.items():
for i_group,j_sym_group in enumerate(j_sym_groups):
for j_sym in j_sym_group:
pair_list.append((j_seq,j_sym))
for i_jj1 in range(0,len(pair_list)-1):
jj1 = pair_list[i_jj1]
rt_mx_jj1_inv = asu_mappings.get_rt_mx(*jj1).inverse()
for i_jj2 in range(i_jj1+1,len(pair_list)):
jj2 = pair_list[i_jj2]
result.add_pair(
i_seq=jj1[0],
j_seq=jj2[0],
rt_mx_ji=rt_mx_jj1_inv.multiply(asu_mappings.get_rt_mx(*jj2)))
return result
def exercise_angle_pair_asu_table(
structure,
distance_cutoff,
connectivities,
reference_apatanl,
reference_cppc):
sg_asu_mappings = structure.asu_mappings(
buffer_thickness=2*distance_cutoff)
sg_pat = crystal.pair_asu_table(asu_mappings=sg_asu_mappings)
sg_pat.add_all_pairs(
distance_cutoff=distance_cutoff,
min_cubicle_edge=0)
# compare connectivities with reference
assert list(sg_pat.pair_counts()) == connectivities
#
p1_structure = structure.expand_to_p1()
p1_asu_mappings = p1_structure.asu_mappings(
buffer_thickness=2*distance_cutoff)
p1_pat = crystal.pair_asu_table(asu_mappings=p1_asu_mappings)
p1_pat.add_all_pairs(
distance_cutoff=distance_cutoff,
min_cubicle_edge=0)
sg_labels = structure.scatterers().extract_labels()
p1_labels = p1_structure.scatterers().extract_labels()
label_connect = dict(zip(sg_labels, sg_pat.pair_counts()))
for l,c in zip(p1_labels, p1_pat.pair_counts()):
# compare connectivities in original space group and in P1
assert label_connect[l] == c
#
sg_apat_py = py_pair_asu_table_angle_pair_asu_table(self=sg_pat)
sg_apat = sg_pat.angle_pair_asu_table()
assert sg_apat.as_nested_lists() == sg_apat_py.as_nested_lists()
sg_counts = {}
for i_seq,pair_asu_dict in enumerate(sg_apat.table()):
lbl_i = sg_labels[i_seq]
for j_seq,pair_asu_j_sym_groups in pair_asu_dict.items():
lbl_j = sg_labels[j_seq]
for j_sym_group in pair_asu_j_sym_groups:
sg_counts.setdefault(lbl_i, dict_with_default_0())[
lbl_j] += len(j_sym_group)
p1_apat = p1_pat.angle_pair_asu_table()
p1_counts = {}
for i_seq,pair_asu_dict in enumerate(p1_apat.table()):
lbl_i = p1_labels[i_seq]
for j_seq,pair_asu_j_sym_groups in pair_asu_dict.items():
lbl_j = p1_labels[j_seq]
for j_sym_group in pair_asu_j_sym_groups:
p1_counts.setdefault(lbl_i, dict_with_default_0())[
lbl_j] += len(j_sym_group)
# self-consistency check
multiplicities = {}
for sc in structure.scatterers():
multiplicities[sc.label] = sc.multiplicity()
assert sorted(p1_counts.keys()) == sorted(sg_counts.keys())
for lbl_i,sg_lc in sg_counts.items():
p1_lc = p1_counts[lbl_i]
assert sorted(p1_lc.keys()) == sorted(sg_lc.keys())
for lbl_j,sg_c in sg_lc.items():
p1_c = p1_lc[lbl_j]
assert p1_c == sg_c * multiplicities[lbl_i]
# compare with reference
apatanl = str(sg_apat.as_nested_lists()).replace(" ","")
if (reference_apatanl is not None):
assert apatanl == reference_apatanl
#
counts = []
for conserve_angles in [False, True]:
proxies = structure.conservative_pair_proxies(
bond_sym_table=sg_pat.extract_pair_sym_table(),
conserve_angles=conserve_angles)
counts.extend([proxies.bond.simple.size(), proxies.bond.asu.size()])
if (not conserve_angles):
assert proxies.angle is None
else:
counts.extend([proxies.angle.simple.size(), proxies.angle.asu.size()])
cppc = ",".join([str(c) for c in counts])
if (reference_cppc is not None):
assert cppc == reference_cppc
def exercise_all():
verbose = "--verbose" in sys.argv[1:]
exercise_icosahedron(verbose=verbose)
default_distance_cutoff = 3.5
regression_misc = libtbx.env.find_in_repositories("phenix_regression/misc")
if (regression_misc is None):
print("Skipping exercise_all(): phenix_regression/misc not available")
return
def get_reference_dict(file_name):
path = os.path.join(regression_misc, file_name)
if (not os.path.isfile(path)):
print("Skipping some tests: reference file not available:", path)
return None
result = {}
with open(path) as f:
lines = f.read().splitlines()
for line in lines:
tag, data = line.split()
      assert tag not in result
result[tag] = data
return result
reference_apatanl_dict = get_reference_dict(
"angle_pair_asu_tables_as_nested_lists")
reference_cppc_dict = get_reference_dict(
"conservative_pair_proxies_counts")
file_names = []
for file_name in ["strudat_zeolite_atlas", "strudat_special_bonds"]:
path = os.path.join(regression_misc, file_name)
if (not os.path.isfile(path)):
print("Skipping %s test: input file not available" % file_name)
else:
file_names.append(path)
for file_name in file_names:
with open(file_name) as f:
strudat_entries = strudat.read_all_entries(f)
for i_entry,entry in enumerate(strudat_entries.entries):
if ( file_name.endswith("strudat_zeolite_atlas")
and not ("--full" in sys.argv[1:] or i_entry % 20 == 0)):
continue
if (0 or verbose):
print("strudat tag:", entry.tag)
structure = entry.as_xray_structure()
if (0 or verbose):
structure.show_summary().show_scatterers()
if (entry.title.startswith("cutoff")):
distance_cutoff = float(entry.title.split()[1])
else:
distance_cutoff = default_distance_cutoff
weak_check_sym_equiv = (
entry.reference.find("weak_check_sym_equiv") >= 0)
connectivities = entry.connectivities(all_or_nothing=True)
if (1):
exercise(
structure=structure,
distance_cutoff=distance_cutoff,
connectivities=connectivities,
weak_check_sym_equiv=weak_check_sym_equiv,
verbose=verbose)
if (0 or verbose):
print()
if (file_name.endswith("strudat_zeolite_atlas")):
exercise_bond_sorted_asu_proxies(
structure=structure,
distance_cutoff=distance_cutoff)
if (reference_apatanl_dict is None):
reference_apatanl = None
else:
assert entry.tag in reference_apatanl_dict
reference_apatanl = reference_apatanl_dict[entry.tag]
if (reference_cppc_dict is None):
reference_cppc = None
else:
assert entry.tag in reference_cppc_dict
reference_cppc = reference_cppc_dict[entry.tag]
exercise_angle_pair_asu_table(
structure=structure,
distance_cutoff=distance_cutoff,
connectivities=connectivities,
reference_apatanl=reference_apatanl,
reference_cppc=reference_cppc)
def run():
exercise_all()
print(format_cpu_times())
if (__name__ == "__main__"):
run()
|
92181
|
import struct
import time
import yaml
import yaml.resolver
from collections import OrderedDict
import data_types
from data_types import TypeID
def now_ns():
    # time.time() returns seconds; scale by 10 ** 9 so the value is in
    # nanoseconds, as the function name implies.
    return int(time.time() * 10 ** 9)
class Schema:
""" The schema for the data in the atomic multilog.
"""
def __init__(self, columns):
""" Initializes the schema to the list of columns passed in.
Args:
columns: The list of columns that make up the schema.
"""
self.record_size_ = 0
self.columns_ = columns
for c in self.columns_:
self.record_size_ += c.data_type_.size_
def __str__(self):
""" Convert to string
Returns:
String representation of schema
"""
return str(self.columns_)
def record_size(self):
""" Get record size in bytes
Returns:
Record size in bytes
"""
return self.record_size_
def columns(self):
""" Get list of columns
Returns:
List of columns
"""
return self.columns_
def apply(self, data):
""" Adds data to the schema.
Args:
data: The data to add.
Returns:
The record.
"""
return Record(data, self)
def pack(self, rec):
""" Pack data into a record.
Args:
rec: The record to pack
Returns:
Packed record
"""
packed = ""
if len(rec) == len(self.columns_):
off = 1
packed += struct.pack('Q', rec[0])
elif len(rec) == len(self.columns_) - 1:
off = 0
packed += struct.pack('Q', now_ns())
else:
raise ValueError("Record does not conform to schema: incorrect number of fields")
for f, c in zip(rec[off:], self.columns_[1:]):
packed += c.data_type_.pack(f)
return packed
class Column:
""" Container of values for a specific type in the schema.
"""
def __init__(self, idx, offset, data_type, name, min_value, max_value):
""" Initializes a column in the schema.
Args:
idx: The index of the column.
offset: The offset of the column.
data_type: The data type of values in the column.
name: The name of the column.
min_value: The minimum value of the column.
max_value: The maximum value of the column.
"""
self.idx_ = idx
self.offset_ = offset
self.data_type_ = data_type
self.name_ = name.upper()
        self.min_value_ = min_value
        self.max_value_ = max_value
def __str__(self):
""" Convert to string
Returns:
String representation of the column
"""
return '{} : {}'.format(self.name_, self.data_type_)
def apply(self, data):
""" Adds data to the column.
Args:
data: The data to add.
Returns:
A field containing the data.
"""
return Field(self.idx_, self.data_type_, data[self.offset_: self.offset_ + self.data_type_.size_])
class Record:
""" A collection of values containing different types.
"""
def __init__(self, data, schema):
"""
Initializes a record to the specified values.
Args:
data: The data the record should hold.
schema: The schema for the record.
"""
self.data_ = data
self.fields_ = [c.apply(self.data_) for c in schema.columns()]
def __str__(self):
""" Converts to string
Returns:
String representation of record
"""
return str([str(x.unpack()) for x in self.fields_])
def __getitem__(self, idx):
""" Get element at specified index
Args:
idx: Index into record
Returns:
Element at specified index
"""
return self.fields_[idx].unpack()
class Field:
""" Contains data stored as part of a record.
"""
def __init__(self, idx, data_type, data):
""" Initializes the field to the data passed in.
Args:
idx: The index of the field.
data_type: The data type the value of the field contains.
data: The data that the field contains.
"""
self.idx_ = idx
self.data_type_ = data_type
self.data_ = data
def unpack(self):
""" Unpacks the field to get the data.
Returns:
The data in the field.
"""
tid = self.data_type_.type_id_
if tid == TypeID.STRING:
format_code = str(self.data_type_.size_) + data_types.FORMAT_CODES[tid]
else:
format_code = data_types.FORMAT_CODES[tid]
return struct.unpack(format_code, self.data_)[0]
class SchemaBuilder:
""" Builder of a schema for the atomic multilog.
"""
def __init__(self):
""" Initializes a default schema builder.
"""
self.user_provided_ts_ = False
self.offset_ = 0
self.columns_ = []
timestamp_col = Column(0, 0, data_types.ULONG_TYPE, "TIMESTAMP", None, None)
self.columns_.append(timestamp_col)
self.offset_ += data_types.ULONG_TYPE.size_
def add_column(self, data_type, name, min_value=None, max_value=None):
""" Adds a column to the schema builder.
Args:
data_type: The data type of the column.
name: The name of the column.
min_value: The minimum value of the column.
max_value: The maximum value of the column.
"""
if name.upper() == "TIMESTAMP":
self.user_provided_ts_ = True
if data_type != data_types.ULONG_TYPE:
raise ValueError("TIMESTAMP must be of ULONG_TYPE")
return self
col = Column(len(self.columns_), self.offset_, data_type, name, min_value, max_value)
self.columns_.append(col)
self.offset_ += data_type.size_
return self
def build(self):
""" Builds a schema by returning the list of columns.
Returns:
A list of columns that make up the schema.
"""
return self.columns_
def make_schema(s):
"""Converts a JSON-like string representation of the schema to our internal representation of the schema.
Args:
s: A JSON-like schema string
Returns:
Our internal representation of the schema.
"""
def ordered_load(stream):
class OrderedLoader(yaml.Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return OrderedDict(loader.construct_pairs(node))
OrderedLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping)
return yaml.load(stream, OrderedLoader)
s_parsed = ordered_load(s)
sb = SchemaBuilder()
for k in s_parsed:
sb.add_column(data_types.make_type(s_parsed[k]), k)
return Schema(sb.build())
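# Example usage: a minimal sketch that builds a schema with a single ULONG
# column next to the implicit TIMESTAMP column and inspects its record size.
# The column name "value" is arbitrary; only SchemaBuilder, Schema and
# data_types.ULONG_TYPE from above are assumed.
if __name__ == "__main__":
    example_schema = Schema(
        SchemaBuilder().add_column(data_types.ULONG_TYPE, "value").build()
    )
    # Expected record size: TIMESTAMP (ULONG) + "value" (ULONG).
    print("record size in bytes:", example_schema.record_size())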
|
92205
|
from cliche import cli
@cli
def exception_example():
raise ValueError("No panic! This is a known error")
|
92234
|
from sqlalchemy import Column, func
from clickhouse_sqlalchemy import types, Table
from tests.testcase import CompilationTestCase
class CountTestCaseBase(CompilationTestCase):
table = Table(
't1', CompilationTestCase.metadata(),
Column('x', types.Int32, primary_key=True)
)
def test_count(self):
self.assertEqual(
self.compile(self.session.query(func.count(self.table.c.x))),
'SELECT count(t1.x) AS count_1 FROM t1'
)
def test_count_distinct(self):
query = self.session.query(func.count(func.distinct(self.table.c.x)))
self.assertEqual(
self.compile(query),
'SELECT count(distinct(t1.x)) AS count_1 FROM t1'
)
def test_count_no_column_specified(self):
query = self.session.query(func.count()).select_from(self.table)
self.assertEqual(
self.compile(query),
'SELECT count(*) AS count_1 FROM t1'
)
|
92241
|
import json
from adsimulator.templates.default_values import DEFAULT_VALUES
def print_all_parameters(parameters):
print("")
print("New Settings:")
print(json.dumps(parameters, indent=4, sort_keys=True))
def get_perc_param_value(node, key, parameters):
try:
if 0 <= parameters[node][key] <= 100:
return parameters[node][key]
else:
return 100
    except Exception:
return DEFAULT_VALUES[node][key]
def get_dict_param_value(node, key, parameters):
try:
value = parameters[node][key]
if type(value) == dict:
return value
else:
return DEFAULT_VALUES[node][key]
    except Exception:
return DEFAULT_VALUES[node][key]
def get_int_param_value(node, key, parameters):
try:
value = parameters[node][key]
if type(value) == int and value > 0:
return value
else:
return DEFAULT_VALUES[node][key]
    except Exception:
return DEFAULT_VALUES[node][key]
def get_int_param_value_with_upper_limit(node, key, parameters, max_value):
try:
value = parameters[node][key]
if type(value) == int and 0 < value <= max_value:
return value
if type(value) == int and value > max_value:
return max_value
else:
return DEFAULT_VALUES[node][key]
    except Exception:
return DEFAULT_VALUES[node][key]
def print_computer_generation_parameters(enabled, has_laps, unconstrained_delegation, prob):
print("\t- Enabled computer probability:", str(enabled), "%")
print("\t- HasLaps computer probability:", str(has_laps), "%")
print("\t- Unconstrained delegation computer probability:", str(unconstrained_delegation), "%")
sum = 0
for key in prob.keys():
sum += prob[key]
if sum != 100:
prob = DEFAULT_VALUES["Computer"]["osProbability"]
print("\t- Computer OS probability:")
print("\t\t- Windows XP Professional Service Pack 3:", str(prob["Windows XP Professional Service Pack 3"]), "%")
print("\t\t- Windows 7 Professional Service Pack 1:", str(prob["Windows 7 Professional Service Pack 1"]), "%")
print("\t\t- Windows 7 Ultimate Service Pack 1:", str(prob["Windows 7 Ultimate Service Pack 1"]), "%")
print("\t\t- Windows 7 Enterprise Service Pack 1:", str(prob["Windows 7 Enterprise Service Pack 1"]), "%")
print("\t\t- Windows 10 Pro:", str(prob["Windows 10 Pro"]), "%")
print("\t\t- Windows 10 Enterprise:", str(prob["Windows 10 Enterprise"]), "%")
def print_dc_generation_parameters(enabled, has_laps, prob):
print("\t- Enabled DC probability:", str(enabled), "%")
print("\t- HasLaps DC probability:", str(has_laps), "%")
sum = 0
for key in prob.keys():
sum += prob[key]
if sum != 100:
prob = DEFAULT_VALUES["DC"]["osProbability"]
print("\t- Domain Controller OS probability:")
print("\t\t- Windows Server 2003 Enterprise Edition:", str(prob["Windows Server 2003 Enterprise Edition"]), "%")
print("\t\t- Windows Server 2008 Standard:", str(prob["Windows Server 2008 Standard"]), "%")
print("\t\t- Windows Server 2008 Datacenter:", str(prob["Windows Server 2008 Datacenter"]), "%")
print("\t\t- Windows Server 2008 Enterprise:", str(prob["Windows Server 2008 Enterprise"]), "%")
print("\t\t- Windows Server 2008 R2 Standard:", str(prob["Windows Server 2008 R2 Standard"]), "%")
print("\t\t- Windows Server 2008 R2 Datacenter:", str(prob["Windows Server 2008 R2 Datacenter"]), "%")
print("\t\t- Windows Server 2008 R2 Enterprise:", str(prob["Windows Server 2008 R2 Enterprise"]), "%")
print("\t\t- Windows Server 2012 Standard:", str(prob["Windows Server 2012 Standard"]), "%")
print("\t\t- Windows Server 2012 Datacenter:", str(prob["Windows Server 2012 Datacenter"]), "%")
print("\t\t- Windows Server 2012 R2 Standard:", str(prob["Windows Server 2012 R2 Standard"]), "%")
print("\t\t- Windows Server 2012 R2 Datacenter:", str(prob["Windows Server 2012 R2 Datacenter"]), "%")
print("\t\t- Windows Server 2016 Standard:", str(prob["Windows Server 2016 Standard"]), "%")
print("\t\t- Windows Server 2016 Datacenter:", str(prob["Windows Server 2016 Datacenter"]), "%")
def print_user_generation_parameters(enabled, dontreqpreauth, hasspn, passwordnotreqd, pwdneverexpires, unconstraineddelegation, sidhistory):
print("\t- Enabled user probability:", str(enabled), "%")
print("\t- Dontreqpreauth user probability:", str(dontreqpreauth), "%")
print("\t- Dontreqpreauth user probability:", str(dontreqpreauth), "%")
print("\t- Hasspn user probability:", str(hasspn), "%")
print("\t- Passwordnotreqd user probability:", str(passwordnotreqd), "%")
print("\t- Pwdneverexpires user probability:", str(pwdneverexpires), "%")
print("\t- Unconstrained delegation user probability:", str(unconstraineddelegation), "%")
print("\t- User has SID History probability:", str(sidhistory), "%")
def print_domain_generation_parameters(prob):
sum = 0
for key in prob.keys():
sum += prob[key]
if sum != 100:
prob = DEFAULT_VALUES["Domain"]["functionalLevelProbability"]
print("\t- Functional level probability:")
print("\t\t- 2008:", str(prob["2008"]), "%")
print("\t\t- 2008 R2:", str(prob["2008 R2"]), "%")
print("\t\t- 2012:", str(prob["2012"]), "%")
print("\t\t- 2012 R2:", str(prob["2012 R2"]), "%")
print("\t\t- 2016:", str(prob["2016"]), "%")
print("\t\t- Unknown:", str(prob["Unknown"]), "%")
def print_departments_parameters(prob):
sum = 0
for key in prob.keys():
sum += prob[key]
if sum != 100:
prob = DEFAULT_VALUES["Group"]["departmentProbability"]
print("\t- Department probability:")
print("\t\t- IT:", str(prob["IT"]), "%")
print("\t\t- HR:", str(prob["HR"]), "%")
print("\t\t- MARKETING:", str(prob["MARKETING"]), "%")
print("\t\t- OPERATIONS:", str(prob["OPERATIONS"]), "%")
print("\t\t- BIDNESS:", str(prob["BIDNESS"]), "%")
def print_acls_parameters(prob):
sum = 0
for key in prob.keys():
sum += prob[key]
if sum != 100:
prob = DEFAULT_VALUES["ACLs"]["ACLsProbability"]
print("\t- ACLs probability:")
print("\t\t- GenericAll:", str(prob["GenericAll"]), "%")
print("\t\t- GenericWrite:", str(prob["GenericWrite"]), "%")
print("\t\t- WriteOwner:", str(prob["WriteOwner"]), "%")
print("\t\t- WriteDacl:", str(prob["WriteDacl"]), "%")
print("\t\t- AddMember:", str(prob["AddMember"]), "%")
print("\t\t- ForceChangePassword:", str(prob["ForceChangePassword"]), "%")
print("\t\t- ReadLAPSPassword:", str(prob["ReadLAPSPassword"]), "%")
|
92248
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import ConnectionPatch
from matplotlib.transforms import Bbox
import seaborn as sns
import utils
from utils import filters, maxima, segment, merge
import warnings
def pipeline(img, low, high, roi_percentile=85, focal_scope='global', maxima_areas='small', merge_type='blend',
merge_alpha=0.5, filter_type='percentage', filter_percentage=15, filter_threshold=0.6):
"""
Visualization of the whole workflow. Requires the original image and the high and low res CAMs to work. Performs
the following steps:
1. Applies a filter to blur the high-res map.
2. Extracts the ROI from the low-res map through a percentile.
    3. Identifies the focal points of the low-res map by locating its local maxima.
4. Computes the gradient of the high-res map through a sobel filter.
5. Draws a histogram of the gradient. Only considers areas corresponding to the ROI extracted from the low-res map.
6. Calculates a 'lower' and 'upper' bound on the 25th and 75th percentile, respectively.
7. Performs a region-growing segmentation algorithm on the gradient. The boundaries are the previous percentiles,
while the focal points are set as the initial seeds (from where to start growing).
8. Merges the result of the segmentation with the low-res map.
9. Segments the original image according to the result of the previous merger.
Note: it would be more efficient and elegant if I went for 'axes fraction' instead of 'data' for the coordinates
of the ConnectionPatches, but it's too much of a hassle to change.
    :param img: Original RGB image, default shape=(224, 224, 3).
:param low: Low-resolution CAM, default shape=(14, 14).
:param high: High-resolution CAM, default shape=(224, 224).
:param roi_percentile: Percentile based on which the ROI will be extracted. The default percentile=85 means that
the ROI will include the 15% highest-intensity pixels from the low-res map.
:param focal_scope: The scope in which the focal points will be identified. 'global' looks for global maxima, while
'local' looks for local maxima. Accepted values: ['global', 'local']
:param maxima_areas: Specifies the size of the focal points. Two options available: 'small' and 'large'.
:param merge_type: Specifies the method of merging the high-res segment map with the low-res map.
Two methods available: 'blend' and 'multiply'. The first is a possibly weighted linear
combination of the two, while the second simply multiplies them.
:param merge_alpha: If merge_type='blend', alpha regulates the importance of each of the two images (i.e. the low
and the high-res maps). Should be a float in [0, 1]. High values result in more influence from
the high-res map.
:param filter_type: Specifies the method of segmenting the original image based on the combined CAM. Two methods are
available: 'percentage' and 'threshold'. The first keeps a percentage of the original image's
pixels while the second relies solely on the values of the combined CAM exceeding a threshold.
:param filter_percentage: Selects the percentage of pixels to be included in the final segment. Only relevant if
filter_type='percentage'. Should be a number between 0 and 100.
:param filter_threshold: Selects the threshold based on which the final segmentation will be performed. Only pixels
of the combined CAM that have an intensity greater than this threshold will be included.
Based on this mask, the original image will be segmented. Should be a float in [0, 1].
"""
# Value checks
# Categorical arguments
if maxima_areas not in ('small', 'large'):
raise ValueError("available options for maxima_areas are: 'small' and 'large'.")
if merge_type not in ('blend', 'multiply'):
raise ValueError("available options for merge_type are: 'blend' and 'multiply'.")
if filter_type not in ('percentage', 'threshold'):
raise ValueError("vailable options for filter_type are: 'percentage' and 'threshold'.")
# Percentage arguments
if roi_percentile <= 0 or roi_percentile >= 100:
raise ValueError('roi_percentile should be a percentage in (0, 100)')
elif roi_percentile < 1:
        warnings.warn('roi_percentile value in [0, 1). Should be defined as a percentage in (0, 100), '
                      'e.g. if the desired percentage is 13%, pass 13 instead of 0.13!')
if filter_percentage <= 0 or filter_percentage >= 100:
raise ValueError('filter_percentage should be a percentage in (0, 100)')
elif filter_percentage < 1:
        warnings.warn('filter_percentage value in [0, 1). Should be defined as a percentage in (0, 100), '
                      'e.g. if the desired percentage is 13%, pass 13 instead of 0.13!')
# Value arguments
if merge_alpha < 0 or merge_alpha > 1:
raise ValueError('merge_alpha should be a float in [0, 1]')
if filter_threshold < 0 or filter_threshold > 1:
raise ValueError('filter_threshold should be a float in [0, 1]')
# Coordinates of the top/bottom/left/right/middle of the input image
left = (0, img.shape[1] / 2)
right = (img.shape[1], img.shape[1] / 2)
bottom = (img.shape[1] / 2, img.shape[1])
top = (img.shape[1] / 2, 0)
midpoint = (img.shape[1] / 2, img.shape[1] / 2)
# Create two 'blank' images for filling empty positions
blank = np.ones(img[0].shape, dtype=np.uint8)
half_blank = blank[::2]
# Initialize 5x7 grid
fig, ax = plt.subplots(5, 7, figsize=(16, 16))
##############################
######## First column ########
##############################
# Fill first, second, fourth and fifth rows with blank images
ax[0, 0].imshow(blank, alpha=0)
ax[0, 0].axis('off')
ax[1, 0].imshow(blank, alpha=0)
ax[1, 0].axis('off')
ax[3, 0].imshow(blank, alpha=0)
ax[3, 0].axis('off')
ax[4, 0].imshow(half_blank, alpha=0)
ax[4, 0].axis('off')
# Add original image to the third row
ax[2, 0].imshow(img[0], zorder=3)
ax[2, 0].axis('off')
ax[2, 0].set_title('Original image', backgroundcolor='white', zorder=2)
# Three crooked lines starting from the first row, represented by thirteen (!) connection patches
# Connection of 'original image' to 'high-res map'
con1a = ConnectionPatch(xyA=top, xyB=midpoint, coordsA='data', coordsB='data',
axesA=ax[2, 0], axesB=ax[1, 0], color='black', lw=2, zorder=1)
con1b = ConnectionPatch(xyA=midpoint, xyB=left, coordsA='data', coordsB='data',
axesA=ax[1, 0], axesB=ax[1, 1], color='black', lw=2, arrowstyle='->')
# Connection of 'original image' to 'low-res map'
con2a = ConnectionPatch(xyA=bottom, xyB=midpoint, coordsA='data', coordsB='data',
axesA=ax[2, 0], axesB=ax[3, 0], color='black', lw=2)
con2b = ConnectionPatch(xyA=midpoint, xyB=left, coordsA='data', coordsB='data',
axesA=ax[3, 0], axesB=ax[3, 1], color='black', lw=2, arrowstyle='->')
# Connection of 'original image' to 'result'
con3b = ConnectionPatch(xyA=midpoint, xyB=bottom, coordsA='data', coordsB='data',
axesA=ax[1, 0], axesB=ax[0, 0], color='black', lw=2)
con3c = ConnectionPatch(xyA=bottom, xyB=bottom, coordsA='data', coordsB='data',
axesA=ax[0, 0], axesB=ax[0, 1], color='black', lw=2)
con3d = ConnectionPatch(xyA=bottom, xyB=bottom, coordsA='data', coordsB='data',
axesA=ax[0, 1], axesB=ax[0, 2], color='black', lw=2)
con3e = ConnectionPatch(xyA=bottom, xyB=bottom, coordsA='data', coordsB='data',
axesA=ax[0, 2], axesB=ax[0, 3], color='black', lw=2)
con3f = ConnectionPatch(xyA=bottom, xyB=bottom, coordsA='data', coordsB='data',
axesA=ax[0, 3], axesB=ax[0, 4], color='black', lw=2)
con3g = ConnectionPatch(xyA=bottom, xyB=bottom, coordsA='data', coordsB='data',
axesA=ax[0, 4], axesB=ax[0, 5], color='black', lw=2)
con3h = ConnectionPatch(xyA=bottom, xyB=bottom, coordsA='data', coordsB='data',
axesA=ax[0, 5], axesB=ax[0, 6], color='black', lw=2)
con3i = ConnectionPatch(xyA=bottom, xyB=midpoint, coordsA='data', coordsB='data',
axesA=ax[0, 6], axesB=ax[1, 6], color='black', lw=2)
con3k = ConnectionPatch(xyA=midpoint, xyB=midpoint, coordsA='data', coordsB='data',
axesA=ax[1, 6], axesB=ax[2, 6], color='black', lw=2)
con3l = ConnectionPatch(xyA=midpoint, xyB=top, coordsA='data', coordsB='data',
axesA=ax[2, 6], axesB=ax[3, 6], color='black', lw=2, arrowstyle='->', zorder=1)
# Add each patch to its respective axis
ax[2, 0].add_artist(con1a)
ax[1, 0].add_artist(con1b)
ax[2, 0].add_artist(con2a)
ax[3, 0].add_artist(con2b)
ax[1, 0].add_artist(con3b)
ax[0, 0].add_artist(con3c)
ax[0, 1].add_artist(con3d)
ax[0, 2].add_artist(con3e)
ax[0, 3].add_artist(con3f)
ax[0, 4].add_artist(con3g)
ax[0, 5].add_artist(con3h)
ax[0, 6].add_artist(con3i)
ax[1, 6].add_artist(con3k)
ax[2, 6].add_artist(con3l)
###############################
######## Second column ########
###############################
# High-res map on the second line
ax[1, 1].imshow(high)
ax[1, 1].axis('off')
ax[1, 1].set_title('High-res CAM')
# Low-res map on the fourth line
ax[3, 1].imshow(utils.resize(low), zorder=3)
ax[3, 1].axis('off')
ax[3, 1].set_title('Low-res CAM', backgroundcolor='white', zorder=2)
# Fill the first, third and fifth lines with blank images
ax[0, 1].imshow(blank, alpha=0)
ax[0, 1].axis('off')
ax[2, 1].imshow(blank, alpha=0)
ax[2, 1].axis('off')
ax[4, 1].imshow(half_blank, alpha=0)
ax[4, 1].axis('off')
# Four lines represented by eleven (!) connection patches
# Connection of 'high-res map' to 'gradient'
con4 = ConnectionPatch(xyA=right, xyB=left, coordsA='data', coordsB='data',
axesA=ax[1, 1], axesB=ax[1, 2], color='black', lw=2, arrowstyle='->')
# Connection of 'low-res map' to 'roi'
con5a = ConnectionPatch(xyA=top, xyB=midpoint, coordsA='data', coordsB='data',
axesA=ax[3, 1], axesB=ax[2, 1], color='black', lw=2, zorder=1)
con5b = ConnectionPatch(xyA=midpoint, xyB=left, coordsA='data', coordsB='data',
axesA=ax[2, 1], axesB=ax[2, 2], color='black', lw=2, arrowstyle='->')
# Connection of 'low-res map' to 'focal points'
con6 = ConnectionPatch(xyA=right, xyB=left, coordsA='data', coordsB='data',
axesA=ax[3, 1], axesB=ax[3, 2], color='black', lw=2, arrowstyle='->')
# Connection of 'low-res map' to 'merger'
con7a = ConnectionPatch(xyA=bottom, xyB=top, coordsA='data', coordsB='data',
axesA=ax[3, 1], axesB=ax[4, 1], color='black', lw=2, zorder=1)
con7b = ConnectionPatch(xyA=top, xyB=top, coordsA='data', coordsB='data',
axesA=ax[4, 1], axesB=ax[4, 2], color='black', lw=2, zorder=1)
con7c = ConnectionPatch(xyA=top, xyB=top, coordsA='data', coordsB='data',
axesA=ax[4, 2], axesB=ax[4, 3], color='black', lw=2, zorder=1)
con7d = ConnectionPatch(xyA=top, xyB=top, coordsA='data', coordsB='data',
axesA=ax[4, 3], axesB=ax[4, 4], color='black', lw=2, zorder=1)
con7e = ConnectionPatch(xyA=top, xyB=top, coordsA='data', coordsB='data',
axesA=ax[4, 4], axesB=ax[4, 5], color='black', lw=2, zorder=1)
con7f = ConnectionPatch(xyA=top, xyB=bottom, coordsA='data', coordsB='data',
axesA=ax[4, 5], axesB=ax[3, 5], color='black', lw=2, zorder=1, arrowstyle='->')
# Add the patches to their respective axes
ax[1, 1].add_artist(con4)
ax[3, 1].add_artist(con5a)
ax[2, 1].add_artist(con5b)
ax[3, 1].add_artist(con6)
ax[3, 1].add_artist(con7a)
ax[4, 1].add_artist(con7b)
ax[4, 2].add_artist(con7c)
ax[4, 3].add_artist(con7d)
ax[4, 4].add_artist(con7e)
ax[4, 5].add_artist(con7f)
##############################
######## Third column ########
##############################
# High-res blur
blurred = filters.blur(high)
ax[1, 2].imshow(blurred)
ax[1, 2].axis('off')
ax[1, 2].set_title('Blurred')
# Region of Interest
roi = utils.resize(low) > utils.percentile(utils.resize(low), roi_percentile)
a = ax[2, 2].imshow(roi)
ax[2, 2].axis('off')
ax[2, 2].set_title('Region of Interest')
# Focal Points
focal_points = maxima.find_focal_points(low, scope=focal_scope, maxima_areas=maxima_areas)
bg, dots = a.get_cmap().colors[0], a.get_cmap().colors[-1]
ax[3, 2].imshow((blank.reshape(-1, 3) * bg).reshape(img.shape[1], img.shape[1], 3))
ax[3, 2].scatter([x[0] for x in focal_points], [x[1] for x in focal_points], marker='x', s=30, c=dots)
ax[3, 2].axis('off')
ax[3, 2].set_title('Focal Points')
# Fill first and fifth rows with blank images
ax[0, 2].imshow(blank, alpha=0)
ax[0, 2].axis('off')
ax[4, 2].imshow(half_blank, alpha=0)
ax[4, 2].axis('off')
# Three lines represented by five connection patches
con8 = ConnectionPatch(xyA=right, xyB=left, coordsA='data', coordsB='data',
axesA=ax[1, 2], axesB=ax[1, 3], color='black', lw=2, arrowstyle='->')
con9 = ConnectionPatch(xyA=right, xyB=(0, 0.5), coordsA='data', coordsB='axes fraction',
axesA=ax[2, 2], axesB=ax[2, 3], color='black', lw=2, arrowstyle='->')
con10a = ConnectionPatch(xyA=right, xyB=midpoint, coordsA='data', coordsB='data',
axesA=ax[3, 2], axesB=ax[3, 3], color='black', lw=2)
con10b = ConnectionPatch(xyA=midpoint, xyB=midpoint, coordsA='data', coordsB='data',
axesA=ax[3, 3], axesB=ax[3, 4], color='black', lw=2)
con10c = ConnectionPatch(xyA=midpoint, xyB=left, coordsA='data', coordsB='data',
axesA=ax[3, 4], axesB=ax[3, 5], color='black', lw=2, arrowstyle='->')
# Add the patches to their respective axes
ax[1, 2].add_artist(con8)
ax[2, 2].add_artist(con9)
ax[3, 2].add_artist(con10a)
ax[3, 3].add_artist(con10b)
ax[3, 4].add_artist(con10c)
###############################
######## Fourth column ########
###############################
# High-res edge detection
grad = utils.normalize_image(filters.sobel(blurred))
ax[1, 3].imshow(grad)
ax[1, 3].axis('off')
ax[1, 3].set_title('Edge detection')
# Gradient percentiles
roi_grad = grad[roi]
lower = utils.percentile(roi_grad, 25)
upper = utils.percentile(roi_grad, 75)
ax[2, 3] = sns.distplot(roi_grad.ravel(), ax=ax[2, 3])
ax[2, 3].plot([lower, lower], [0, 4], c='C1')
ax[2, 3].plot([upper, upper], [0, 4], c='C1')
ax[2, 3].text(lower, -0.5, 'lower', color='C1', horizontalalignment='center')
ax[2, 3].text(upper, 4.5, 'upper', color='C1', horizontalalignment='center')
ax[2, 3].axis('off')
ttl = ax[2, 3].set_title('Edge Histogram')
ttl.set_bbox(dict(color='white', alpha=0.5, zorder=2))
square_axes(ax[2, 3]) # custom function that shrinks the axis object to a square box
# Fill first, fourth and fifth rows
ax[0, 3].imshow(blank, alpha=0)
ax[0, 3].axis('off')
ax[3, 3].imshow(blank, alpha=0)
ax[3, 3].axis('off')
ax[4, 3].imshow(half_blank, alpha=0)
ax[4, 3].axis('off')
# Three lines represented by four connection patches
con11 = ConnectionPatch(xyA=bottom, xyB=(0.5, 1), coordsA='data', coordsB='axes fraction',
axesA=ax[1, 3], axesB=ax[2, 3], color='black', lw=2, arrowstyle='->')
con12a = ConnectionPatch(xyA=right, xyB=midpoint, coordsA='data', coordsB='data',
axesA=ax[1, 3], axesB=ax[1, 4], color='black', lw=2)
con12b = ConnectionPatch(xyA=midpoint, xyB=top, coordsA='data', coordsB='data',
axesA=ax[1, 4], axesB=ax[2, 4], color='black', lw=2, arrowstyle='->', zorder=1)
con13 = ConnectionPatch(xyA=(1, 0.5), xyB=left, coordsA='axes fraction', coordsB='data',
axesA=ax[2, 3], axesB=ax[2, 4], color='black', lw=2, arrowstyle='->')
# Add the patches to their respective axes
ax[1, 3].add_artist(con11)
ax[1, 3].add_artist(con12a)
ax[1, 4].add_artist(con12b)
ax[2, 3].add_artist(con13)
##############################
######## Fifth column ########
##############################
# Region Growing Segmentation
segm = segment.region_growing(grad, seeds=focal_points, lower=lower, upper=upper)
ax[2, 4].imshow(segm, zorder=3)
ax[2, 4].axis('off')
ttl = ax[2, 4].set_title('Region Growing\nSegmentation')
ttl.set_bbox(dict(color='white', alpha=0.5, zorder=2))
# Fill first, second fourth and fifth rows
ax[0, 4].imshow(blank, alpha=0)
ax[0, 4].axis('off')
ax[1, 4].imshow(blank, alpha=0)
ax[1, 4].axis('off')
ax[3, 4].imshow(blank, alpha=0)
ax[3, 4].axis('off')
ax[4, 4].imshow(half_blank, alpha=0)
ax[4, 4].axis('off')
# Just one connection! :)
con14 = ConnectionPatch(xyA=right, xyB=left, coordsA='data', coordsB='data',
axesA=ax[2, 4], axesB=ax[2, 5], color='black', lw=2, arrowstyle='->')
ax[2, 4].add_artist(con14)
##############################
######## Sixth column ########
##############################
# Add edges and fill small holes
edges = (grad >= upper).astype(float)
roi_edges = edges * roi
segm_with_edges = segm + roi_edges
filled = maxima.remove_small_holes(segm_with_edges)
ax[2, 5].imshow(filled)
ax[2, 5].axis('off')
ax[2, 5].set_title('Remove small holes')
# High-Low merger
merged = merge.merge_images(filled, low, method=merge_type, alpha=merge_alpha)
ax[3, 5].imshow(merged)
ax[3, 5].axis('off')
ttl = ax[3, 5].set_title('High-Low Merger')
ttl.set_bbox(dict(color='white', alpha=0.5, zorder=2))
# Fill remaining rows
ax[0, 5].imshow(blank, alpha=0)
ax[0, 5].axis('off')
ax[1, 5].imshow(blank, alpha=0)
ax[1, 5].axis('off')
ax[3, 5].imshow(blank, alpha=0)
ax[3, 5].axis('off')
ax[4, 5].imshow(half_blank, alpha=0)
ax[4, 5].axis('off')
# Last connection patches...
con15 = ConnectionPatch(xyA=bottom, xyB=top, coordsA='data', coordsB='data',
axesA=ax[2, 5], axesB=ax[3, 5], color='black', lw=2, zorder=-1, arrowstyle='->')
con16 = ConnectionPatch(xyA=right, xyB=left, coordsA='data', coordsB='data',
axesA=ax[3, 5], axesB=ax[3, 6], color='black', lw=2, zorder=-1, arrowstyle='->')
ax[2, 5].add_artist(con15)
ax[3, 5].add_artist(con16)
################################
######## Seventh column ########
################################
# Result
if filter_type == 'percentage':
result = merge.keep_percentage(img, merged, percentage=filter_percentage/100)
else:
result = merge.filter_image(img, merged, threshold=filter_threshold)
ax[3, 6].imshow(result, zorder=3)
ax[3, 6].axis('off')
ttl = ax[3, 6].set_title('Result')
ttl.set_bbox(dict(color='white', alpha=0.5, zorder=2))
# Fill remaining rows
ax[0, 6].imshow(blank, alpha=0)
ax[0, 6].axis('off')
ax[1, 6].imshow(blank, alpha=0)
ax[1, 6].axis('off')
ax[2, 6].imshow(blank, alpha=0)
ax[2, 6].axis('off')
ax[4, 6].imshow(half_blank, alpha=0)
ax[4, 6].axis('off')
def plt_to_static(axes):
"""
Should convert an axis object to an image in a numpy.array. Doesn't work as intended!
:param axes: A matplotlib.axes.Axes object
:return: The same object as a numpy.array
"""
fig = plt.figure()
fig.axes.append(axes)
fig.canvas.draw()
buf = fig.canvas.tostring_rgb()
width, height = fig.canvas.get_width_height()
return np.frombuffer(buf, dtype=np.uint8).reshape(height, width, 3)
def square_axes(axes):
"""
Takes a matplotlib.axes.Axes object, finds its height and width and shrinks the largest dimension to match the
smallest one. Caution: it actually changes the object (in-place)!
:param axes: A matplotlib.axes.Axes object.
:return: The new Bbox coordinates.
"""
bbox = axes.get_position()._points.copy()
width = bbox[1, 0] - bbox[0, 0]
height = bbox[1, 1] - bbox[0, 1]
if width < height:
center = bbox[0, 1] + height / 2
bbox[0, 1] = center - width / 2
bbox[1, 1] = center + width / 2
else:
center = bbox[0, 0] + width / 2
bbox[0, 0] = center - height / 2
bbox[1, 0] = center + height / 2
axes.set_position(Bbox(bbox))
return bbox
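# Example usage: a minimal sketch of the full pipeline. The array shapes follow
# the pipeline() docstring; random arrays stand in for a real image and its
# low/high-resolution CAMs, so the figure produced is only meant to show that
# the workflow runs end to end.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    demo_img = rng.random((1, 224, 224, 3))   # batch with a single RGB image
    demo_low = rng.random((14, 14))           # low-resolution CAM
    demo_high = rng.random((224, 224))        # high-resolution CAM
    pipeline(demo_img, demo_low, demo_high)
    plt.show()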
|
92268
|
import re
from functools import reduce
from operator import or_
from actstream.models import Follow
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from django_filters import CharFilter, ChoiceFilter, FilterSet
from machina.apps.forum.models import Forum
from machina.apps.forum_conversation.models import Topic
from grandchallenge.core.filters import FilterForm
from grandchallenge.notifications.models import Notification
BOOLEAN_CHOICES = (
("1", "Read"),
("0", "Unread"),
)
class NotificationFilter(FilterSet):
forum = CharFilter(method="search_filter", label="Forum")
topic = CharFilter(method="search_filter", label="Forum post subject")
read = ChoiceFilter(choices=BOOLEAN_CHOICES, label="Status")
class Meta:
model = Notification
form = FilterForm
fields = ("forum", "topic", "read")
def search_filter(self, queryset, name, value):
if name == "forum":
name_qs = [
x.id for x in Forum.objects.filter(name__icontains=value).all()
]
elif name == "topic":
name_qs = [
x.id
for x in Topic.objects.filter(subject__icontains=value).all()
]
search_fields = (
"action__target_object_id",
"action__action_object_object_id",
)
return queryset.filter(
reduce(
or_, [Q(**{f"{f}__in": name_qs}) for f in search_fields], Q(),
)
)
FOLLOW_CHOICES = (
("forum_forum", "Forums"),
("topic_forum_conversation", "Topics"),
("readerstudy_reader_studies", "Reader studies"),
("archive_archives", "Archives"),
("algorithm_algorithms", "Algorithms"),
("challenge_challenges", "Challenges"),
("externalchallenge_challenges", "External Challenges"),
("phase_evaluation", "Challenge Phase"),
)
class FollowFilter(FilterSet):
forum = CharFilter(method="search_filter", label="Search for a forum")
topic = CharFilter(
method="search_filter", label="Search for a forum topic"
)
forums_for_user = CharFilter(
method="search_forum_topics",
label="Show all topic subscriptions for a specific forum",
)
content_type = ChoiceFilter(
choices=FOLLOW_CHOICES,
method="get_content_type",
label="Filter by subscription type",
)
class Meta:
model = Follow
form = FilterForm
fields = ("forum", "topic", "forums_for_user", "content_type")
def search_filter(self, queryset, name, value):
model_name = name
if model_name == "forum":
app_label = "forum"
model = Forum
kwargs = {"name__icontains": value}
elif model_name == "topic":
app_label = "forum_conversation"
model = Topic
kwargs = {"subject__icontains": value}
name_qs = [x.id for x in model.objects.filter(**kwargs).all()]
return queryset.filter(
**{"object_id__in": name_qs},
**{
"content_type__exact": ContentType.objects.filter(
model=model_name, app_label=app_label
).get()
},
)
def search_forum_topics(self, queryset, name, value):
forums = [
x.id for x in Forum.objects.filter(name__icontains=value).all()
]
name_qs = [
x.id for x in Topic.objects.filter(forum__id__in=forums).all()
]
return queryset.filter(
**{"object_id__in": name_qs},
**{
"content_type__exact": ContentType.objects.filter(
model="topic", app_label="forum_conversation"
).get()
},
)
def get_content_type(self, queryset, name, value):
        model, app_label = re.split(r"_", value, 1)
        ct = ContentType.objects.filter(
            model=model,
            app_label=app_label,
        ).get()
return queryset.filter(content_type__exact=ct)
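# Example usage: a sketch of how these FilterSets are typically wired into a
# view, with the request's query parameters passed as ``data`` (kept as a
# comment because it needs a configured Django project to run; "reader" is
# just an example search term):
#
#   filtered_notifications = NotificationFilter(
#       data={"read": "1", "forum": "reader"},
#       queryset=Notification.objects.all(),
#   ).qs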
|
92281
|
from django.shortcuts import render, get_object_or_404
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.views import redirect_to_login
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import cached_property
from django.utils.timezone import now
from datetime import timedelta
from invitations.models import Invitation
import csv
from django.http import HttpResponse
class GetUserAdminMixin:
@cached_property
def user_admin(self):
if self.user_was_invited:
invitation = Invitation.objects.filter(
email__iexact=self.request.user.email
).first()
User = get_user_model()
if User.objects.filter(email__iexact=invitation.inviter.email).exists():
return invitation.inviter
return {
"email": "<EMAIL>",
"name": "Portal Super Admin",
}
@cached_property
def user_was_invited(self):
return Invitation.objects.filter(email__iexact=self.request.user.email).exists()
class ThrottledMixin:
throttled_lookup_user_field = "created_by"
throttled_lookup_date_field = "created_at"
throttled_model = None
throttled_limit = None
throttled_time_range = None
def dispatch(self, *args, **kwargs):
self._check()
filter_kwargs = {
f"{self.throttled_lookup_user_field}": self.request.user.id,
f"{self.throttled_lookup_date_field}__gte": now()
- timedelta(seconds=self.throttled_time_range),
}
count = self.throttled_model.objects.filter(**filter_kwargs).count()
if count >= self.throttled_limit:
return self.limit_reached()
return super().dispatch(*args, **kwargs)
def limit_reached(self):
return render(self.request, "throttled/locked.html", status=403)
def _check(self):
if self.throttled_model is None:
raise ImproperlyConfigured(
"Class using ThrottledMixin should always provide a throttled_model"
)
if self.throttled_limit is None:
raise ImproperlyConfigured(
"Class using ThrottledMixin should always provide a throttled_limit"
)
if self.throttled_time_range is None:
raise ImproperlyConfigured(
"Class using ThrottledMixin should always provide a throttled_time_range in seconds"
)
if hasattr(self.throttled_model, self.throttled_lookup_user_field) is False:
raise ImproperlyConfigured(
f"The model passed to ThrottledMixin ({self.throttled_model}) has no field {self.throttled_lookup_user_field} for the look up."
)
if hasattr(self.throttled_model, self.throttled_lookup_date_field) is False:
raise ImproperlyConfigured(
f"The model passed to ThrottledMixin ({self.throttled_model}) has no field {self.throttled_lookup_date_field} for the look up."
)
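# Example usage: a sketch of a throttled view (hypothetical Submission model and
# CreateView subclass); the model is assumed to expose "created_by" and
# "created_at" fields matching the default lookup fields above.
#
#   class SubmissionCreateView(ThrottledMixin, CreateView):
#       model = Submission
#       throttled_model = Submission
#       throttled_limit = 5              # at most 5 objects ...
#       throttled_time_range = 60 * 60   # ... per user per hour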
class Is2FAMixin(LoginRequiredMixin):
"""Verify that the current user is authenticated and using 2FA."""
def dispatch(self, request, *args, **kwargs):
if not request.user.is_authenticated:
return super().dispatch(request, *args, **kwargs)
if not request.user.is_verified():
return redirect_to_login(
request.get_full_path(),
settings.OTP_LOGIN_URL,
self.get_redirect_field_name(),
)
return super().dispatch(request, *args, **kwargs)
class IsAdminMixin(UserPassesTestMixin):
def test_func(self):
# allow if superuser or admin
if self.request.user.is_superuser or self.request.user.is_admin:
return True
return False
class ProvinceAdminMixin(UserPassesTestMixin):
def test_func(self):
# if logged in user is superuser, allow operation
if self.request.user.is_superuser:
return True
# 404 if bad user ID
profile_user = get_object_or_404(get_user_model(), pk=self.kwargs["pk"])
# if same user, allow operation
if self.request.user.id == profile_user.id:
return True
# Don't return superuser profile pages
if profile_user.is_superuser:
return False
# if admin user, return users from the same province
if (
self.request.user.is_admin
and self.request.user.province.id == profile_user.province.id
):
return True
return False
class ExportCsvMixin:
def export_as_csv(self, request, queryset):
meta = self.model._meta
# Will only work if list_display is made up of only model properties
field_names = self.list_display
response = HttpResponse(content_type="text/csv")
response["Content-Disposition"] = "attachment; filename={}.csv".format(meta)
writer = csv.writer(response)
writer.writerow(field_names)
for obj in queryset:
writer.writerow([getattr(obj, field) for field in field_names])
return response
export_as_csv.short_description = "Export Selected"
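# Example usage: a sketch of exposing the CSV export as an admin action
# (hypothetical MyModelAdmin; list_display must contain model fields only, as
# noted in export_as_csv):
#
#   class MyModelAdmin(ExportCsvMixin, admin.ModelAdmin):
#       list_display = ("id", "created_at")
#       actions = ("export_as_csv",)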
|
92290
|
import json
import os
import shutil
import sys
from subprocess import CalledProcessError, check_output
import pytest
from click.testing import CliRunner
from deepcov.cli import File
from snapshottest.pytest import PyTestSnapshotTest
from tests.util import RESOURCES
from deepcov import cli # isort:skip
pytest_plugins = "pytester"
@pytest.fixture
def tested_dir():
try:
check_output(f"{sys.executable} -m pytest", cwd="tests/resources", shell=True)
except CalledProcessError as err:
assert err.returncode == 1
os.chdir(RESOURCES / ".deepcov")
class TestCli:
def test_when_test_file_then_success(
self, tested_dir: object, snapshot: PyTestSnapshotTest
):
runner = CliRunner()
source = RESOURCES / "src" / "test_lib.py"
assert source.exists()
print(f"Running {source}")
result = runner.invoke(cli.run, source.as_posix(), catch_exceptions=False)
print(result.stdout)
f = File(**json.loads(result.stdout))
[snapshot.assert_match({line: f.lines[line]}) for line in sorted(f.lines)]
def test_when_src_file_then_success(
self, tested_dir: object, snapshot: PyTestSnapshotTest
):
runner = CliRunner()
source = RESOURCES / "src" / "lib.py"
assert source.exists()
print(f"Running {source}")
result = runner.invoke(cli.run, source.as_posix(), catch_exceptions=False)
print(result.stdout)
f = File(**json.loads(result.stdout))
[snapshot.assert_match({line: f.lines[line]}) for line in sorted(f.lines)]
def test_when_no_junit_then_error(
self,
tested_dir: object,
testdir: pytest.Testdir,
):
shutil.copyfile(RESOURCES / ".deepcov" / ".coverage", ".coverage")
runner = CliRunner()
source = RESOURCES / "src" / "lib.py"
assert source.exists()
print(f"Running {source}")
result = runner.invoke(cli.run, source.as_posix(), catch_exceptions=False)
assert "error" in json.loads(result.stdout)
def test_when_no_cov_then_error(self, tested_dir: object, testdir: pytest.Testdir):
shutil.copyfile(RESOURCES / ".deepcov" / "junit.xml", "junit.xml")
runner = CliRunner()
source = RESOURCES / "src" / "lib.py"
assert source.exists()
print(f"Running {source}")
result = runner.invoke(cli.run, source.as_posix(), catch_exceptions=False)
assert "error" in json.loads(result.stdout)
def test_when_unknown_file_then_error(self, tested_dir: object):
runner = CliRunner()
source = RESOURCES / "src" / "asdf.py"
assert not source.exists()
print(f"Running {source}")
result = runner.invoke(cli.run, source.as_posix(), catch_exceptions=False)
assert json.loads(result.stdout)["error"].startswith("No cov")
def test_when_out_of_cov_scope_then_error(self, tested_dir: object):
runner = CliRunner()
source = RESOURCES / "out_of_cov_scope.py"
assert source.exists()
print(f"Running {source}")
result = runner.invoke(cli.run, source.as_posix(), catch_exceptions=False)
assert json.loads(result.stdout)["error"].startswith("No cov")
def test_when_status_then_time_given(self, tested_dir: object):
runner = CliRunner()
result = runner.invoke(cli.run, catch_exceptions=False)
assert json.loads(result.stdout)["time_since_run"] == "just now"
def test_when_status_no_data_then_null(self, testdir: pytest.Testdir):
runner = CliRunner()
result = runner.invoke(cli.run, catch_exceptions=False)
        assert json.loads(result.stdout)["time_since_run"] is None
|
92296
|
from __future__ import annotations
import sys
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import Callable
import pytermgui as ptg
def _process_arguments(argv: list[str] | None = None) -> Namespace:
"""Processes command line arguments.
    Note that you don't _have to_ use the builtin argparse module for this; it
is just what the module uses.
Args:
argv: A list of command line arguments, not including the binary path
(sys.argv[0]).
"""
parser = ArgumentParser(description="A simple text editor.")
parser.add_argument("file", help="The file to read.", type=Path, nargs="?")
parser.add_argument(
"-s",
"--string",
help="String to edit, when file is not given.",
metavar="STR",
)
parser.add_argument(
"--highlight", action="store_true", help="Enable Python syntax highlighting."
)
return parser.parse_args(argv)
def _create_aliases() -> None:
"""Creates all the TIM aliases used by the application."""
ptg.tim.alias("editor.header", "@157 240")
ptg.tim.alias("editor.footer", "inverse editor.header")
def _configure_widgets() -> None:
"""Defines all the global widget configurations."""
ptg.boxes.EMPTY.set_chars_of(ptg.Window)
ptg.Splitter.set_char("separator", "")
def _define_layout() -> ptg.Layout:
"""Defines the application layout."""
layout = ptg.Layout()
layout.add_slot("Header", height=1)
layout.add_break()
layout.add_slot("Body")
layout.add_break()
layout.add_slot("Footer", height=1)
return layout
def get_watcher(obj: object, *attrs: str | tuple[str, str]) -> Callable[[str], str]:
"""Creates a macro callable for retrieving attributes of an object.
Args:
obj: The object to get attributes of.
*attrs: The name of the attributes to look at. Each attribute is looked
up with using getattr, and the given name will be formatted into the
template when the macro is called.
Example of usage:
field = InputField()
tim.define("!cursor", get_watcher(field.cursor, "row", "col"))
tim.print("[!cursor]Cursor: {row}:{col}")
"""
valid_attrs = []
for attr in attrs:
if isinstance(attr, str):
valid_attrs.append((attr, attr))
continue
alias, real = attr
valid_attrs.append((alias, real))
    def _macro(fmt: str) -> str:
try:
return fmt.format(
**{alias: getattr(obj, real) for alias, real in valid_attrs}
)
except Exception as err:
return str(err)
return _macro
def _read_content(path: Path) -> str:
with open(path, "r") as file:
return file.read()
def main(argv: list[str] | None = None, tim: ptg.MarkupLanguage = ptg.tim) -> None:
"""Runs the application."""
_create_aliases()
_configure_widgets()
args = _process_arguments(argv)
if args.file is not None:
content = _read_content(args.file).replace("[", r"\[")
elif args.string is not None:
content = args.string.replace("[", r"\[")
else:
print("Please provide either a file or a string to edit.")
return
field = ptg.InputField(content, multiline=True)
if args.highlight:
field.styles.value = lambda _, text: ptg.tim.parse(ptg.highlight_python(text))
tim.define("!cursor", get_watcher(field.cursor, "row", "col"))
tim.define("!select_len", get_watcher(field, ("select_len", "_selection_length")))
tim.define("!select_text", get_watcher(field, ("select_text", "selection")))
with ptg.WindowManager() as manager:
manager.layout = _define_layout()
header = ptg.Window("[editor.header bold]Code Editor", box="EMPTY")
header.styles.fill = "editor.header"
footer = ptg.Window(
ptg.Splitter(
ptg.Label(
"[editor.footer !cursor] Cursor: {row}:{col}"
+ " // [/!cursor !select_len]{select_len}",
parent_align=0,
),
ptg.Label(
"[editor.footer !select_text]{select_text}",
parent_align=2,
),
).styles(fill="editor.footer"),
box="EMPTY",
)
footer.styles.fill = "editor.footer"
body = ptg.Window(field, overflow=ptg.Overflow.SCROLL)
manager.add(header, assign="header")
manager.add(body, assign="body")
manager.add(footer, assign="footer")
if __name__ == "__main__":
main(sys.argv[1:])
|
92305
|
r"""
Basic analysis of a MD simulation
=================================
In this example, we will analyze a trajectory of a *Gromacs* MD
simulation:
The trajectory contains simulation data of lysozyme over the course of
1 ns.
The data is the result of the famous *Gromacs*
'`Lysozyme in Water <http://www.mdtutorials.com/gmx/lysozyme/index.html>`_'
tutorial.
The trajectory file can be downloaded
:download:`here </examples/download/lysozyme_md.xtc>`
and the template PDB can be downloaded
:download:`here </examples/download/lysozyme_md.pdb>`.
We begin by loading the template PDB file as :class:`AtomArray`, sanitizing it
and using it to load the trajectory as :class:`AtomArrayStack`.
"""
# Code source: <NAME>
# License: BSD 3 clause
import biotite
import biotite.structure as struc
import biotite.structure.io as strucio
import biotite.structure.io.xtc as xtc
import numpy as np
import matplotlib.pyplot as plt
# Put here the path of the downloaded files
templ_file_path = "../../download/lysozyme_md.pdb"
traj_file_path = "../../download/lysozyme_md.xtc"
# Gromacs does not set the element symbol in its PDB files,
# but Biotite guesses the element names from the atom names,
# emitting a warning
template = strucio.load_structure(templ_file_path)
# The structure still has water and ions, that are not needed for our
# calculations, we are only interested in the protein itself
# These are removed for the sake of computational speed using a boolean
# mask
protein_mask = struc.filter_amino_acids(template)
template = template[protein_mask]
# We could have loaded the trajectory also with
# 'strucio.load_structure()', but in this case we only want to load
# those coordinates that belong to the already selected atoms of the
# template structure.
# Hence, we use the 'XTCFile' class directly to load the trajectory
# This gives us the additional option that allows us to select the
# coordinates belonging to the amino acids.
xtc_file = xtc.XTCFile.read(traj_file_path, atom_i=np.where(protein_mask)[0])
trajectory = xtc_file.get_structure(template)
# Get simulation time for plotting purposes
time = xtc_file.get_time()
########################################################################
# Since the MD simulation used periodic boundaries, the protein might be
# segmented over the box boundary.
# For further analysis we need to reassemble the protein chain into a
# whole molecule, without periodic boundaries.
# In *Gromacs* we could have used ``gmx trjconv`` for this, but this
# problem can be handled in *Biotite*, too.
trajectory = struc.remove_pbc(trajectory)
########################################################################
# Now our trajectory is ready for some analysis!
# At first we want to see if the simulation converged.
# For this purpose we take the RMSD of a frame compared to the initial
# model as measure. In order to calculate the RMSD we must
# superimpose all models onto a reference, in this case we also choose
# the initial structure.
trajectory, transform = struc.superimpose(trajectory[0], trajectory)
rmsd = struc.rmsd(trajectory[0], trajectory)
figure = plt.figure(figsize=(6,3))
ax = figure.add_subplot(111)
ax.plot(time, rmsd, color=biotite.colors["dimorange"])
ax.set_xlim(time[0], time[-1])
ax.set_ylim(0, 2)
ax.set_xlabel("Time (ps)")
ax.set_ylabel("RMSD (Å)")
figure.tight_layout()
########################################################################
# As we can see the simulation seems to converge already early in the
# simulation.
# After about 200 ps the RMSD stays in a range of approx. 1 - 2 Å.
#
# In order to further evaluate the unfolding of our enzyme in the
# course of simulation, we calculate and plot the radius of gyration
# (a measure for the protein radius).
radius = struc.gyration_radius(trajectory)
figure = plt.figure(figsize=(6,3))
ax = figure.add_subplot(111)
ax.plot(time, radius, color=biotite.colors["dimorange"])
ax.set_xlim(time[0], time[-1])
ax.set_ylim(14.0, 14.5)
ax.set_xlabel("Time (ps)")
ax.set_ylabel("Radius of gyration (Å)")
figure.tight_layout()
########################################################################
# From this perspective, the protein seems really stable.
# The radius merely fluctuates within a range of approximately 0.3 Å
# during the entire simulation.
#
# Let's have a look at single amino acids:
# Which residues fluctuate most?
# For answering this question we calculate the RMSF
# (Root mean square fluctuation).
# It is similar to the RMSD, but instead of averaging over the atoms
# and looking at each time step, we average over the time and look at
# each residue.
# Usually the average model is taken as reference
# (compared to the starting model for RMSD).
#
# Since side chain atoms fluctuate quite a lot, they are not suitable
# for evaluation of the residue flexibility. Therefore, we consider only
# CA atoms.
# In all models, mask the CA atoms
ca_trajectory = trajectory[:, trajectory.atom_name == "CA"]
rmsf = struc.rmsf(struc.average(ca_trajectory), ca_trajectory)
figure = plt.figure(figsize=(6,3))
ax = figure.add_subplot(111)
res_count = struc.get_residue_count(trajectory)
ax.plot(np.arange(1, res_count+1), rmsf, color=biotite.colors["dimorange"])
ax.set_xlim(1, res_count)
ax.set_ylim(0, 1.5)
ax.set_xlabel("Residue")
ax.set_ylabel("RMSF (Å)")
figure.tight_layout()
plt.show()
|
92313
|
from itertools import cycle
from unittest.mock import patch
from django.utils import timezone as djangotime
from model_bakery import baker, seq
from tacticalrmm.test import TacticalTestCase
from logs.models import PendingAction
class TestAuditViews(TacticalTestCase):
def setUp(self):
self.authenticate()
self.setup_coresettings()
def create_audit_records(self):
# create clients for client filter
site = baker.make("clients.Site")
agent1 = baker.make_recipe("agents.agent", site=site, hostname="AgentHostname1")
agent2 = baker.make_recipe("agents.agent", hostname="AgentHostname2")
agent0 = baker.make_recipe("agents.agent", hostname="AgentHostname")
# user jim agent logs
baker.make_recipe(
"logs.agent_logs",
username="jim",
agent="AgentHostname1",
agent_id=agent1.id,
_quantity=15,
)
baker.make_recipe(
"logs.agent_logs",
username="jim",
agent="AgentHostname2",
agent_id=agent2.id,
_quantity=8,
)
# user james agent logs
baker.make_recipe(
"logs.agent_logs",
username="james",
agent="AgentHostname1",
agent_id=agent1.id,
_quantity=7,
)
baker.make_recipe(
"logs.agent_logs",
username="james",
agent="AgentHostname2",
agent_id=agent2.id,
_quantity=10,
)
# generate agent logs with random usernames
baker.make_recipe(
"logs.agent_logs",
agent=seq("AgentHostname"),
agent_id=seq(agent1.id),
_quantity=5,
)
# generate random object data
baker.make_recipe(
"logs.object_logs",
username="james",
_quantity=17,
)
# generate login data for james
baker.make_recipe(
"logs.login_logs",
username="james",
_quantity=11,
)
# generate login data for jim
baker.make_recipe(
"logs.login_logs",
username="jim",
_quantity=13,
)
return {"site": site, "agents": [agent0, agent1, agent2]}
def test_get_audit_logs(self):
url = "/logs/auditlogs/"
# create data
data = self.create_audit_records()
# test data and result counts
data = [
{"filter": {"timeFilter": 30}, "count": 86},
{
"filter": {
"timeFilter": 45,
"agentFilter": [data["agents"][2].id],
},
"count": 19,
},
{
"filter": {
"userFilter": ["jim"],
"agentFilter": [data["agents"][1].id],
},
"count": 15,
},
{
"filter": {
"timeFilter": 180,
"userFilter": ["james"],
"agentFilter": [data["agents"][1].id],
},
"count": 7,
},
{"filter": {}, "count": 86},
{"filter": {"agentFilter": [500]}, "count": 0},
{
"filter": {
"timeFilter": 35,
"userFilter": ["james", "jim"],
"agentFilter": [
data["agents"][1].id,
data["agents"][2].id,
],
},
"count": 40,
},
{"filter": {"timeFilter": 35, "userFilter": ["james", "jim"]}, "count": 81},
{"filter": {"objectFilter": ["user"]}, "count": 26},
{"filter": {"actionFilter": ["login"]}, "count": 12},
{
"filter": {"clientFilter": [data["site"].client.id]},
"count": 23,
},
]
pagination = {
"rowsPerPage": 25,
"page": 1,
"sortBy": "entry_time",
"descending": True,
}
for req in data:
resp = self.client.patch(
url, {**req["filter"], "pagination": pagination}, format="json"
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
len(resp.data["audit_logs"]), # type:ignore
pagination["rowsPerPage"]
if req["count"] > pagination["rowsPerPage"]
else req["count"],
)
self.assertEqual(resp.data["total"], req["count"]) # type:ignore
self.check_not_authenticated("patch", url)
def test_get_pending_actions(self):
url = "/logs/pendingactions/"
agent1 = baker.make_recipe("agents.online_agent")
agent2 = baker.make_recipe("agents.online_agent")
baker.make(
"logs.PendingAction",
agent=agent1,
action_type="chocoinstall",
details={"name": "googlechrome", "output": None, "installed": False},
_quantity=12,
)
baker.make(
"logs.PendingAction",
agent=agent2,
action_type="chocoinstall",
status="completed",
details={"name": "adobereader", "output": None, "installed": False},
_quantity=14,
)
data = {"showCompleted": False}
r = self.client.patch(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(len(r.data["actions"]), 12) # type: ignore
self.assertEqual(r.data["completed_count"], 14) # type: ignore
self.assertEqual(r.data["total"], 26) # type: ignore
PendingAction.objects.filter(action_type="chocoinstall").update(
status="completed"
)
data = {"showCompleted": True}
r = self.client.patch(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(len(r.data["actions"]), 26) # type: ignore
self.assertEqual(r.data["completed_count"], 26) # type: ignore
self.assertEqual(r.data["total"], 26) # type: ignore
data = {"showCompleted": True, "agentPK": agent1.pk}
r = self.client.patch(url, data, format="json")
self.assertEqual(r.status_code, 200)
self.assertEqual(len(r.data["actions"]), 12) # type: ignore
self.assertEqual(r.data["completed_count"], 12) # type: ignore
self.assertEqual(r.data["total"], 12) # type: ignore
self.check_not_authenticated("patch", url)
@patch("agents.models.Agent.nats_cmd")
def test_cancel_pending_action(self, nats_cmd):
nats_cmd.return_value = "ok"
url = "/logs/pendingactions/"
agent = baker.make_recipe("agents.online_agent")
action = baker.make(
"logs.PendingAction",
agent=agent,
action_type="schedreboot",
details={
"time": "2021-01-13 18:20:00",
"taskname": "TacticalRMM_SchedReboot_wYzCCDVXlc",
},
)
data = {"pk": action.pk} # type: ignore
r = self.client.delete(url, data, format="json")
self.assertEqual(r.status_code, 200)
nats_data = {
"func": "delschedtask",
"schedtaskpayload": {"name": "TacticalRMM_SchedReboot_wYzCCDVXlc"},
}
nats_cmd.assert_called_with(nats_data, timeout=10)
# try request again and it should 404 since pending action doesn't exist
r = self.client.delete(url, data, format="json")
self.assertEqual(r.status_code, 404)
nats_cmd.reset_mock()
action2 = baker.make(
"logs.PendingAction",
agent=agent,
action_type="schedreboot",
details={
"time": "2021-01-13 18:20:00",
"taskname": "TacticalRMM_SchedReboot_wYzCCDVXlc",
},
)
data = {"pk": action2.pk} # type: ignore
nats_cmd.return_value = "error deleting sched task"
r = self.client.delete(url, data, format="json")
self.assertEqual(r.status_code, 400)
self.assertEqual(r.data, "error deleting sched task") # type: ignore
self.check_not_authenticated("delete", url)
def test_get_debug_log(self):
url = "/logs/debuglog/"
# create data
agent = baker.make_recipe("agents.agent")
baker.make(
"logs.DebugLog",
log_level=cycle(["error", "info", "warning", "critical"]),
log_type="agent_issues",
agent=agent,
_quantity=4,
)
logs = baker.make(
"logs.DebugLog",
log_type="system_issues",
log_level=cycle(["error", "info", "warning", "critical"]),
_quantity=15,
)
# test agent filter
data = {"agentFilter": agent.id}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.data), 4) # type: ignore
        # test log level filter and agent
data = {"agentFilter": agent.id, "logLevelFilter": "warning"}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.data), 1) # type: ignore
        # test log type filter together with log level filter
data = {"logTypeFilter": "system_issues", "logLevelFilter": "error"}
resp = self.client.patch(url, data, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.data), 4) # type: ignore
self.check_not_authenticated("patch", url)
class TestLogTasks(TacticalTestCase):
def test_prune_debug_log(self):
from .models import DebugLog
from .tasks import prune_debug_log
# setup data
debug_log = baker.make(
"logs.DebugLog",
_quantity=50,
)
days = 0
for item in debug_log: # type:ignore
item.entry_time = djangotime.now() - djangotime.timedelta(days=days)
item.save()
days = days + 5
        # delete DebugLog entries older than 30 days
prune_debug_log(30)
self.assertEqual(DebugLog.objects.count(), 6)
def test_prune_audit_log(self):
from .models import AuditLog
from .tasks import prune_audit_log
# setup data
audit_log = baker.make(
"logs.AuditLog",
_quantity=50,
)
days = 0
for item in audit_log: # type:ignore
item.entry_time = djangotime.now() - djangotime.timedelta(days=days)
item.save()
days = days + 5
        # delete AuditLog entries older than 30 days
prune_audit_log(30)
self.assertEqual(AuditLog.objects.count(), 6)
|
92318
|
from main import db
from sqlalchemy import Column, Integer, DateTime, Text, ForeignKey
class Mail(db.Model):
__tablename__ = "mail"
id = Column(Integer, primary_key=True)
    # Sender user ID
from_id = Column(Integer, ForeignKey(
"user.id", ondelete="CASCADE"), index=True, nullable=False)
    # Recipient user ID
to_id = Column(Integer, ForeignKey(
"user.id", ondelete="CASCADE"), index=True, nullable=False)
    # Time the mail was sent
time = Column(DateTime, nullable=False, index=True)
    # Message content
text = Column(Text)
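# Example usage (a minimal sketch, assuming an active Flask application context
# and the Flask-SQLAlchemy session from `main.db`; `sender` and `recipient` are
# hypothetical User instances that are not defined in this module):
#
#     from datetime import datetime
#     mail = Mail(from_id=sender.id, to_id=recipient.id,
#                 time=datetime.now(), text="Hello!")
#     db.session.add(mail)
#     db.session.commit()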
|
92330
|
import os
import warnings
from itertools import tee
from copy import copy
from collections import namedtuple
import numpy as np
import pybullet as p
from pybullet_planning.interfaces.env_manager.pose_transformation import get_distance
from pybullet_planning.utils import MAX_DISTANCE, EPS, INF
from pybullet_planning.interfaces.robots.joint import get_joint_positions, get_custom_limits, get_movable_joints, set_joint_positions, \
get_configuration, get_custom_max_velocity
from pybullet_planning.interfaces.robots.collision import get_collision_fn
from pybullet_planning.interfaces.robots.body import clone_body, remove_body, get_link_pose
from .ladder_graph import LadderGraph, EdgeBuilder, append_ladder_graph
from .dag_search import DAGSearch
#####################################
NullSpace = namedtuple('Nullspace', ['lower', 'upper', 'range', 'rest'])
def get_null_space(robot, joints, custom_limits={}):
rest_positions = get_joint_positions(robot, joints)
lower, upper = get_custom_limits(robot, joints, custom_limits)
lower = np.maximum(lower, -10*np.ones(len(joints)))
upper = np.minimum(upper, +10*np.ones(len(joints)))
joint_ranges = 10*np.ones(len(joints))
return NullSpace(list(lower), list(upper), list(joint_ranges), list(rest_positions))
def plan_cartesian_motion(robot, first_joint, target_link, waypoint_poses,
max_iterations=200, custom_limits={}, get_sub_conf=False, **kwargs):
"""Compute a joint trajectory for a given sequence of workspace poses. Only joint limit is considered.
Collision checking using `get_collision_fn` is often performed on the path computed by this function.
Parameters
----------
robot : int
robot body index
first_joint : int
the first joint index in the kinematics chain.
target_link : int
end effector link index.
waypoint_poses : a list of Pose
a list of end effector workspace poses in the world coord.
max_iterations : int, optional
[description], by default 200
custom_limits : dict, optional
[description], by default {}
get_sub_conf : bool, optional
return sub-kinematics chain configuration if set to True, by default False
Returns
-------
[type]
[description]
Example
-------
```
ik_joints = joints_from_names(robot_uid, ik_joint_names)
ik_tool_link = link_from_name(robot_uid, tool_link_name)
cart_conf_vals = plan_cartesian_motion(robot_uid, ik_joints[0], ik_tool_link, world_from_ee_poses)
```
"""
from pybullet_planning.interfaces.env_manager.pose_transformation import all_between
from pybullet_planning.interfaces.robots.link import get_link_subtree, prune_fixed_joints
from pybullet_planning.interfaces.kinematics import inverse_kinematics_helper, is_pose_close
# TODO: fix stationary joints
# TODO: pass in set of movable joints and take least common ancestor
# TODO: update with most recent bullet updates
# https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/examples/inverse_kinematics.py
# https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/examples/inverse_kinematics_husky_kuka.py
# TODO: plan a path without needing to following intermediate waypoints
lower_limits, upper_limits = get_custom_limits(robot, get_movable_joints(robot), custom_limits)
selected_links = get_link_subtree(robot, first_joint) # TODO: child_link_from_joint?
selected_movable_joints = prune_fixed_joints(robot, selected_links)
assert(target_link in selected_links)
selected_target_link = selected_links.index(target_link)
sub_robot = clone_body(robot, links=selected_links, visual=False, collision=False) # TODO: joint limits
sub_movable_joints = get_movable_joints(sub_robot)
#null_space = get_null_space(robot, selected_movable_joints, custom_limits=custom_limits)
null_space = None
solutions = []
for target_pose in waypoint_poses:
for iteration in range(max_iterations):
sub_kinematic_conf = inverse_kinematics_helper(sub_robot, selected_target_link, target_pose, null_space=null_space)
if sub_kinematic_conf is None:
remove_body(sub_robot)
return None
set_joint_positions(sub_robot, sub_movable_joints, sub_kinematic_conf)
if is_pose_close(get_link_pose(sub_robot, selected_target_link), target_pose, **kwargs):
set_joint_positions(robot, selected_movable_joints, sub_kinematic_conf)
kinematic_conf = get_configuration(robot)
if not all_between(lower_limits, kinematic_conf, upper_limits):
#movable_joints = get_movable_joints(robot)
#print([(get_joint_name(robot, j), l, v, u) for j, l, v, u in
# zip(movable_joints, lower_limits, kinematic_conf, upper_limits) if not (l <= v <= u)])
#print("Limits violated")
#wait_for_user()
remove_body(sub_robot)
return None
#print("IK iterations:", iteration)
if not get_sub_conf:
solutions.append(kinematic_conf)
else:
solutions.append(sub_kinematic_conf)
break
else:
remove_body(sub_robot)
return None
remove_body(sub_robot)
return solutions
def sub_inverse_kinematics(robot, first_joint, target_link, target_pose, **kwargs):
solutions = plan_cartesian_motion(robot, first_joint, target_link, [target_pose], **kwargs)
if solutions:
return solutions[0]
return None
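# A hedged convenience sketch (not part of the original pybullet_planning API):
# combine plan_cartesian_motion with a user-supplied collision function, as the
# docstring above suggests doing after planning.
def plan_and_check_cartesian_motion(robot, first_joint, target_link, waypoint_poses, collision_fn, **kwargs):
    """Plan a cartesian path and reject it if any configuration collides.
    Returns the path, or None if planning fails or a waypoint is in collision.
    """
    path = plan_cartesian_motion(robot, first_joint, target_link, waypoint_poses, **kwargs)
    if path is None:
        return None
    if any(collision_fn(conf) for conf in path):
        return None
    return path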
#####################################
MAX_SAMPLE_ITER = int(1e4)
def plan_cartesian_motion_lg(robot, joints, waypoint_poses, sample_ik_fn=None, collision_fn=None, sample_ee_fn=None,
max_sample_ee_iter=MAX_SAMPLE_ITER, custom_vel_limits={}, ee_vel=None, jump_threshold={}, **kwargs):
"""ladder graph cartesian planning, better leveraging ikfast for sample_ik_fn
Parameters
----------
robot : [type]
[description]
joints : [type]
[description]
waypoint_poses : [type]
[description]
sample_ik_fn : [type], optional
[description], by default None
collision_fn : [type], optional
[description], by default None
    sample_ee_fn : [type], optional
        please remember to put an end to the sampling loop - otherwise sampling
        is cut off after ``max_sample_ee_iter`` iterations, by default None
jump_threshold : [type], optional
Returns
-------
[type]
[description]
"""
assert sample_ik_fn is not None, 'Sample fn must be specified!'
# TODO sanity check samplers
ik_sols = [[] for _ in range(len(waypoint_poses))]
for i, task_pose in enumerate(waypoint_poses):
candidate_poses = [task_pose]
if sample_ee_fn is not None:
# extra dof release, copy to reuse generator
current_ee_fn = copy(sample_ee_fn)
cnt = 0
for p in current_ee_fn(task_pose):
if cnt > max_sample_ee_iter:
                    warnings.warn(('EE dof release generator was called over {} times; you probably forgot '
                                   'to put an exit in the generator. We stop generating here for you.').format(max_sample_ee_iter))
break
candidate_poses.append(p)
cnt += 1
for ee_pose in candidate_poses:
conf_list = sample_ik_fn(ee_pose)
if collision_fn is not None:
conf_list = [conf for conf in conf_list if conf and not collision_fn(conf, **kwargs)]
ik_sols[i].extend(conf_list)
# assemble the ladder graph
dof = len(joints)
graph = LadderGraph(dof)
graph.resize(len(ik_sols))
# assign rung data
for pt_id, ik_confs_pt in enumerate(ik_sols):
graph.assign_rung(pt_id, ik_confs_pt)
joint_jump_threshold = []
for joint in joints:
if joint in custom_vel_limits:
joint_jump_threshold.append(custom_vel_limits[joint])
else:
joint_jump_threshold.append(INF)
# build edges within current pose family
for i in range(graph.get_rungs_size()-1):
st_rung_id = i
end_rung_id = i + 1
jt1_list = graph.get_data(st_rung_id)
jt2_list = graph.get_data(end_rung_id)
st_size = graph.get_rung_vert_size(st_rung_id)
end_size = graph.get_rung_vert_size(end_rung_id)
# if st_size == 0 or end_size == 0:
# print(ik_sols)
assert st_size > 0, 'Ladder graph not valid: rung {}/{} is a zero size rung'.format(st_rung_id, graph.get_rungs_size())
assert end_size > 0, 'Ladder graph not valid: rung {}/{} is a zero size rung'.format(end_rung_id, graph.get_rungs_size())
# TODO: preference_cost using pose deviation
# fully-connected ladder graph
edge_builder = EdgeBuilder(st_size, end_size, dof,
jump_threshold=joint_jump_threshold,
preference_cost=1.0)
for k in range(st_size):
st_jt_id = k * dof
for j in range(end_size):
end_jt_id = j * dof
edge_builder.consider(jt1_list[st_jt_id : st_jt_id+dof], jt2_list[end_jt_id : end_jt_id+dof], j)
edge_builder.next(k)
# print(edge_builder.max_dtheta_)
# print(edge_builder.edge_scratch_)
# print(edge_builder.result)
# TODO: more report information here
if not edge_builder.has_edges:
print('Ladder graph: no edge built between {}-{} | joint threshold: {}, max delta jt: {}'.format(
st_rung_id, end_rung_id, joint_jump_threshold, edge_builder.max_dtheta_))
return None, None
edges = edge_builder.result
graph.assign_edges(i, edges)
# * use current conf in the env as start_conf
start_conf = get_joint_positions(robot, joints)
st_graph = LadderGraph(graph.dof)
st_graph.resize(1)
st_graph.assign_rung(0, [start_conf])
# TODO: upper_tim here
unified_graph = append_ladder_graph(st_graph, graph, jump_threshold=joint_jump_threshold)
if unified_graph is None:
return None, None
# perform DAG search
dag_search = DAGSearch(unified_graph)
min_cost = dag_search.run()
# list of confs
path = dag_search.shortest_path()
# delete the start conf
del path[0]
assert len(path) == len(waypoint_poses)
if len(path) == 0:
return None, None
return path, min_cost
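# Example usage of the ladder-graph planner (a minimal sketch; `robot`,
# `ik_joints`, `ee_poses`, `sample_ik_fn` and `collision_fn` are assumed to be
# constructed by the caller, e.g. via an ikfast wrapper and get_collision_fn):
#
#     path, cost = plan_cartesian_motion_lg(robot, ik_joints, ee_poses,
#                                           sample_ik_fn=sample_ik_fn,
#                                           collision_fn=collision_fn)
#     if path is None:
#         print('ladder graph planning failed')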
|
92333
|
import numpy as np
from torch.utils.data import DataLoader, SequentialSampler
HIDDEN_SIZE_BERT = 768
def flat_accuracy(preds, labels):
preds = preds.squeeze()
my_round = lambda x: 1 if x >= 0.5 else 0
    pred_flat = np.fromiter(map(my_round, preds), dtype=int).flatten()  # np.int is deprecated; use the builtin int
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
def get_max_len(sentences):
max_len = 0
# For every sentence...
for sent in sentences:
# Update the maximum sentence length.
max_len = max(max_len, len(sent))
#print('Max sentence length: ', max_len)
return max_len
def get_max_len_cap(sentences, cap: int = 128) -> (int, bool):
is_capped = False
max_len = 0
# For every sentence...
for sent in sentences:
# Update the maximum sentence length.
max_len = max(max_len, len(sent))
# check if the value is higher than the cap
if max_len >= cap:
is_capped = True
max_len = cap
break
#print('Max sentence length: ', max_len)
#print('Is capped: ', is_capped)
return max_len, is_capped
def create_data_loaders(train_dataset, val_dataset, batch_size=3):
# The DataLoader needs to know our batch size for training, so we specify it
# here. For fine-tuning BERT on a specific task, the authors recommend a batch
# size of 16 or 32.
# Create the DataLoaders for our training and validation sets.
    # Here the training samples are read sequentially as well.
train_dataloader = DataLoader(
train_dataset, # The training samples.
sampler=SequentialSampler(train_dataset), # Select batches sequentially
batch_size=batch_size # Trains with this batch size.
)
# For validation the order doesn't matter, so we'll just read them sequentially.
validation_dataloader = DataLoader(
val_dataset, # The validation samples.
sampler=SequentialSampler(val_dataset), # Pull out batches sequentially.
batch_size=batch_size # Evaluate with this batch size.
)
return train_dataloader, validation_dataloader
def format_time(elapsed):
    '''
    Takes a time in seconds and returns a string hh:mm:ss
    '''
    import datetime
# Round to the nearest second.
elapsed_rounded = int(round((elapsed)))
# Format as hh:mm:ss
return str(datetime.timedelta(seconds=elapsed_rounded))
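if __name__ == "__main__":
    # A small self-check sketch (not part of the original module): verify
    # flat_accuracy on a hand-made batch of sigmoid outputs and 0/1 labels.
    example_preds = np.array([[0.9], [0.2], [0.7], [0.4]])
    example_labels = np.array([1, 0, 0, 0])
    print("flat accuracy:", flat_accuracy(example_preds, example_labels))  # expected 0.75
    print("max length:", get_max_len(["short", "a bit longer sentence"]))  # expected 21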
|
92350
|
import EasyGA
import random
# Create the Genetic algorithm
ga = EasyGA.GA()
ga.save_data = False
def is_it_5(chromosome):
"""A very simple case test function - If the chromosomes gene value is a 5 add one
to the chromosomes overall fitness value."""
# Overall fitness value
fitness = 0
# For each gene in the chromosome
for gene in chromosome.gene_list:
# Check if its value = 5
if(gene.value == 5):
            # If its value is 5 then add one to
            # the overall fitness of the chromosome.
fitness += 1
return fitness
ga.fitness_function_impl = is_it_5
# Create random genes from 0 to 10
ga.gene_impl = lambda: random.randint(0, 10)
ga.evolve()
# Print your default genetic algorithm
ga.print_generation()
ga.print_population()
|
92352
|
import tfnn
from tfnn.body.layer import Layer
class PoolingLayer(object):
def __init__(self, pooling='max', strides=(2, 2), ksize=(2, 2), padding='SAME', name=None):
self.pooling = pooling
self.strides = strides
self.ksize = ksize
self.padding = padding
self.name = name
def pool(self, image, layer_size, n_filters):
# stride [1, x_movement, y_movement, 1]
k_x, k_y = self.ksize[0], self.ksize[1]
stride_x, stride_y = self.strides[0], self.strides[1]
if self.pooling == 'max':
self.output = tfnn.nn.max_pool(
value=image, ksize=[1, k_x, k_y, 1],
strides=[1, stride_x, stride_y, 1], padding=self.padding)
elif self.pooling == 'average':
self.output = tfnn.nn.avg_pool(
value=image, ksize=[1, k_x, k_y, 1],
strides=[1, stride_x, stride_y, 1], padding=self.padding)
else:
            raise ValueError('Unsupported pooling type: %s' % self.pooling)
length = layer_size[0] / stride_x
width = layer_size[1] / stride_y
features = n_filters
        if not (float(length).is_integer() and float(width).is_integer()):
            raise ValueError('pooling dimension error')
else:
self.out_size = [int(length), int(width), features]
return [self.output, self.out_size]
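    # Worked example (illustration only): a 28 x 28 feature map with 2 x 2
    # max pooling and strides (2, 2) gives length = 28 / 2 = 14 and
    # width = 28 / 2 = 14, i.e. out_size == [14, 14, n_filters].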
class ConvLayer(Layer):
def __init__(self,
patch_x, patch_y, n_filters, activator=None,
strides=(1, 1), padding='SAME',
pooling='max', pool_strides=(2, 2), pool_k=(2, 2),
pool_padding='SAME', image_shape=None,
dropout_layer=False, w_initial='xavier', name=None,):
super(ConvLayer, self).__init__(activator, dropout_layer, w_initial,
name, layer_type='conv')
self._check_activator(activator)
self.patch_x = patch_x
self.patch_y = patch_y
self.n_filters = n_filters
self.padding = padding
self.strides = strides
self.pooling = pooling
self.pool_strides = pool_strides
self.pool_k = pool_k
self.pool_padding = pool_padding
self.image_shape = image_shape
self.pooling_layer = PoolingLayer(
pooling=self.pooling,
strides=self.pool_strides,
ksize=self.pool_k,
padding=self.pool_padding,
)
self._params = {
'patch_x': self.patch_x, 'patch_y': self.patch_y, 'n_filters': self.n_filters,
'activator': self.activator_name, 'strides': self.strides,
'padding': self.padding, 'pooling': self.pooling,
'pool_strides': self.pool_strides, 'pool_k': self.pool_k,
'pool_padding': self.pool_padding, 'dropout_layer': self.dropout_layer,
'image_shape': self.image_shape, 'w_initial': self.w_initial, 'name': self.name,
}
def construct(self, layers_configs, layers_results):
self._check_image_shape(layers_configs, layers_results)
self.name = self._check_name(layers_configs)
# in conv, the _in_size should be the [length, width, channels]
_in_size = layers_configs['neural_structure'][-1]['output_size']
with tfnn.variable_scope(self.name):
with tfnn.variable_scope('weights') as weights_scope:
self.W = self._weight_variable([
self.patch_x, # patch length
self.patch_y, # patch width
_in_size[-1], # filter height / channels
self.n_filters
],
self.w_initial) # number of filters
tfnn.histogram_summary(self.name + '/weights', self.W)
# the image summary for visualizing filters
weights_scope.reuse_variables()
weights = tfnn.get_variable('weights', trainable=False)
# scale weights to [0 255] and convert to uint8 (maybe change scaling?)
x_min = tfnn.reduce_min(weights)
x_max = tfnn.reduce_max(weights)
weights_0_to_1 = (weights - x_min) / (x_max - x_min)
weights_0_to_255_uint8 = tfnn.image.convert_image_dtype(weights_0_to_1, dtype=tfnn.uint8)
# to tf.image_summary format [batch_size, height, width, channels]
W_transposed = tfnn.transpose(weights_0_to_255_uint8, [3, 0, 1, 2])
# image Tensor must be 4-D with last dim 1, 3, or 4,
# (n_filter, length, width, channel)
channels_to_look = 3
if W_transposed._shape[-1] > channels_to_look:
n_chunks = int(W_transposed._shape[-1] // channels_to_look)
W_transposed = tfnn.split(3, n_chunks,
W_transposed[:, :, :, :n_chunks * channels_to_look])[0]
                # this will display up to 10 filters from the n_filters in conv
tfnn.image_summary(self.name + '/filters',
W_transposed, max_images=10)
with tfnn.variable_scope('biases'):
self.b = self._bias_variable([self.n_filters, ])
tfnn.histogram_summary(self.name + '/biases', self.b)
with tfnn.name_scope('Wx_plus_b'):
product = tfnn.nn.conv2d(
input=layers_results['final'][-1],
filter=self.W,
strides=[1, self.strides[0], self.strides[1], 1],
padding=self.padding) \
+ self.b
if self.activator is None:
activated_product = product
else:
activated_product = self.activator(product)
tfnn.histogram_summary(self.name + '/activated_product', activated_product)
# pooling process
with tfnn.name_scope('pooling'):
pooled_product, _out_size = self.pooling_layer.pool(
image=activated_product, layer_size=_in_size, n_filters=self.n_filters)
tfnn.histogram_summary(self.name + '/pooled_product', pooled_product)
_do_dropout = layers_configs['params'][0]['do_dropout']
if _do_dropout and self.dropout_layer:
_keep_prob = layers_results['reg_value']
dropped_product = tfnn.nn.dropout(
pooled_product,
_keep_prob,
name='dropout')
final_product = dropped_product # don't have to rescale it back, tf dropout has done this
else:
dropped_product = None
final_product = pooled_product
self.configs_dict = {
'type': 'conv',
'name': self.name,
'neural_structure': {'input_size': _in_size, 'output_size': _out_size},
'params': self._params,
}
self.results_dict = {
'Layer': self,
'Wx_plus_b': product,
'activated': activated_product,
'dropped': dropped_product,
'final': final_product}
def _check_image_shape(self, layers_configs, layers_results):
"""
have effect only on the first conv layer
"""
if self.image_shape is not None:
if len(layers_configs['type']) == 1:
if isinstance(self.image_shape, tuple):
self.image_shape = list(self.image_shape)
elif not isinstance(self.image_shape, list):
raise ValueError('image_shape can only be a tuple or list')
# image shape have to be (x, y, channel)
layers_configs['neural_structure'][-1]['output_size'] = self.image_shape
_xs_placeholder = layers_results['final'][-1]
replaced_image_shape = self.image_shape.copy()
replaced_image_shape.insert(0, -1)
with tfnn.name_scope('reshape_inputs'):
_image_placeholder = tfnn.reshape(_xs_placeholder, replaced_image_shape)
layers_results['activated'][-1] = _image_placeholder
layers_results['final'][-1] = _image_placeholder
else:
raise IndexError('This is not the first conv layer, leave image_shape as default')
|
92364
|
import numpy as np
import sys
import tensorflow as tf
import cv2
import time
from .utils import cv2_letterbox_resize, download_from_url
import zipfile
import os
@tf.function
def transform_targets_for_output(y_true, grid_y, grid_x, anchor_idxs, classes):
# y_true: (N, boxes, (x1, y1, x2, y2, class, best_anchor))
N = tf.shape(y_true)[0]
# y_true_out: (N, grid, grid, anchors, [x, y, w, h, obj, class])
y_true_out = tf.zeros((N, grid_y, grid_x, tf.shape(anchor_idxs)[0], 6))
anchor_idxs = tf.cast(anchor_idxs, tf.int32)
indexes = tf.TensorArray(tf.int32, 1, dynamic_size=True)
updates = tf.TensorArray(tf.float32, 1, dynamic_size=True)
idx = 0
for i in tf.range(N):
for j in tf.range(tf.shape(y_true)[1]):
if tf.equal(y_true[i][j][2], 0):
continue
anchor_eq = tf.equal(anchor_idxs, tf.cast(y_true[i][j][5], tf.int32))
if tf.reduce_any(anchor_eq):
box = y_true[i][j][0:4]
box_xy = (y_true[i][j][0:2] + y_true[i][j][2:4]) / 2.
anchor_idx = tf.cast(tf.where(anchor_eq), tf.int32)
grid_size = tf.cast(tf.stack([grid_x, grid_y], axis=-1), tf.float32)
grid_xy = tf.cast(box_xy * grid_size, tf.int32)
# grid[y][x][anchor] = (tx, ty, bw, bh, obj, class)
indexes = indexes.write(idx, [i, grid_xy[1], grid_xy[0], anchor_idx[0][0]])
updates = updates.write(idx, [box[0], box[1], box[2], box[3], 1, y_true[i][j][4]])
idx += 1
    y_true_out = tf.tensor_scatter_nd_update(y_true_out, indexes.stack(), updates.stack())
    return y_true_out
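# Worked example of the grid-cell assignment above (illustration only):
# a box with normalized corners (0.2, 0.4)-(0.4, 0.6) has its center at
# box_xy = (0.3, 0.5); on a 13 x 13 grid this gives
# grid_xy = (int(0.3 * 13), int(0.5 * 13)) = (3, 6), so the target is written
# to y_true_out[i, 6, 3, anchor, :].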
def transform_targets(y_train, size, anchors, anchor_masks, classes, tiny=True):
y_outs = []
if tiny:
grid_y, grid_x = size[0] // 16, size[1] // 16
else:
grid_y, grid_x = size[0] // 32, size[1] // 32
# calculate anchor index for true boxes
anchors = tf.cast(anchors, tf.float32)
anchor_area = anchors[..., 0] * anchors[..., 1]
box_wh = y_train[..., 2:4] - y_train[..., 0:2]
box_wh = tf.tile(tf.expand_dims(box_wh, -2), (1, 1, tf.shape(anchors)[0], 1))
box_area = box_wh[..., 0] * box_wh[..., 1]
intersection = tf.minimum(box_wh[..., 0], anchors[..., 0]) * tf.minimum(box_wh[..., 1], anchors[..., 1])
iou = intersection / (box_area + anchor_area - intersection)
anchor_idx = tf.cast(tf.argmax(iou, axis=-1), tf.float32)
anchor_idx = tf.expand_dims(anchor_idx, axis=-1)
y_train = tf.concat([y_train, anchor_idx], axis=-1)
for anchor_idxs in anchor_masks:
y_out = transform_targets_for_output(y_train, grid_y, grid_x, anchor_idxs, classes)
y_outs.append(y_out)
grid_x *= 2
grid_y *= 2
return tuple(y_outs)
def decode_line(line, size):
# Decode the line to tensor
line = line.numpy().decode()
line_parts = line.strip().split()
imgname = line_parts[0]
x_train = cv2.imread(imgname)
#x_train = transform_images(x_train, size)
x_train, amat = cv2_letterbox_resize(x_train, (size, size))
x_train = x_train / 255.
xmins, ymins, xmaxs, ymaxs, labels = [], [], [], [], []
bbox_with_labels = line_parts[1:]
for bbox_with_label in bbox_with_labels:
bbox_with_label_parts = bbox_with_label.split(',')
xmin = float(bbox_with_label_parts[0])
ymin = float(bbox_with_label_parts[1])
xmax = float(bbox_with_label_parts[2])
ymax = float(bbox_with_label_parts[3])
tl = np.array([xmin, ymin, 1], np.float32)
br = np.array([xmax, ymax, 1], np.float32)
tl = np.dot(amat, tl)
br = np.dot(amat, br)
xmin, ymin = tl[0], tl[1]
xmax, ymax = br[0], br[1]
xmins.append(xmin / size)
ymins.append(ymin / size)
xmaxs.append(xmax / size)
ymaxs.append(ymax / size)
labels.append(float(bbox_with_label_parts[4]))
assert np.all(np.array(xmins) <= 1)
y_train = np.stack((xmins, ymins, xmaxs, ymaxs, labels), axis=1)
paddings = [[0, 100 - y_train.shape[0]], [0, 0]]
y_train = np.pad(y_train, paddings, mode='constant')
return x_train, y_train
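# Expected annotation line format for decode_line (an illustration with made-up
# values): an image path followed by whitespace-separated boxes, each written
# as "x1,y1,x2,y2,label", e.g.
#
#     images/000001.jpg 48,240,195,371,11 8,12,352,498,14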
def load_textline_dataset(file_pattern, size):
dataset = tf.data.TextLineDataset(file_pattern)
return dataset.map(lambda x: tf.py_function(func=decode_line, inp=[x, size], Tout=(tf.float32, tf.float32)))
def download_m2nist_if_not_exist():
data_rootdir = os.path.expanduser(os.path.join('~', '.m2nist'))
m2nist_zip_path = os.path.join(data_rootdir, 'm2nist.zip')
if os.path.exists(m2nist_zip_path):
return
os.makedirs(data_rootdir, exist_ok=True)
m2nist_zip_url = 'https://raw.githubusercontent.com/akkaze/datasets/master/m2nist.zip'
fail_counter = 0
while True:
try:
print('Trying to download m2nist...')
download_from_url(m2nist_zip_url, m2nist_zip_path)
break
except Exception as exc:
fail_counter += 1
print('Errors occured : {0}'.format(exc))
if fail_counter >= 6:
                print(
                    'Please try to download the dataset from {0} yourself and put it under the directory {1}'.format(
                        m2nist_zip_url, data_rootdir))
time.sleep(5)
continue
zipf = zipfile.ZipFile(m2nist_zip_path)
zipf.extractall(data_rootdir)
zipf.close()
def load_m2nist_dataset(dst_size=(64, 64), val_ratio=0.2):
download_m2nist_if_not_exist()
data_rootdir = os.path.expanduser(os.path.join('~', '.m2nist'))
imgs = np.load(os.path.join(data_rootdir, 'combined.npy')).astype(np.uint8)
num_data = imgs.shape[0]
num_train = int(num_data * (1 - val_ratio))
def transform_target(img, line, expected_size):
img = img.numpy()
line = line.numpy().decode()
expected_size = tuple(expected_size.numpy())
img, amat = cv2_letterbox_resize(img, expected_size)
bbox_with_labels = line.strip().split()[1:]
xmins, xmaxs, ymins, ymaxs, labels = [], [], [], [], []
for bbox_with_label in bbox_with_labels:
bbox_with_label_parts = bbox_with_label.split(',')
xmin = float(bbox_with_label_parts[0])
ymin = float(bbox_with_label_parts[1])
xmax = float(bbox_with_label_parts[2])
ymax = float(bbox_with_label_parts[3])
label = float(bbox_with_label_parts[4])
tl = np.array([xmin, ymin, 1], np.float32)
br = np.array([xmax, ymax, 1], np.float32)
tl = np.dot(amat, tl)
br = np.dot(amat, br)
xmin, ymin = tl[0], tl[1]
xmax, ymax = br[0], br[1]
xmins.append(xmin / expected_size[0])
ymins.append(ymin / expected_size[1])
xmaxs.append(xmax / expected_size[0])
ymaxs.append(ymax / expected_size[1])
labels.append(label)
img = img.astype(np.float32) / 255.
bbox = np.stack((xmins, ymins, xmaxs, ymaxs, labels), axis=1)
paddings = [[0, 100 - bbox.shape[0]], [0, 0]]
bbox = np.pad(bbox, paddings, mode='constant')
return img, bbox
def tf_transform_target(img, line):
img, mask = tf.py_function(func=transform_target, inp=[img, line, dst_size], Tout=[tf.float32, tf.float32])
img.set_shape((*dst_size[::-1], 1))
mask.set_shape((100, 5))
return img, mask
img_dataset = tf.data.Dataset.from_tensor_slices(imgs)
bbox_dataset = tf.data.TextLineDataset(os.path.join(data_rootdir, 'bbox.txt'))
dataset = tf.data.Dataset.zip((img_dataset, bbox_dataset))
dataset = dataset.map(lambda x, y: tf_transform_target(x, y))
train_dataset = dataset.take(num_train)
val_dataset = dataset.skip(num_train)
return train_dataset, val_dataset
|
92406
|
import timeit
class TimingsEntry(object):
"""A log of the runtime for an operation.
"""
def __init__(self, op):
self.op = op
self.evals = 0
self.total_time = 0
self.lastticstamp = None
@property
def avg_time(self):
if self.evals == 0:
return 0
else:
return self.total_time / self.evals
def record_timing(self, elapsed):
"""Updates the log with the new time.
"""
self.evals += 1
self.total_time += elapsed
    def tic(self):
        """ Start the default timer.
        Example: entry.tic()
                 ... code ...
                 elapsed = entry.toc()
                 print('{0}: {1:.4f} ms'.format(message, elapsed))
        """
t = timeit.default_timer()
self.lastticstamp = t
return t
    def toc(self):
        """ Stop the timer started by tic() and record the elapsed time in ms.
        """
"""
# Last tic
if self.lastticstamp is None:
            raise Exception('Error: toc() was called without calling tic() first.')
else:
t = self.lastticstamp
# Measure time in ms
elapsed = (timeit.default_timer() - t) * 1000.0 # in ms
            # Update record.
self.record_timing(elapsed)
self.lastticstamp = None
return elapsed
def __str__(self):
return "op = %s, evals = %s, total_time (ms) = %s, avg_time (ms) = %s" % (
self.op, self.evals, self.total_time, self.avg_time)
class TimingsLog(object):
"""A log of the runtime for a set of operations.
"""
def __init__(self, ops):
self.ops = ops
self.data = {}
for op in self.ops:
self.data[op] = TimingsEntry(op)
def __getitem__(self, item):
return self.data[item]
def __str__(self):
logs = []
for op in self.ops:
if self[op].evals > 0:
logs += [str(self.data[op])]
return '\n'.join(logs)
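if __name__ == "__main__":
    # A minimal usage sketch (an assumed example, not part of the original
    # module): time a toy workload with tic()/toc() and print the log.
    log = TimingsLog(["square_sum", "sort"])
    for _ in range(3):
        log["square_sum"].tic()
        sum(i * i for i in range(100000))  # stand-in workload
        log["square_sum"].toc()
    print(log)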
|
92471
|
import os
import yaml
import time
import shutil
import torch
import random
import argparse
import numpy as np
import copy
import timeit
import statistics
import datetime
from torch.utils import data
from tqdm import tqdm
import cv2
from ptsemseg.process_img import generate_noise
from ptsemseg.models import get_model
from ptsemseg.loss import get_loss_function
from ptsemseg.loader import get_loader
from ptsemseg.utils import get_logger, init_weights
from ptsemseg.metrics import runningScore, averageMeter
from ptsemseg.augmentations import get_composed_augmentations
from ptsemseg.schedulers import get_scheduler
from ptsemseg.optimizers import get_optimizer
from ptsemseg.utils import convert_state_dict
from tensorboardX import SummaryWriter
class Trainer_LearnWhen2Com(object):
def __init__(self, cfg, writer, logger, model, loss_fn, trainloader, valloader, optimizer, scheduler, device):
self.cfg = cfg
self.writer = writer
self.logger = logger
self.model = model
self.trainloader = trainloader
self.valloader = valloader
self.optimizer = optimizer
self.scheduler = scheduler
self.loss_fn = loss_fn
self.n_classes = 11
self.MO_flag = self.cfg['model']['multiple_output']
self.running_metrics_val = runningScore(self.n_classes)
self.device = device
if 'commun_label' in self.cfg["data"]:
self.if_commun_label = cfg["data"]['commun_label']
else:
self.if_commun_label = 'None'
def train(self):
start_iter = 0
# resume the training
if self.cfg["training"]["resume"] is not None:
if os.path.isfile(self.cfg["training"]["resume"]):
self.logger.info(
"Loading model and optimizer from checkpoint '{}'".format(cfg["training"]["resume"])
)
checkpoint = torch.load(self.cfg["training"]["resume"])
self.model.load_state_dict(checkpoint["model_state"])
self.optimizer.load_state_dict(checkpoint["optimizer_state"])
self.scheduler.load_state_dict(checkpoint["scheduler_state"])
start_iter = checkpoint["epoch"]
self.logger.info(
"Loaded checkpoint '{}' (iter {})".format(
self.cfg["training"]["resume"], checkpoint["epoch"]
)
)
else:
self.logger.info("No checkpoint found at '{}'".format(self.cfg["training"]["resume"]))
val_loss_meter = averageMeter()
time_meter = averageMeter()
best_iou = -100.0
i = start_iter
flag = True
# Training
while i <= self.cfg["training"]["train_iters"] and flag:
for data_list in self.trainloader:
# iteration timer
i += 1
# load data from dataloader
if self.if_commun_label != 'None':
images_list, labels_list, commun_label = data_list
else:
images_list, labels_list = data_list
# image and labels list 2 tensor
labels = labels_list[0]
images = torch.cat(tuple(images_list), dim=1)
# timer started
start_ts = time.time()
self.scheduler.step()
self.model.train() # matters for batchnorm/dropout
# from cpu to gpu
images = images.to(self.device)
labels = labels.to(self.device)
if self.if_commun_label != 'None':
commun_label = commun_label.to(self.device)
# clean the optimizer
self.optimizer.zero_grad()
# model inference
outputs, log_action, action_argmax = self.model(images, training=True)
# compute loss
loss = self.loss_fn(input=outputs, target=labels)
# compute the gradient for each variable
loss.backward()
# update the weight
self.optimizer.step()
# compute the used time
time_meter.update(time.time() - start_ts)
# Process display on screen
if (i + 1) % self.cfg["training"]["print_interval"] == 0:
fmt_str = "Iter [{:d}/{:d}] Loss: {:.4f} Time/Image: {:.4f}"
print_str = fmt_str.format(
i + 1,
self.cfg["training"]["train_iters"],
loss.item(),
time_meter.avg / self.cfg["training"]["batch_size"],
)
print(print_str)
self.logger.info(print_str)
self.writer.add_scalar("loss/train_loss", loss.item(), i + 1)
time_meter.reset()
# Validation (During training)
if i % self.cfg["training"]["val_interval"] == 0 or i == self.cfg["training"]["train_iters"]:
self.model.eval()
with torch.no_grad():
total = 0
correct_when = 0
correct_who = 0
for i_val, data_list in tqdm(enumerate(self.valloader)):
if self.if_commun_label != 'None':
images_val_list, labels_val_list, commun_label = data_list
else:
images_val_list, labels_val_list = data_list
labels_val = labels_val_list[0]
images_val = torch.cat(tuple(images_val_list), dim=1)
labels_val = labels_val.to(self.device)
if self.if_commun_label != 'None':
commun_label = commun_label.to(self.device)
images_val = images_val.to(self.device)
gt = labels_val.data.cpu().numpy()
# image loss
outputs, _, action_argmax = self.model(images_val, training=True)
val_loss = self.loss_fn(input=outputs, target=labels_val)
pred = outputs.data.max(1)[1].cpu().numpy()
action_argmax = torch.squeeze(action_argmax)
# compute action accuracy
if self.if_commun_label != 'None':
self.running_metrics_val.update_div(self.if_commun_label, gt, pred, commun_label)
self.running_metrics_val.update_selection(self.if_commun_label, commun_label, action_argmax)
self.running_metrics_val.update(gt, pred)
val_loss_meter.update(val_loss.item())
if self.if_commun_label != 'None':
when2com_acc, who2com_acc = self.running_metrics_val.get_selection_accuracy()
print('Validation when2com accuracy:{}'.format(when2com_acc))
print('Validation who2com accuracy:{}'.format(who2com_acc))
self.writer.add_scalar("val_metrics/when_com_accuacy", when2com_acc, i)
self.writer.add_scalar("val_metrics/who_com_accuracy", who2com_acc, i)
else:
when2com_acc = 0
who2com_acc = 0
self.writer.add_scalar("loss/val_loss", val_loss_meter.avg, i)
self.logger.info("Iter %d Loss: %.4f" % (i, val_loss_meter.avg))
scorers = [self.running_metrics_val]
for idx, scorer in enumerate(scorers):
score, class_iou = scorer.get_scores()
for k, v in score.items():
self.logger.info("{}: {}".format(k, v))
self.writer.add_scalar("head_{}_val_metrics/{}".format(idx, k), v, i)
for k, v in class_iou.items():
self.logger.info("{}: {}".format(k, v))
self.writer.add_scalar("head_{}_val_metrics/cls_{}".format(idx, k), v, i)
# print
print('Normal')
score, class_iou = self.running_metrics_val.get_only_normal_scores()
self.running_metrics_val.print_score(self.n_classes, score, class_iou)
print('Noise')
score, class_iou = self.running_metrics_val.get_only_noise_scores()
self.running_metrics_val.print_score(self.n_classes, score, class_iou)
print("Overall")
score, class_iou = self.running_metrics_val.get_scores()
self.running_metrics_val.print_score(self.n_classes, score, class_iou)
val_loss_meter.reset()
self.running_metrics_val.reset()
if score["Mean IoU : \t"] >= best_iou:
best_iou = score["Mean IoU : \t"]
state = {
"epoch": i,
"model_state": self.model.state_dict(),
"optimizer_state": self.optimizer.state_dict(),
"scheduler_state": self.scheduler.state_dict(),
"best_iou": best_iou,
}
save_path = os.path.join(
self.writer.file_writer.get_logdir(),
"{}_{}_best_model.pkl".format(self.cfg["model"]["arch"], self.cfg["data"]["dataset"]),
)
torch.save(state, save_path)
if i == self.cfg["training"]["train_iters"]:
flag = False
break
return save_path
def load_weight(self, model_path):
state = convert_state_dict(torch.load(model_path)["model_state"])
self.model.load_state_dict(state, strict=False)
def evaluate(self, testloader, inference_mode='activated'): # "val_split"
running_metrics = runningScore(self.n_classes)
# Setup Model
self.model.eval()
self.model.to(self.device)
for i, data_list in enumerate(testloader):
if self.if_commun_label:
images_list, labels_list, commun_label = data_list
commun_label = commun_label.to(self.device)
else:
images_list, labels_list = data_list
# multi-view inputs
images = torch.cat(tuple(images_list), dim=1)
# multiple output
if self.MO_flag:
labels = torch.cat(tuple(labels_list), dim=0)
else: # single output
labels = labels_list[0]
images = images.to(self.device)
outputs, _, action_argmax, _ = self.model(images, training=False, inference=inference_mode)
pred = outputs.data.max(1)[1].cpu().numpy()
gt = labels.numpy()
# measurement results
running_metrics.update(gt, pred)
if self.if_commun_label:
running_metrics.update_div(self.if_commun_label, gt, pred, commun_label)
print('Normal')
score, class_iou = running_metrics.get_only_normal_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
print('Noise')
score, class_iou = running_metrics.get_only_noise_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
print("Overall")
score, class_iou = running_metrics.get_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
return score, class_iou
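# Example wiring for the trainers in this module (a minimal sketch mirroring a
# typical ptsemseg training script; cfg, writer, logger, model, loss_fn, the
# data loaders, optimizer, scheduler and device are assumed to be built elsewhere):
#
#     trainer = Trainer_LearnWhen2Com(cfg, writer, logger, model, loss_fn,
#                                     trainloader, valloader, optimizer,
#                                     scheduler, device)
#     best_model_path = trainer.train()
#     trainer.load_weight(best_model_path)
#     score, class_iou = trainer.evaluate(valloader)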
class Trainer_LearnWho2Com(object):
def __init__(self, cfg, writer, logger, model, loss_fn, trainloader, valloader, optimizer, scheduler, device):
self.cfg = cfg
self.writer = writer
self.logger = logger
self.model = model
self.trainloader = trainloader
self.valloader = valloader
self.optimizer = optimizer
self.scheduler = scheduler
self.loss_fn = loss_fn
self.MO_flag = self.cfg['model']['multiple_output']
self.n_classes = 11
self.running_metrics_val = runningScore(self.n_classes)
self.device = device
# some datasets have no labels for communication
if 'commun_label' in self.cfg["data"]:
self.if_commun_label = cfg["data"]['commun_label']
else:
self.if_commun_label = 'None'
def train(self):
start_iter = 0
print('learnwho2com trainer')
# resume the training
if self.cfg["training"]["resume"] is not None:
if os.path.isfile(self.cfg["training"]["resume"]):
self.logger.info(
"Loading model and optimizer from checkpoint '{}'".format(cfg["training"]["resume"])
)
checkpoint = torch.load(self.cfg["training"]["resume"])
self.model.load_state_dict(checkpoint["model_state"])
self.optimizer.load_state_dict(checkpoint["optimizer_state"])
self.scheduler.load_state_dict(checkpoint["scheduler_state"])
start_iter = checkpoint["epoch"]
self.logger.info(
"Loaded checkpoint '{}' (iter {})".format(
self.cfg["training"]["resume"], checkpoint["epoch"]
)
)
else:
self.logger.info("No checkpoint found at '{}'".format(self.cfg["training"]["resume"]))
val_loss_meter = averageMeter()
time_meter = averageMeter()
best_iou = -100.0
i = start_iter
flag = True
# Training
while i <= self.cfg["training"]["train_iters"] and flag:
for data_list in self.trainloader:
# iteration timer
i += 1
# load data from dataloader
if self.if_commun_label != 'None':
images_list, labels_list, commun_label = data_list
else:
images_list, labels_list = data_list
# image and labels list 2 tensor
labels = labels_list[0]
images = torch.cat(tuple(images_list), dim=1)
# timer started
start_ts = time.time()
self.scheduler.step()
self.model.train() # matters for batchnorm/dropout
# from cpu to gpu
images = images.to(self.device)
labels = labels.to(self.device)
if self.if_commun_label != 'None':
commun_label = commun_label.to(self.device)
# clean the optimizer
self.optimizer.zero_grad()
# model inference
outputs, log_action, action_argmax = self.model(images, training=True)
# compute loss
loss = self.loss_fn(input=outputs, target=labels)
# compute the gradient for each variable
loss.backward()
# update the weight
self.optimizer.step()
# compute the used time
time_meter.update(time.time() - start_ts)
# Process display on screen
if (i + 1) % self.cfg["training"]["print_interval"] == 0:
fmt_str = "Iter [{:d}/{:d}] Loss: {:.4f} Time/Image: {:.4f}"
print_str = fmt_str.format(
i + 1,
self.cfg["training"]["train_iters"],
loss.item(),
time_meter.avg / self.cfg["training"]["batch_size"],
)
print(print_str)
self.logger.info(print_str)
self.writer.add_scalar("loss/train_loss", loss.item(), i + 1)
time_meter.reset()
# Validation (During training)
if i % self.cfg["training"]["val_interval"] == 0 or i == self.cfg["training"]["train_iters"]:
self.model.eval()
with torch.no_grad():
total = 0
correct_when = 0
correct_who = 0
for i_val, data_list in tqdm(enumerate(self.valloader)):
if self.if_commun_label != 'None':
images_val_list, labels_val_list, commun_label = data_list
else:
images_val_list, labels_val_list = data_list
labels_val = labels_val_list[0]
images_val = torch.cat(tuple(images_val_list), dim=1)
labels_val = labels_val.to(self.device)
if self.if_commun_label != 'None':
commun_label = commun_label.to(self.device)
images_val = images_val.to(self.device)
gt = labels_val.data.cpu().numpy()
# image loss
outputs, _, action_argmax = self.model(images_val, training=True)
val_loss = self.loss_fn(input=outputs, target=labels_val)
pred = outputs.data.max(1)[1].cpu().numpy()
action_argmax = torch.squeeze(action_argmax)
# compute action accuracy
if self.if_commun_label != 'None':
self.running_metrics_val.update_div(self.if_commun_label, gt, pred, commun_label)
self.running_metrics_val.update_selection(self.if_commun_label, commun_label, action_argmax + 1)
# plus one since target is not included in "alwaysCom" model
self.running_metrics_val.update(gt, pred)
val_loss_meter.update(val_loss.item())
if self.if_commun_label != 'None':
when2com_acc, who2com_acc = self.running_metrics_val.get_selection_accuracy()
print('Validation when2com accuracy:{}'.format(when2com_acc))
print('Validation who2com accuracy:{}'.format(who2com_acc))
self.writer.add_scalar("val_metrics/when_com_accuacy", when2com_acc, i)
self.writer.add_scalar("val_metrics/who_com_accuracy", who2com_acc, i)
else:
when2com_acc = 0
who2com_acc = 0
# for tensorboard
self.writer.add_scalar("loss/val_loss", val_loss_meter.avg, i)
self.logger.info("Iter %d Loss: %.4f" % (i, val_loss_meter.avg))
scorers = [self.running_metrics_val]
for idx, scorer in enumerate(scorers):
score, class_iou = scorer.get_scores()
for k, v in score.items():
self.logger.info("{}: {}".format(k, v))
self.writer.add_scalar("head_{}_val_metrics/{}".format(idx, k), v, i)
for k, v in class_iou.items():
self.logger.info("{}: {}".format(k, v))
self.writer.add_scalar("head_{}_val_metrics/cls_{}".format(idx, k), v, i)
# print
print('Normal')
score, class_iou = self.running_metrics_val.get_only_normal_scores()
self.running_metrics_val.print_score(self.n_classes, score, class_iou)
print('Noise')
score, class_iou = self.running_metrics_val.get_only_noise_scores()
self.running_metrics_val.print_score(self.n_classes, score, class_iou)
print("Overall")
score, class_iou = self.running_metrics_val.get_scores()
self.running_metrics_val.print_score(self.n_classes, score, class_iou)
val_loss_meter.reset()
self.running_metrics_val.reset()
if score["Mean IoU : \t"] >= best_iou:
best_iou = score["Mean IoU : \t"]
state = {
"epoch": i,
"model_state": self.model.state_dict(),
"optimizer_state": self.optimizer.state_dict(),
"scheduler_state": self.scheduler.state_dict(),
"best_iou": best_iou,
}
save_path = os.path.join(
self.writer.file_writer.get_logdir(),
"{}_{}_best_model.pkl".format(self.cfg["model"]["arch"], self.cfg["data"]["dataset"]),
)
torch.save(state, save_path)
if i == self.cfg["training"]["train_iters"]:
flag = False
break
return save_path
def load_weight(self, model_path):
state = convert_state_dict(torch.load(model_path)["model_state"])
self.model.load_state_dict(state, strict=False)
def evaluate(self, testloader, inference_mode='argmax_test'): # "val_split"
running_metrics = runningScore(self.n_classes)
# Setup Model
self.model.eval()
self.model.to(self.device)
for i, data_list in enumerate(testloader):
if self.if_commun_label:
images_list, labels_list, commun_label = data_list
commun_label = commun_label.to(self.device)
else:
images_list, labels_list = data_list
# multi-view inputs
images = torch.cat(tuple(images_list), dim=1)
# multi-view output
if self.MO_flag:
labels = torch.cat(tuple(labels_list), dim=0)
else: # single output
labels = labels_list[0]
images = images.to(self.device)
# MODEL INFERENCE
outputs, action, action_argmax = self.model(images, training=False, inference=inference_mode)
pred = outputs.data.max(1)[1].cpu().numpy()
gt = labels.numpy()
# measurement results
running_metrics.update(gt, pred)
if self.if_commun_label:
running_metrics.update_div(self.if_commun_label, gt, pred, commun_label)
running_metrics.update_selection(self.if_commun_label, commun_label, action_argmax+1)
if self.if_commun_label:
when2com_acc, who2com_acc = running_metrics.get_selection_accuracy()
print('Validation when2com accuracy:{}'.format(when2com_acc))
print('Validation who2com accuracy:{}'.format(who2com_acc))
else:
when2com_acc = 0
who2com_acc = 0
print('Normal')
score, class_iou = running_metrics.get_only_normal_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
print('Noise')
score, class_iou = running_metrics.get_only_noise_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
print("Overall")
score, class_iou = running_metrics.get_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
return score, class_iou
class Trainer_MIMOcom(object):
def __init__(self, cfg, writer, logger, model, loss_fn, trainloader, valloader, optimizer, scheduler, device):
self.cfg = cfg
self.writer = writer
self.logger = logger
self.model = model
self.trainloader = trainloader
self.valloader = valloader
self.optimizer = optimizer
self.scheduler = scheduler
self.n_classes = 11
self.loss_fn = loss_fn
self.running_metrics_val = runningScore(self.n_classes)
self.device = device
self.MO_flag = self.cfg['model']['multiple_output']
# some datasets have no labels for communication
if 'commun_label' in self.cfg["data"]:
self.if_commun_label = cfg["data"]['commun_label']
else:
self.if_commun_label = 'None'
def train(self):
# load model
print('LearnMIMOCom_Trainer')
start_iter = 0
if self.cfg["training"]["resume"] is not None:
if os.path.isfile(self.cfg["training"]["resume"]):
self.logger.info(
"Loading model and optimizer from checkpoint '{}'".format(self.cfg["training"]["resume"])
)
checkpoint = torch.load(self.cfg["training"]["resume"])
self.model.load_state_dict(checkpoint["model_state"])
self.optimizer.load_state_dict(checkpoint["optimizer_state"])
self.scheduler.load_state_dict(checkpoint["scheduler_state"])
start_iter = checkpoint["epoch"]
self.logger.info(
"Loaded checkpoint '{}' (iter {})".format(
self.cfg["training"]["resume"], checkpoint["epoch"]
)
)
else:
self.logger.info("No checkpoint found at '{}'".format(self.cfg["training"]["resume"]))
val_loss_meter = averageMeter()
time_meter = averageMeter()
best_iou = -100.0
i = start_iter
flag = True
# training
while i <= self.cfg["training"]["train_iters"] and flag:
for data_list in self.trainloader:
i += 1
start_ts = time.time()
if self.if_commun_label != 'None':
images_list, labels_list, commun_label = data_list
else:
images_list, labels_list = data_list
images = torch.cat(tuple(images_list), dim=1)
if self.MO_flag: # multiple output
labels = torch.cat(tuple(labels_list), dim=0)
else: # single output
labels = labels_list[0]
self.scheduler.step()
self.model.train() # matters for batchnorm/dropout
images = images.to(self.device)
labels = labels.to(self.device)
if self.if_commun_label != 'None':
commun_label = commun_label.to(self.device)
# image loss
self.optimizer.zero_grad()
outputs, log_action, action_argmax, _ = self.model(images, training=True, MO_flag=self.MO_flag)
loss = self.loss_fn(input=outputs, target=labels)
loss.backward()
self.optimizer.step()
time_meter.update(time.time() - start_ts)
# Process display on screen
if (i + 1) % self.cfg["training"]["print_interval"] == 0:
fmt_str = "Iter [{:d}/{:d}] Loss: {:.4f} Time/Image: {:.4f}"
print_str = fmt_str.format(
i + 1,
self.cfg["training"]["train_iters"],
loss.item(),
time_meter.avg / self.cfg["training"]["batch_size"],
)
print(print_str)
self.logger.info(print_str)
self.writer.add_scalar("loss/train_loss", loss.item(), i + 1)
time_meter.reset()
### Validation
if i % self.cfg["training"]["val_interval"] == 0 or i == self.cfg["training"]["train_iters"]:
self.model.eval()
with torch.no_grad():
for i_val, data_list in tqdm(enumerate(self.valloader)):
if self.if_commun_label != 'None':
images_val_list, labels_val_list, commun_label = data_list
commun_label = commun_label.to(self.device)
else:
images_val_list, labels_val_list = data_list
images_val = torch.cat(tuple(images_val_list), dim=1)
if self.MO_flag: # obtain multiple ground-truth
labels_val = torch.cat(tuple(labels_val_list), dim=0)
else: # only select one view gt mask
labels_val = labels_val_list[0]
labels_val = labels_val.to(self.device)
images_val = images_val.to(self.device)
gt = labels_val.data.cpu().numpy()
# image loss
outputs, _, action_argmax, _ = self.model(images_val, training=True, MO_flag=self.MO_flag)
val_loss = self.loss_fn(input=outputs, target=labels_val)
pred = outputs.data.max(1)[1].cpu().numpy()
# compute action accuracy
if self.if_commun_label != 'None':
self.running_metrics_val.update_div(self.if_commun_label, gt, pred, commun_label)
self.running_metrics_val.update_selection(self.if_commun_label, commun_label, action_argmax)
self.running_metrics_val.update(gt, pred)
val_loss_meter.update(val_loss.item())
if self.if_commun_label != 'None':
when2com_acc, who2com_acc = self.running_metrics_val.get_selection_accuracy()
print('Validation when2com accuracy:{}'.format(when2com_acc))
print('Validation who2com accuracy:{}'.format(who2com_acc))
self.writer.add_scalar("val_metrics/when_com_accuacy", when2com_acc, i)
self.writer.add_scalar("val_metrics/who_com_accuracy", who2com_acc, i)
self.writer.add_scalar("loss/val_loss", val_loss_meter.avg, i)
self.logger.info("Iter %d Loss: %.4f" % (i, val_loss_meter.avg))
print('Normal')
score, class_iou = self.running_metrics_val.get_only_normal_scores()
self.running_metrics_val.print_score(self.n_classes, score, class_iou)
print('Noise')
score, class_iou = self.running_metrics_val.get_only_noise_scores()
self.running_metrics_val.print_score(self.n_classes, score, class_iou)
print("Overall")
score, class_iou = self.running_metrics_val.get_scores()
self.running_metrics_val.print_score(self.n_classes, score, class_iou)
val_loss_meter.reset()
self.running_metrics_val.reset()
# store the best model
if score["Mean IoU : \t"] >= best_iou:
best_iou = score["Mean IoU : \t"]
state = {
"epoch": i,
"model_state": self.model.state_dict(),
"optimizer_state": self.optimizer.state_dict(),
"scheduler_state": self.scheduler.state_dict(),
"best_iou": best_iou,
}
save_path = os.path.join(
self.writer.file_writer.get_logdir(),
"{}_{}_best_model.pkl".format(self.cfg["model"]["arch"], self.cfg["data"]["dataset"]),
)
torch.save(state, save_path)
if i == self.cfg["training"]["train_iters"]:
flag = False
break
return save_path
def load_weight(self, model_path):
state = convert_state_dict(torch.load(model_path)["model_state"])
self.model.load_state_dict(state, strict=False)
def evaluate(self, testloader, inference_mode='activated'): # "val_split"
running_metrics = runningScore(self.n_classes)
# Setup Model
self.model.eval()
self.model.to(self.device)
for i, data_list in enumerate(testloader):
if self.if_commun_label != 'None':
images_list, labels_list, commun_label = data_list
commun_label = commun_label.to(self.device)
else:
images_list, labels_list = data_list
# multi-view inputs
images = torch.cat(tuple(images_list), dim=1)
# multiple output
if self.MO_flag:
labels = torch.cat(tuple(labels_list), dim=0)
else: # single output
labels = labels_list[0]
images = images.to(self.device)
outputs, _, action_argmax, bandW = self.model(images, training=False, MO_flag=self.MO_flag, inference=inference_mode)
pred = outputs.data.max(1)[1].cpu().numpy()
gt = labels.numpy()
# measurement results
running_metrics.update(gt, pred)
running_metrics.update_bandW(bandW)
if self.if_commun_label != 'None':
running_metrics.update_div(self.if_commun_label, gt, pred, commun_label)
running_metrics.update_selection(self.if_commun_label, commun_label, action_argmax)
if self.if_commun_label != 'None':
when2com_acc, who2com_acc = running_metrics.get_selection_accuracy()
print('Validation when2com accuracy:{}'.format(when2com_acc))
print('Validation who2com accuracy:{}'.format(who2com_acc))
else:
when2com_acc = 0
who2com_acc = 0
avg_bandW = running_metrics.get_avg_bandW()
print('Bandwidth: ' + str(avg_bandW))
print('Normal')
score, class_iou = running_metrics.get_only_normal_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
print('Noise')
score, class_iou = running_metrics.get_only_noise_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
print("Overall")
score, class_iou = running_metrics.get_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
return score, class_iou
class Trainer_MIMOcomWho(object):
def __init__(self, cfg, writer, logger, model, loss_fn, trainloader, valloader, optimizer, scheduler, device):
self.cfg = cfg
self.writer = writer
self.logger = logger
self.model = model
self.trainloader = trainloader
self.valloader = valloader
self.optimizer = optimizer
self.scheduler = scheduler
self.n_classes = 11
self.loss_fn = loss_fn
self.running_metrics_val = runningScore(self.n_classes)
self.device = device
self.MO_flag = self.cfg['model']['multiple_output']
# some datasets have no labels for communication
if 'commun_label' in self.cfg["data"]:
self.if_commun_label = cfg["data"]['commun_label']
else:
self.if_commun_label = 'None'
def train(self):
# load model
print('LearnMIMOComWho_Trainer')
start_iter = 0
if self.cfg["training"]["resume"] is not None:
if os.path.isfile(self.cfg["training"]["resume"]):
self.logger.info(
"Loading model and optimizer from checkpoint '{}'".format(self.cfg["training"]["resume"])
)
checkpoint = torch.load(self.cfg["training"]["resume"])
self.model.load_state_dict(checkpoint["model_state"])
self.optimizer.load_state_dict(checkpoint["optimizer_state"])
self.scheduler.load_state_dict(checkpoint["scheduler_state"])
start_iter = checkpoint["epoch"]
self.logger.info(
"Loaded checkpoint '{}' (iter {})".format(
self.cfg["training"]["resume"], checkpoint["epoch"]
)
)
else:
self.logger.info("No checkpoint found at '{}'".format(self.cfg["training"]["resume"]))
val_loss_meter = averageMeter()
time_meter = averageMeter()
best_iou = -100.0
i = start_iter
flag = True
# training
while i <= self.cfg["training"]["train_iters"] and flag:
for data_list in self.trainloader:
i += 1
start_ts = time.time()
if self.if_commun_label != 'None':
images_list, labels_list, commun_label = data_list
else:
images_list, labels_list = data_list
images = torch.cat(tuple(images_list), dim=1)
if self.MO_flag: # multiple output
labels = torch.cat(tuple(labels_list), dim=0)
else: # single output
labels = labels_list[0]
self.scheduler.step()
self.model.train() # matters for batchnorm/dropout
images = images.to(self.device)
labels = labels.to(self.device)
if self.if_commun_label != 'None':
commun_label = commun_label.to(self.device)
# image loss
self.optimizer.zero_grad()
outputs, log_action, action_argmax, _ = self.model(images, training=True, MO_flag=self.MO_flag)
loss = self.loss_fn(input=outputs, target=labels)
loss.backward()
self.optimizer.step()
time_meter.update(time.time() - start_ts)
# Process display on screen
if (i + 1) % self.cfg["training"]["print_interval"] == 0:
fmt_str = "Iter [{:d}/{:d}] Loss: {:.4f} Time/Image: {:.4f}"
print_str = fmt_str.format(
i + 1,
self.cfg["training"]["train_iters"],
loss.item(),
time_meter.avg / self.cfg["training"]["batch_size"],
)
print(print_str)
self.logger.info(print_str)
self.writer.add_scalar("loss/train_loss", loss.item(), i + 1)
time_meter.reset()
### Validation
if i % self.cfg["training"]["val_interval"] == 0 or i == self.cfg["training"]["train_iters"]:
self.model.eval()
with torch.no_grad():
for i_val, data_list in tqdm(enumerate(self.valloader)):
if self.if_commun_label != 'None':
images_val_list, labels_val_list, commun_label = data_list
commun_label = commun_label.to(self.device)
else:
images_val_list, labels_val_list = data_list
images_val = torch.cat(tuple(images_val_list), dim=1)
if self.MO_flag: # obtain multiple ground-truth
labels_val = torch.cat(tuple(labels_val_list), dim=0)
else: # only select one view gt mask
labels_val = labels_val_list[0]
labels_val = labels_val.to(self.device)
images_val = images_val.to(self.device)
gt = labels_val.data.cpu().numpy()
# image loss
outputs, _, action_argmax, _ = self.model(images_val, training=True, MO_flag=self.MO_flag)
val_loss = self.loss_fn(input=outputs, target=labels_val)
pred = outputs.data.max(1)[1].cpu().numpy()
# compute action accuracy
if self.if_commun_label != 'None':
self.running_metrics_val.update_div(self.if_commun_label, gt, pred, commun_label)
self.running_metrics_val.update_selection(self.if_commun_label, commun_label, action_argmax)
self.running_metrics_val.update(gt, pred)
val_loss_meter.update(val_loss.item())
if self.if_commun_label != 'None':
when2com_acc, who2com_acc = self.running_metrics_val.get_selection_accuracy()
print('Validation when2com accuracy:{}'.format(when2com_acc))
print('Validation who2com accuracy:{}'.format(who2com_acc))
self.writer.add_scalar("val_metrics/when_com_accuacy", when2com_acc, i)
self.writer.add_scalar("val_metrics/who_com_accuracy", who2com_acc, i)
self.writer.add_scalar("loss/val_loss", val_loss_meter.avg, i)
self.logger.info("Iter %d Loss: %.4f" % (i, val_loss_meter.avg))
print('Normal')
score, class_iou = self.running_metrics_val.get_only_normal_scores()
self.running_metrics_val.print_score(self.n_classes, score, class_iou)
print('Noise')
score, class_iou = self.running_metrics_val.get_only_noise_scores()
self.running_metrics_val.print_score(self.n_classes, score, class_iou)
print("Overall")
score, class_iou = self.running_metrics_val.get_scores()
self.running_metrics_val.print_score(self.n_classes, score, class_iou)
val_loss_meter.reset()
self.running_metrics_val.reset()
# store the best model
if score["Mean IoU : \t"] >= best_iou:
best_iou = score["Mean IoU : \t"]
state = {
"epoch": i,
"model_state": self.model.state_dict(),
"optimizer_state": self.optimizer.state_dict(),
"scheduler_state": self.scheduler.state_dict(),
"best_iou": best_iou,
}
save_path = os.path.join(
self.writer.file_writer.get_logdir(),
"{}_{}_best_model.pkl".format(self.cfg["model"]["arch"], self.cfg["data"]["dataset"]),
)
torch.save(state, save_path)
if i == self.cfg["training"]["train_iters"]:
flag = False
break
return save_path
def load_weight(self, model_path):
state = convert_state_dict(torch.load(model_path)["model_state"])
self.model.load_state_dict(state, strict=False)
def evaluate(self, testloader, inference_mode='activated'): # "val_split"
running_metrics = runningScore(self.n_classes)
# Setup Model
self.model.eval()
self.model.to(self.device)
for i, data_list in enumerate(testloader):
if self.if_commun_label != 'None':
images_list, labels_list, commun_label = data_list
commun_label = commun_label.to(self.device)
else:
images_list, labels_list = data_list
# multi-view inputs
images = torch.cat(tuple(images_list), dim=1)
# multiple output
if self.MO_flag:
labels = torch.cat(tuple(labels_list), dim=0)
else: # single output
labels = labels_list[0]
images = images.to(self.device)
outputs, _, action_argmax, bandW = self.model(images, training=False, MO_flag=self.MO_flag, inference=inference_mode)
pred = outputs.data.max(1)[1].cpu().numpy()
gt = labels.numpy()
# measurement results
running_metrics.update(gt, pred)
running_metrics.update_bandW(bandW)
if self.if_commun_label != 'None':
running_metrics.update_div(self.if_commun_label, gt, pred, commun_label)
running_metrics.update_selection(self.if_commun_label, commun_label, action_argmax)
if self.if_commun_label != 'None':
when2com_acc, who2com_acc = running_metrics.get_selection_accuracy()
print('Validation when2com accuracy:{}'.format(when2com_acc))
print('Validation who2com accuracy:{}'.format(who2com_acc))
else:
when2com_acc = 0
who2com_acc = 0
avg_bandW = running_metrics.get_avg_bandW()
print('Bandwidth: ' + str(avg_bandW))
print('Normal')
score, class_iou = running_metrics.get_only_normal_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
print('Noise')
score, class_iou = running_metrics.get_only_noise_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
print("Overall")
score, class_iou = running_metrics.get_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
return score, class_iou
class Trainer_MIMO_All_agents(object):
def __init__(self, cfg, writer, logger, model, loss_fn, trainloader, valloader, optimizer, scheduler, device):
self.cfg = cfg
self.writer = writer
self.logger = logger
self.model = model
self.trainloader = trainloader
self.valloader = valloader
self.optimizer = optimizer
self.scheduler = scheduler
self.n_classes = 11
self.loss_fn = loss_fn
self.running_metrics_val = runningScore(self.n_classes)
self.device = device
self.MO_flag = self.cfg['model']['multiple_output']
if 'commun_label' in self.cfg["data"]:
self.if_commun_label = cfg["data"]['commun_label']
else:
self.if_commun_label = 'None'
def train(self):
print('MIMO_All_Agent_Trainer')
start_iter = 0
if self.cfg["training"]["resume"] is not None:
if os.path.isfile(self.cfg["training"]["resume"]):
self.logger.info(
"Loading model and optimizer from checkpoint '{}'".format(self.cfg["training"]["resume"])
)
checkpoint = torch.load(self.cfg["training"]["resume"])
self.model.load_state_dict(checkpoint["model_state"])
self.optimizer.load_state_dict(checkpoint["optimizer_state"])
self.scheduler.load_state_dict(checkpoint["scheduler_state"])
start_iter = checkpoint["epoch"]
self.logger.info(
"Loaded checkpoint '{}' (iter {})".format(
self.cfg["training"]["resume"], checkpoint["epoch"]
)
)
else:
self.logger.info("No checkpoint found at '{}'".format(self.cfg["training"]["resume"]))
val_loss_meter = averageMeter()
time_meter = averageMeter()
best_iou = -100.0
i = start_iter
flag = True
while i <= self.cfg["training"]["train_iters"] and flag:
for data_list in self.trainloader:
if self.if_commun_label != 'None':
images_list, labels_list, commun_label = data_list
else:
images_list, labels_list = data_list
# only first image
images = images_list[0]
labels = labels_list[0]
images_list[0] = images
images = torch.cat(tuple(images_list), dim=1)
if self.cfg['model']['multiple_output']:
labels = torch.cat(tuple(labels_list), dim=0)
i += 1
start_ts = time.time()
self.scheduler.step()
self.model.train() # matters for batchnorm/dropout
images = images.to(self.device)
labels = labels.to(self.device)
self.optimizer.zero_grad()
if self.cfg['model']['shuffle_features'] == 'selection':
outputs, rand_action = self.model(images)
else:
outputs = self.model(images)
loss = self.loss_fn(input=outputs, target=labels)
loss.backward()
self.optimizer.step()
time_meter.update(time.time() - start_ts)
# Process display on screen
if i % self.cfg["training"]["print_interval"] == 0:
fmt_str = "Iter [{:d}/{:d}] Loss: {:.4f} Time/Image: {:.4f}"
print_str = fmt_str.format(
i,
self.cfg["training"]["train_iters"],
loss.item(),
time_meter.avg / self.cfg["training"]["batch_size"],
)
print(print_str)
self.logger.info(print_str)
self.writer.add_scalar("loss/train_loss", loss.item(), i)
time_meter.reset()
# Validation
if (i) % self.cfg["training"]["val_interval"] == 0 or (i) == self.cfg["training"]["train_iters"]:
self.model.eval()
with torch.no_grad():
for i_val, (data_list) in tqdm(enumerate(self.valloader), ncols=20,
desc='Validation'):
if self.if_commun_label != 'None':
images_val_list, labels_val_list, commun_label = data_list
else:
images_val_list, labels_val_list = data_list
images_val = torch.cat(tuple(images_val_list), dim=1)
labels_val = labels_val_list[0]
if self.cfg['model']['multiple_output']: # mimo single
labels_val = torch.cat(tuple(labels_val_list), dim=0)
images_val = images_val.to(self.device)
labels_val = labels_val.to(self.device)
if self.cfg['model']['shuffle_features'] == 'selection':
outputs, rand_action = self.model(images_val)
else:
outputs = self.model(images_val)
val_loss = self.loss_fn(input=outputs, target=labels_val)
pred = outputs.data.max(1)[1].cpu().numpy()
gt = labels_val.data.cpu().numpy()
self.running_metrics_val.update(gt, pred)
val_loss_meter.update(val_loss.item())
self.writer.add_scalar("loss/val_loss", val_loss_meter.avg, i)
self.logger.info("Iter %d Loss: %.4f" % (i, val_loss_meter.avg))
score, class_iou = self.running_metrics_val.get_scores()
for k, v in score.items():
print(k, v)
self.logger.info("{}: {}".format(k, v))
self.writer.add_scalar("val_metrics/{}".format(k), v, i)
for k, v in class_iou.items():
self.logger.info("{}: {}".format(k, v))
self.writer.add_scalar("val_metrics/cls_{}".format(k), v, i)
val_loss_meter.reset()
self.running_metrics_val.reset()
if score["Mean IoU : \t"] >= best_iou:
best_iou = score["Mean IoU : \t"]
state = {
"epoch": i,
"model_state": self.model.state_dict(),
"optimizer_state": self.optimizer.state_dict(),
"scheduler_state": self.scheduler.state_dict(),
"best_iou": best_iou,
}
save_path = os.path.join(
self.writer.file_writer.get_logdir(),
"{}_{}_best_model.pkl".format(self.cfg["model"]["arch"], self.cfg["data"]["dataset"]),
)
torch.save(state, save_path)
if i == self.cfg["training"]["train_iters"]:
flag = False
break
return save_path
def load_weight(self, model_path):
state = convert_state_dict(torch.load(model_path)["model_state"])
self.model.load_state_dict(state, strict=False)
def evaluate(self, testloader): # "val_split"
running_metrics = runningScore(self.n_classes)
# Setup Model
self.model.eval()
self.model.to(self.device)
for i, data_list in enumerate(testloader):
if self.if_commun_label != 'None':
images_list, labels_list, commun_label = data_list
commun_label = commun_label.to(self.device)
else:
images_list, labels_list = data_list
# multi-view inputs
images = torch.cat(tuple(images_list), dim=1)
# multi-view output
if self.MO_flag:
labels = torch.cat(tuple(labels_list), dim=0)
else: # single output
labels = labels_list[0]
images = images.to(self.device)
# MODEL INFERENCE
if self.cfg['model']['shuffle_features'] == 'selection':
outputs, action_argmax = self.model(images)
else:
outputs = self.model(images)
pred = outputs.data.max(1)[1].cpu().numpy()
gt = labels.numpy()
# measurement results
running_metrics.update(gt, pred)
if self.if_commun_label != 'None':
running_metrics.update_div(self.if_commun_label, gt, pred, commun_label)
print('Normal')
score, class_iou = running_metrics.get_only_normal_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
print('Noise')
score, class_iou = running_metrics.get_only_noise_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
print("Overall")
score, class_iou = running_metrics.get_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
return score, class_iou
class Trainer_Single_agent(object):
def __init__(self, cfg, writer, logger, model, loss_fn, trainloader, valloader, optimizer, scheduler, device):
self.cfg = cfg
self.writer = writer
self.logger = logger
self.model = model
self.trainloader = trainloader
self.valloader = valloader
self.optimizer = optimizer
self.scheduler = scheduler
self.n_classes = 11
self.loss_fn = loss_fn
self.running_metrics_val = runningScore(self.n_classes)
self.device = device
self.MO_flag = self.cfg['model']['multiple_output']
def train(self):
print('Training')
start_iter = 0
if self.cfg["training"]["resume"] is not None:
if os.path.isfile(self.cfg["training"]["resume"]):
self.logger.info(
"Loading model and optimizer from checkpoint '{}'".format(self.cfg["training"]["resume"])
)
checkpoint = torch.load(self.cfg["training"]["resume"])
self.model.load_state_dict(checkpoint["model_state"])
self.optimizer.load_state_dict(checkpoint["optimizer_state"])
self.scheduler.load_state_dict(checkpoint["scheduler_state"])
start_iter = checkpoint["epoch"]
self.logger.info(
"Loaded checkpoint '{}' (iter {})".format(
self.cfg["training"]["resume"], checkpoint["epoch"]
)
)
else:
self.logger.info("No checkpoint found at '{}'".format(self.cfg["training"]["resume"]))
val_loss_meter = averageMeter()
time_meter = averageMeter()
best_iou = -100.0
i = start_iter
flag = True
while i <= self.cfg["training"]["train_iters"] and flag:
for data_list in self.trainloader:
images_list, labels_list = data_list
# only first image
images = images_list[0]
labels = labels_list[0]
if self.cfg['model']['multiple_output']: # mimo single
labels = torch.cat(tuple(labels_list), dim=0)
images = torch.cat(tuple(images_list), dim=0)
i += 1
start_ts = time.time()
self.scheduler.step()
self.model.train() # matters for batchnorm/dropout
images = images.to(self.device)
labels = labels.to(self.device)
self.optimizer.zero_grad()
outputs = self.model(images)
loss = self.loss_fn(input=outputs, target=labels)
loss.backward()
self.optimizer.step()
time_meter.update(time.time() - start_ts)
# Process display on screen
if i % self.cfg["training"]["print_interval"] == 0:
fmt_str = "Iter [{:d}/{:d}] Loss: {:.4f} Time/Image: {:.4f}"
print_str = fmt_str.format(
i,
self.cfg["training"]["train_iters"],
loss.item(),
time_meter.avg / self.cfg["training"]["batch_size"],
)
print(print_str)
self.logger.info(print_str)
self.writer.add_scalar("loss/train_loss", loss.item(), i)
time_meter.reset()
# Validation
if i % self.cfg["training"]["val_interval"] == 0 or i == self.cfg["training"]["train_iters"]:
self.model.eval()
with torch.no_grad():
for i_val, (images_val_list, labels_val_list) in tqdm(enumerate(self.valloader), ncols=20,
desc='Validation'):
images_val = images_val_list[0]
labels_val = labels_val_list[0]
if self.cfg['model']['multiple_output']: # mimo single
labels_val = torch.cat(tuple(labels_val_list), dim=0)
if self.cfg["model"]["arch"] == 'Single_agent':
images_val = torch.cat(tuple(images_val_list), dim=0)
images_val = images_val.to(self.device)
labels_val = labels_val.to(self.device)
outputs = self.model(images_val)
val_loss = self.loss_fn(input=outputs, target=labels_val)
pred = outputs.data.max(1)[1].cpu().numpy()
gt = labels_val.data.cpu().numpy()
self.running_metrics_val.update(gt, pred)
val_loss_meter.update(val_loss.item())
self.writer.add_scalar("loss/val_loss", val_loss_meter.avg, i)
self.logger.info("Iter %d Loss: %.4f" % (i, val_loss_meter.avg))
score, class_iou = self.running_metrics_val.get_scores()
for k, v in score.items():
print(k, v)
self.logger.info("{}: {}".format(k, v))
self.writer.add_scalar("val_metrics/{}".format(k), v, i)
for k, v in class_iou.items():
self.logger.info("{}: {}".format(k, v))
self.writer.add_scalar("val_metrics/cls_{}".format(k), v, i)
val_loss_meter.reset()
self.running_metrics_val.reset()
if score["Mean IoU : \t"] >= best_iou:
best_iou = score["Mean IoU : \t"]
state = {
"epoch": i,
"model_state": self.model.state_dict(),
"optimizer_state": self.optimizer.state_dict(),
"scheduler_state": self.scheduler.state_dict(),
"best_iou": best_iou,
}
save_path = os.path.join(
self.writer.file_writer.get_logdir(),
"{}_{}_best_model.pkl".format(self.cfg["model"]["arch"], self.cfg["data"]["dataset"]),
)
torch.save(state, save_path)
if i == self.cfg["training"]["train_iters"]:
flag = False
break
return save_path
def load_weight(self, model_path):
state = convert_state_dict(torch.load(model_path)["model_state"])
self.model.load_state_dict(state, strict=False)
def evaluate(self, testloader):
# local evaluation metric
running_metrics = runningScore(self.n_classes)
# Set up model for evaluation
self.model.eval()
self.model.to(self.device)
for i, data_list in enumerate(testloader):
images_list, labels_list = data_list
# multi-view inputs
images = torch.cat(tuple(images_list), dim=0)
# multi-view output
if self.cfg['model']['multiple_output']:
labels = torch.cat(tuple(labels_list), dim=0)
else: # single output
labels = labels_list[0]
images = images.to(self.device)
outputs = self.model(images)
pred = outputs.data.max(1)[1].cpu().numpy()
gt = labels.numpy()
# measurement results
running_metrics.update(gt, pred)
print("Overall")
score, class_iou = running_metrics.get_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
return score, class_iou
class Trainer_All_agents(object):
def __init__(self, cfg, writer, logger, model, loss_fn, trainloader, valloader, optimizer, scheduler, device):
self.cfg = cfg
self.writer = writer
self.logger = logger
self.model = model
self.trainloader = trainloader
self.valloader = valloader
self.optimizer = optimizer
self.scheduler = scheduler
self.n_classes = 11
self.loss_fn = loss_fn
self.running_metrics_val = runningScore(self.n_classes)
self.device = device
self.MO_flag = self.cfg['model']['multiple_output']
if 'commun_label' in self.cfg["data"]:
self.if_commun_label = cfg["data"]['commun_label']
else:
self.if_commun_label = 'None'
def train(self):
print('Training')
start_iter = 0
if self.cfg["training"]["resume"] is not None:
if os.path.isfile(self.cfg["training"]["resume"]):
self.logger.info(
"Loading model and optimizer from checkpoint '{}'".format(self.cfg["training"]["resume"])
)
checkpoint = torch.load(self.cfg["training"]["resume"])
self.model.load_state_dict(checkpoint["model_state"])
self.optimizer.load_state_dict(checkpoint["optimizer_state"])
self.scheduler.load_state_dict(checkpoint["scheduler_state"])
start_iter = checkpoint["epoch"]
self.logger.info(
"Loaded checkpoint '{}' (iter {})".format(
self.cfg["training"]["resume"], checkpoint["epoch"]
)
)
else:
self.logger.info("No checkpoint found at '{}'".format(self.cfg["training"]["resume"]))
val_loss_meter = averageMeter()
time_meter = averageMeter()
best_iou = -100.0
i = start_iter
flag = True
while i <= self.cfg["training"]["train_iters"] and flag:
for data_list in self.trainloader:
if self.if_commun_label != 'None':
images_list, labels_list, commun_label = data_list
else:
images_list, labels_list = data_list
images = torch.cat(tuple(images_list), dim=1)
labels = labels_list[0]
if self.cfg['model']['multiple_output']: # multiple output
labels = torch.cat(tuple(labels_list), dim=0)
i += 1
start_ts = time.time()
self.scheduler.step()
self.model.train()
images = images.to(self.device)
labels = labels.to(self.device)
self.optimizer.zero_grad()
if self.cfg['model']['shuffle_features'] == 'selection': # randcom
outputs, rand_action = self.model(images)
else: # catall
outputs = self.model(images)
loss = self.loss_fn(input=outputs, target=labels)
loss.backward()
self.optimizer.step()
time_meter.update(time.time() - start_ts)
# Process display on screen
if i % self.cfg["training"]["print_interval"] == 0:
fmt_str = "Iter [{:d}/{:d}] Loss: {:.4f} Time/Image: {:.4f}"
print_str = fmt_str.format(
i,
self.cfg["training"]["train_iters"],
loss.item(),
time_meter.avg / self.cfg["training"]["batch_size"],
)
print(print_str)
self.logger.info(print_str)
self.writer.add_scalar("loss/train_loss", loss.item(), i)
time_meter.reset()
# Validation
if i % self.cfg["training"]["val_interval"] == 0 or i == self.cfg["training"]["train_iters"]:
self.model.eval()
with torch.no_grad():
for i_val, (data_val_list) in tqdm(enumerate(self.valloader), ncols=20,
desc='Validation'):
if self.if_commun_label != 'None':
images_val_list, labels_val_list, commun_label = data_val_list
else:
images_val_list, labels_val_list = data_val_list
images_val = torch.cat(tuple(images_val_list), dim=1)
labels_val = labels_val_list[0]
if self.cfg['model']['multiple_output']:
labels_val = torch.cat(tuple(labels_val_list), dim=0)
images_val = images_val.to(self.device)
labels_val = labels_val.to(self.device)
if self.cfg['model']['shuffle_features'] == 'selection':
outputs, rand_action = self.model(images_val)
else:
outputs = self.model(images_val)
val_loss = self.loss_fn(input=outputs, target=labels_val)
pred = outputs.data.max(1)[1].cpu().numpy()
gt = labels_val.data.cpu().numpy()
self.running_metrics_val.update(gt, pred)
val_loss_meter.update(val_loss.item())
self.writer.add_scalar("loss/val_loss", val_loss_meter.avg, i)
self.logger.info("Iter %d Loss: %.4f" % (i, val_loss_meter.avg))
score, class_iou = self.running_metrics_val.get_scores()
for k, v in score.items():
print(k, v)
self.logger.info("{}: {}".format(k, v))
self.writer.add_scalar("val_metrics/{}".format(k), v, i)
for k, v in class_iou.items():
self.logger.info("{}: {}".format(k, v))
self.writer.add_scalar("val_metrics/cls_{}".format(k), v, i)
val_loss_meter.reset()
self.running_metrics_val.reset()
if score["Mean IoU : \t"] >= best_iou:
best_iou = score["Mean IoU : \t"]
state = {
"epoch": i,
"model_state": self.model.state_dict(),
"optimizer_state": self.optimizer.state_dict(),
"scheduler_state": self.scheduler.state_dict(),
"best_iou": best_iou,
}
save_path = os.path.join(
self.writer.file_writer.get_logdir(),
"{}_{}_best_model.pkl".format(self.cfg["model"]["arch"], self.cfg["data"]["dataset"]),
)
torch.save(state, save_path)
if i == self.cfg["training"]["train_iters"]:
flag = False
break
return save_path
def load_weight(self, model_path):
state = convert_state_dict(torch.load(model_path)["model_state"])
self.model.load_state_dict(state, strict=False)
def evaluate(self, testloader): # "val_split"
running_metrics = runningScore(self.n_classes)
# Setup Model
self.model.eval()
self.model.to(self.device)
for i, data_list in enumerate(testloader):
if self.if_commun_label != 'None':
images_list, labels_list, commun_label = data_list
commun_label = commun_label.to(self.device)
else:
images_list, labels_list = data_list
# multi-view inputs
images = torch.cat(tuple(images_list), dim=1)
# multi-view output
if self.MO_flag:
labels = torch.cat(tuple(labels_list), dim=0)
else: # single output
labels = labels_list[0]
images = images.to(self.device)
if self.cfg['model']['shuffle_features'] == 'selection':
outputs, rand_action = self.model(images)
else:
outputs = self.model(images)
pred = outputs.data.max(1)[1].cpu().numpy()
gt = labels.numpy()
# measurement results
running_metrics.update(gt, pred)
if self.if_commun_label != 'None':
running_metrics.update_div(self.if_commun_label, gt, pred, commun_label)
print('Normal')
score, class_iou = running_metrics.get_only_normal_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
print('Noise')
score, class_iou = running_metrics.get_only_noise_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
print("Overall")
score, class_iou = running_metrics.get_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
return score, class_iou
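# --- Hedged usage sketch (addition, not part of the original trainers) ---
# Every trainer above shares the same best-checkpoint pattern: track the best
# mean IoU seen during validation and dump model/optimizer/scheduler state
# whenever it improves. The self-contained sketch below reproduces just that
# pattern with a toy model; the file name and metric values are illustrative.
import torch
import torch.nn as nn

def save_if_best(model, optimizer, scheduler, iteration, metric, best_metric, path):
    """Save a checkpoint dict when `metric` improves on `best_metric`; return the new best."""
    if metric >= best_metric:
        state = {
            "epoch": iteration,
            "model_state": model.state_dict(),
            "optimizer_state": optimizer.state_dict(),
            "scheduler_state": scheduler.state_dict(),
            "best_iou": metric,
        }
        torch.save(state, path)
        return metric
    return best_metric

if __name__ == "__main__":
    toy = nn.Linear(4, 2)
    opt = torch.optim.SGD(toy.parameters(), lr=0.1)
    sched = torch.optim.lr_scheduler.StepLR(opt, step_size=10)
    best = -100.0
    for it, miou in enumerate([0.3, 0.5, 0.4]):  # fake validation mIoU values
        best = save_if_best(toy, opt, sched, it, miou, best, "toy_best_model.pkl")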
|
92482
|
import numpy as np
from .. import inf
from ... import blm
from . import learning
from .prior import prior
class model:
def __init__(self, lik, mean, cov, inf='exact'):
self.lik = lik
self.prior = prior(mean=mean, cov=cov)
self.inf = inf
self.num_params = self.lik.num_params + self.prior.num_params
self.params = self.cat_params(self.lik.params, self.prior.params)
self.stats = ()
def cat_params(self, lik_params, prior_params):
''' concatenate the likelihood and prior parameters '''
params = np.append(lik_params, prior_params)
return params
def decomp_params(self, params=None):
if params is None:
params = np.copy(self.params)
lik_params = params[0:self.lik.num_params]
prior_params = params[self.lik.num_params:]
return lik_params, prior_params
def set_params(self, params):
self.params = params
lik_params, prior_params = self.decomp_params(params)
self.lik.set_params(lik_params)
self.prior.set_params(prior_params)
def sub_sampling(self, X, t, N):
num_data = X.shape[0]
if N is None or N < num_data:
index = np.random.permutation(num_data)
subX = X[index[0:N], :]
subt = t[index[0:N]]
else:
subX = X
subt = t
return subX, subt
def export_blm(self, num_basis):
if not hasattr(self.prior.cov, "rand_expans"):
raise ValueError('The kernel must implement rand_expans (random feature expansion) to be exported as a BLM.')
basis_params = self.prior.cov.rand_expans(num_basis)
basis = blm.basis.fourier(basis_params)
prior = blm.prior.gauss(num_basis)
lik = blm.lik.gauss(blm.lik.linear(basis, bias=self.prior.get_mean(1)),
blm.lik.cov(self.lik.params))
blr = blm.model(lik, prior)
return blr
def eval_marlik(self, params, X, t, N=None):
subX, subt = self.sub_sampling(X, t, N)
if self.inf == 'exact':
marlik = inf.exact.eval_marlik(self, subX, subt, params=params)
else:
pass
return marlik
def get_grad_marlik(self, params, X, t, N=None):
subX, subt = self.sub_sampling(X, t, N)
if self.inf == 'exact':
grad_marlik = inf.exact.get_grad_marlik(self, subX, subt,
params=params)
return grad_marlik
def get_params_bound(self):
if self.lik.num_params != 0:
bound = self.lik.get_params_bound()
if self.prior.mean.num_params != 0:
bound.extend(self.prior.mean.get_params_bound())
if self.prior.cov.num_params != 0:
bound.extend(self.prior.cov.get_params_bound())
return bound
def prepare(self, X, t, params=None):
if params is None:
params = np.copy(self.params)
if self.inf == 'exact':
self.stats = inf.exact.prepare(self, X, t, params)
else:
pass
def get_post_fmean(self, X, Z, params=None):
if params is None:
params = np.copy(self.params)
if self.inf == 'exact':
post_fmu = inf.exact.get_post_fmean(self, X, Z, params)
return post_fmu
def get_post_fcov(self, X, Z, params=None, diag=True):
if params is None:
params = np.copy(self.params)
if self.inf == 'exact':
post_fcov = inf.exact.get_post_fcov(self, X, Z, params, diag)
return post_fcov
def post_sampling(self, X, Z, params=None, N=1, alpha=1):
if params is None:
params = np.copy(self.params)
fmean = self.get_post_fmean(X, Z, params=params)
fcov = self.get_post_fcov(X, Z, params=params, diag=False)
return np.random.multivariate_normal(fmean, fcov * alpha**2, N)
def predict_sampling(self, X, Z, params=None, N=1):
if params is None:
params = np.copy(self.params)
ndata = Z.shape[0]
fmean = self.get_post_fmean(X, Z, params=params)
fcov = self.get_post_fcov(X, Z, params=params, diag=False) \
+ self.lik.get_cov(ndata)
return np.random.multivariate_normal(fmean, fcov, N)
def print_params(self):
print('\n')
if self.lik.num_params != 0:
print('likelihood parameter = ', self.lik.params)
if self.prior.mean.num_params != 0:
print('mean parameter in GP prior: ', self.prior.mean.params)
print('covariance parameter in GP prior: ', self.prior.cov.params)
print('\n')
def get_cand_params(self, X, t):
''' candidate for parameters '''
params = np.zeros(self.num_params)
if self.lik.num_params != 0:
params[0:self.lik.num_params] = self.lik.get_cand_params(t)
temp = self.lik.num_params
if self.prior.mean.num_params != 0:
params[temp:temp + self.prior.mean.num_params] \
= self.prior.mean.get_cand_params(t)
temp += self.prior.mean.num_params
if self.prior.cov.num_params != 0:
params[temp:] = self.prior.cov.get_cand_params(X, t)
return params
def fit(self, X, t, config):
method = config.learning.method
if method == 'adam':
adam = learning.adam(self, config)
params = adam.run(X, t)
if method in ('bfgs', 'batch'):
bfgs = learning.batch(self, config)
params = bfgs.run(X, t)
self.set_params(params)
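# --- Hedged illustration (addition, not part of the original class) ---
# `cat_params`/`decomp_params` simply pack the likelihood parameters in front
# of the prior parameters in one flat vector. The stand-alone check below
# mimics that round trip with plain numpy (np is imported at the top of this
# module); the parameter sizes are made up for illustration.
def _packing_roundtrip_example():
    lik_params = np.array([0.1])             # e.g. a single noise parameter
    prior_params = np.array([1.0, 0.5])      # e.g. mean + covariance parameters
    packed = np.append(lik_params, prior_params)      # what cat_params does
    lik_back = packed[0:len(lik_params)]              # what decomp_params does
    prior_back = packed[len(lik_params):]
    assert np.allclose(lik_back, lik_params)
    assert np.allclose(prior_back, prior_params)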
|
92485
|
import numpy as np
import labels as L
import sys
import tensorflow.contrib.keras as keras
import tensorflow as tf
from keras import backend as K
K.set_learning_phase(0)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.engine import Layer, InputSpec, InputLayer
from keras.models import Model, Sequential, load_model
from keras.layers import Dropout, Embedding, concatenate
from keras.layers import Conv1D, MaxPooling1D, GlobalAveragePooling1D, Conv2D, MaxPool2D, ZeroPadding1D
from keras.layers import Dense, Input, Flatten, BatchNormalization
from keras.layers import Concatenate, Dot, Merge, Multiply, RepeatVector
from keras.layers import Bidirectional, TimeDistributed
from keras.layers import SimpleRNN, LSTM, GRU, Lambda, Permute
from keras.layers.core import Reshape, Activation
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint,EarlyStopping,TensorBoard
from keras.constraints import maxnorm
from keras.regularizers import l2
from keras.metrics import top_k_categorical_accuracy
import keras.metrics
def top_3_accuracy(y_true, y_pred):
return top_k_categorical_accuracy(y_true, y_pred, k=3)
keras.metrics.top_3_accuracy = top_3_accuracy
import pickle
EMBEDDING_DIM = 300
MAX_SEQUENCE_LENGTH = 100
MAX_NUMBER_WORDS = 136085
traces = []
with open('/app/dataset.txt', 'r') as tracesf:
traces = list(tracesf.readlines())
names = [ t.split('|')[0].strip() for t in traces ]
traces = [
(t.split('|')[1].strip(), t.split('|')[2].strip(), t.split('|')[3])
for t in traces
]
labels = [ L.LABELS[t[0]][0] for t in traces ]
texts = [ t[1] for t in traces ]
wrongs = [ t[2].strip() for t in traces ]
for t in traces:
assert len(t) <= MAX_SEQUENCE_LENGTH
tokenizer = None
with open('/app/assets/tokenizer.pkl', 'rb') as wordf:
tokenizer = pickle.load(wordf)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(labels), num_classes=L.NUM_LABELS)
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
model = load_model('/app/{}'.format(sys.argv[1]))
answers = model.predict(data, batch_size=32)
get = lambda i: [ l[0] for l in L.LABELS.items() if l[1][0] == i ][0]
top1 = 0
top3 = 0
top5 = 0
mistakes3 = 0
mistakes5 = 0
total = 0
for j,answer in enumerate(answers):
goal = get(np.argmax(labels[j]))
print('GOAL == {}:'.format(goal))
idx = 0
total += 1
for r in sorted([ (get(i), answer[i]) for i in range(0, len(answer)) ], key=lambda x: -x[1])[:5]:
idx += 1
print(' {} ({:.2%})'.format(*r))
if r[0] == goal and idx <= 1:
top1 += 1
if r[0] == goal and idx <= 3:
top3 += 1
if r[0] == goal and idx <= 5:
top5 += 1
if r[0] == wrongs[j] and idx <= 3:
mistakes3 += 1
if r[0] == wrongs[j] and idx <= 5:
mistakes5 += 1
print('Accuracy @1: {:.2%} ({}/{})'.format(float(top1)/float(total), top1, total))
print('Accuracy @3: {:.2%} ({}/{})'.format(float(top3)/float(total), top3, total))
print('Accuracy @5: {:.2%} ({}/{})'.format(float(top5)/float(total), top5, total))
print('----')
print('Mistakes @3: {:.2%} ({}/{})'.format(float(mistakes3)/float(total), mistakes3, total))
print('Mistakes @5: {:.2%} ({}/{})'.format(float(mistakes5)/float(total), mistakes5, total))
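# --- Hedged illustration (addition, not part of the original script) ---
# The top-1/3/5 counting above ranks each softmax row and checks where the
# true label lands. The toy check below shows the same idea on a made-up
# probability vector (values and label index are illustrative only).
toy_probs = np.array([0.1, 0.6, 0.05, 0.25])   # fake class probabilities
toy_goal = 3                                   # fake true class index
toy_ranked = np.argsort(-toy_probs)            # class indices, best first
assert toy_goal not in toy_ranked[:1]          # not the top-1 prediction
assert toy_goal in toy_ranked[:3]              # but inside the top 3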
|
92529
|
from unittest import TestCase
import numpy as np
from source.analysis.performance.curve_performance import ROCPerformance, PrecisionRecallPerformance
class TestROCPerformance(TestCase):
def test_properties(self):
true_positive_rates = np.array([1, 2])
false_positive_rates = np.array([3, 4])
roc_performance = ROCPerformance(true_positive_rates=true_positive_rates,
false_positive_rates=false_positive_rates)
self.assertListEqual(true_positive_rates.tolist(), roc_performance.true_positive_rates.tolist())
self.assertListEqual(false_positive_rates.tolist(), roc_performance.false_positive_rates.tolist())
class TestPRPerformance(TestCase):
def test_properties(self):
precisions = np.array([1, 2])
recalls = np.array([3, 4])
precision_recall_performance = PrecisionRecallPerformance(precisions=precisions,
recalls=recalls)
self.assertListEqual(precisions.tolist(), precision_recall_performance.precisions.tolist())
self.assertListEqual(recalls.tolist(), precision_recall_performance.recalls.tolist())
|
92538
|
from tg_bot.handlers import (
start,
registration,
info,
main_menu,
schedule,
attestation,
suburbans,
editor,
rate,
other_text,
inline_handlers
)
|
92556
|
from __future__ import print_function
import numpy as np
import pandas as pd
import inspect
import os
import time
from . import Model
from . import Utils as U
#------------------------------
#FINDING NEAREST NEIGHBOR
#------------------------------
def mindistance(x,xma,Nx):
distx = 0
mindist = 1000000 * U.PC * U.AU
j = None
for i in range(Nx):
distx = abs(x-xma[i])
if (distx<mindist):
mindist=distx
j=i
return j
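# Hedged aside (addition): for a 1-D array the linear scan above is equivalent
# to a single vectorized call; the helper below is a sketch of that alternative
# and is not used elsewhere in this module.
def mindistance_vectorized(x, xma, Nx):
    "Index of the entry of xma[:Nx] closest to x (numpy one-liner sketch)."
    return int(np.argmin(np.abs(x - np.asarray(xma)[:Nx])))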
#------------------------------
#OVERLAPPING SUBMODELS INTO GRID
#------------------------------
def overlap(GRID, submodels = [''], folder = './Subgrids/',
T_min = 30., rho_min = 1.e9,
all = False, radmc3d = False):
func_name = inspect.stack()[0][3]
if folder[-1] != '/': folder = folder + '/'
t0 = time.time()
num=int(np.loadtxt(os.popen("ls -1 %s*.dat| wc -l"%folder)))
data=os.popen("ls -1 %s*.dat"%folder).read().split('\n',num)[:-1]
if all:
names = [name for name in data] # if '_S.' in name or '_MSW' in name]
files = [np.loadtxt(name) for name in names]
else:
submodels = [folder + sub for sub in submodels]
names = [name for name in submodels]
files = [np.loadtxt(name) for name in names]
detected = [tmp.split(folder)[1] for tmp in data]
read = [tmp.split(folder)[1] for tmp in names]
print ("Running function '%s'..."%func_name)
print ('Files detected (%d):'%num, detected,
'\nFiles to merge in grid (%d):'%len(files), read)
NTotal = GRID.NPoints
Nx, Ny, Nz = GRID.Nodes
cm3_to_m3 = 1e6
gamma = 7./5 #Gamma for diatomic molecules
DENS = -1*np.ones(NTotal) #, dtype='float64') * 0.5 # * dens_back
TEMP = np.zeros(NTotal) # * temp_back * dens_back
ab0 = 5e-8
ABUND = np.zeros(NTotal) #np.ones(NTotal) * ab0
gtd0 = 100.
GTD = np.zeros(NTotal) #np.ones(NTotal) * gtd0
VEL = [np.zeros(NTotal),np.zeros(NTotal),np.ones(NTotal)*7e8]
#----------------------
#----------------------
#-------------------
#SUBGRIDS DEFINITION
#-------------------
NFiles = len(files); CountFiles = np.arange(NFiles)
lenFiles = [len(file) for file in files]
dens_tmp = [[{},{}] for i in CountFiles]
temp_tmp = [{} for i in CountFiles]
vel_tmp = [[{} for i in CountFiles] for i in range(3)]
abund_tmp = [{} for i in CountFiles]
gtd_tmp = [{} for i in CountFiles]
hg=0
IDList = [[] for i in CountFiles]
Xgrid, Ygrid, Zgrid = GRID.XYZgrid
for m in range(NFiles):
for n in files[m]:
x,y,z = n[1], n[2], n[3]
i = mindistance(x,Xgrid,Nx)
j = mindistance(y,Ygrid,Ny)
k = mindistance(z,Zgrid,Nz)
Num = i*(Ny)*(Nz)+j*(Nz)+k; #ID for the Global Grid
#if Num in IDList[m]: #Really slow as the size of IDList increases
try:
dens_tmp[m][0][Num] += n[4]
dens_tmp[m][1][Num] += 1
temp_tmp[m][Num] += n[4] * n[5]
vel_tmp[0][m][Num] += n[4] * n[6]
vel_tmp[1][m][Num] += n[4] * n[7]
vel_tmp[2][m][Num] += n[4] * n[8]
abund_tmp[m][Num] += n[4] * n[9]
gtd_tmp[m][Num] += n[4] * n[10]
except KeyError:
#else:
dens_tmp[m][0][Num] = n[4]
dens_tmp[m][1][Num] = 1
temp_tmp[m][Num] = n[4] * n[5]
vel_tmp[0][m][Num] = n[4] * n[6]
vel_tmp[1][m][Num] = n[4] * n[7]
vel_tmp[2][m][Num] = n[4] * n[8]
abund_tmp[m][Num] = n[4] * n[9]
gtd_tmp[m][Num] = n[4] * n[10]
IDList[m].append(Num)
#hg+=1
#if hg%50000 == 0: print (hg)
print ('Finished merging for: %s'%names[m])
print ('Computing combined densities, temperatures, etc....')
for m in range(NFiles):
for ind in IDList[m]:
dens_tot = dens_tmp[m][0][ind]
temp_tmp[m][ind] = temp_tmp[m][ind] / dens_tot
abund_tmp[m][ind] = abund_tmp[m][ind] / dens_tot
gtd_tmp[m][ind]= gtd_tmp[m][ind] / dens_tot
vel_tmp[0][m][ind] = vel_tmp[0][m][ind] / dens_tot
vel_tmp[1][m][ind] = vel_tmp[1][m][ind] / dens_tot
vel_tmp[2][m][ind] = vel_tmp[2][m][ind] / dens_tot
dens_tmp[m][0][ind] = dens_tot / dens_tmp[m][1][ind]
#-------------------
#FOR THE GLOBAL GRID
#-------------------
dens_dum = dens_tmp[m][0][ind]
temp_dum = temp_tmp[m][ind]
vel0_dum = vel_tmp[0][m][ind]
vel1_dum = vel_tmp[1][m][ind]
vel2_dum = vel_tmp[2][m][ind]
abund_dum = abund_tmp[m][ind]
gtd_dum = gtd_tmp[m][ind]
DENS[ind] += dens_dum
TEMP[ind] += dens_dum * temp_dum
VEL[0][ind] += dens_dum * vel0_dum
VEL[1][ind] += dens_dum * vel1_dum
VEL[2][ind] += dens_dum * vel2_dum
ABUND[ind] += dens_dum * abund_dum
GTD[ind] += dens_dum * gtd_dum
TEMP = TEMP / DENS
ABUND = ABUND / DENS
GTD = GTD / DENS
VEL[0] = VEL[0] / DENS
VEL[1] = VEL[1] / DENS
VEL[2] = VEL[2] / DENS
VEL = Model.Struct( **{'x': VEL[0], 'y': VEL[1], 'z': VEL[2]})
ind = np.where(DENS == -1.0)
DENS[ind] = rho_min
ABUND[ind] = ab0 #?
GTD[ind] = gtd0 #?
DENS = np.where(DENS < rho_min, rho_min, DENS)
TEMP = np.where(TEMP == 0., T_min, TEMP)
if radmc3d: Model.Datatab_RADMC3D_FreeFree(DENS,TEMP,GRID)
else: Model.DataTab_LIME(DENS,TEMP,VEL,ABUND,GTD,GRID)
AllProp = Model.Struct( **{'GRID': GRID, 'density': DENS, 'temperature': TEMP, 'vel': VEL, 'abundance': ABUND, 'gtd': GTD})
print ('%s is done!'%func_name)
print ('Elapsed time: %.3fs' % (time.time() - t0))
print ('-------------------------------------------------\n-------------------------------------------------')
return AllProp
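# --- Hedged illustration (addition, not part of the original module) ---
# `overlap` combines submodels with density-weighted averages, i.e. for every
# global-grid cell T_cell = sum_i(rho_i * T_i) / sum_i(rho_i), and likewise for
# abundance, gas-to-dust ratio and velocity. The stand-alone helper below
# checks that formula on made-up numbers.
def _density_weighted_mean(rho, q):
    "Weighted mean of quantity q using densities rho as weights (sketch)."
    rho = np.asarray(rho, dtype=float)
    q = np.asarray(q, dtype=float)
    return np.sum(rho * q) / np.sum(rho)

# Example with illustrative values:
# _density_weighted_mean([1e10, 3e10], [20., 40.]) -> 35.0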
|
92614
|
pkgname = "python-sphinxcontrib-serializinghtml"
pkgver = "1.1.5"
pkgrel = 0
build_style = "python_module"
hostmakedepends = ["python-setuptools"]
checkdepends = ["python-sphinx"]
depends = ["python"]
pkgdesc = "Sphinx extension which outputs serialized HTML document"
maintainer = "q66 <<EMAIL>>"
license = "BSD-2-Clause"
url = "http://sphinx-doc.org"
source = f"$(PYPI_SITE)/s/sphinxcontrib-serializinghtml/sphinxcontrib-serializinghtml-{pkgver}.tar.gz"
sha256 = "aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"
# circular checkdepends
options = ["!check"]
def post_install(self):
self.install_license("LICENSE")
|
92629
|
import enum
from typing import Union
@enum.unique
class PriorityStatus(enum.Enum):
HIGHEST = 0
HIGHER = 10
HIGH = 20
NORMAL = 30
LOW = 40
LOWER = 50
LOWEST = 60
class HookBase:
"""HookBase is the base class of all hook and is registered in EngineBase class.
The subclasses of HookBase implement the following six methods according to needed.
::
hook.before_train()
for iter in range(start_iter,max_iter):
hook.before_train_iter()
train.run_train_iter()
hook.after_train_iter()
hook.after_train_epoch()
hook.after_train()
"""
def __init__(self):
self._level = PriorityStatus.NORMAL
def before_train(self):
pass
def after_train(self):
pass
def before_train_iter(self):
pass
def after_train_iter(self):
pass
def before_train_epoch(self):
pass
def after_train_epoch(self):
pass
@property
def level(self):
return self._level
@level.setter
def level(self, level: Union[PriorityStatus, str, int]):
assert isinstance(level, (PriorityStatus, str, int))
if isinstance(level, PriorityStatus):
self._level = level
else:
level_name = list(PriorityStatus.__members__.keys())
level_value = list(lv.value for lv in PriorityStatus.__members__.values())
if (isinstance(level, str) and level.upper() in level_name) or level in level_value:
self._level = PriorityStatus[level.upper()] if isinstance(level, str) else PriorityStatus(level)
else:
import logging
logger = logging.getLogger(__name__)
logger.warning(f'level {level} is not a valid PriorityStatus; falling back to PriorityStatus.NORMAL')
self._level = PriorityStatus.NORMAL
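# --- Hedged usage sketch (addition, not part of the original module) ---
# A hook only needs to override the callbacks it cares about; everything else
# falls through to the no-op defaults in HookBase. The subclass and level
# assignments below are illustrative only.
class _PrintIterHook(HookBase):
    def after_train_iter(self):
        print("one training iteration finished")

if __name__ == "__main__":
    hook = _PrintIterHook()
    hook.level = PriorityStatus.HIGH   # by enum member
    hook.level = 20                    # or by member value (maps to HIGH)
    hook.after_train_iter()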
|
92632
|
from typing import *
import pprint
from onmt.utils.report_manager import ReportMgrBase
from onmt.utils.statistics import Statistics
from seutil import LoggingUtils
class CustomReportMgr(ReportMgrBase):
logger = LoggingUtils.get_logger(__name__)
def __init__(self, report_every, start_time=-1.):
super().__init__(report_every, start_time)
self.training_history: List[dict] = list()
self.step_history: List[dict] = list()
return
def _report_training(self,
step: int,
num_steps: int,
learning_rate: float,
report_stats: Statistics,
):
new_history = {
"step": step,
"learning_rate": learning_rate,
"accuracy": report_stats.accuracy(),
"ppl": report_stats.ppl(),
"xent": report_stats.xent(),
"elapsed_time": report_stats.elapsed_time(),
}
self.logger.info(f"training reported: \n{pprint.pformat(new_history)}")
self.training_history.append(new_history)
return
def _report_step(self,
lr: float,
step: int,
train_stats: Optional[Statistics],
valid_stats: Statistics,
):
new_history = {
"learning_rate": lr,
"step": step,
"accuracy": valid_stats.accuracy(),
"ppl": valid_stats.ppl(),
"xent": valid_stats.xent(),
"elapsed_time": valid_stats.elapsed_time(),
}
self.logger.info(f"step reported: \n{pprint.pformat(new_history)}")
self.step_history.append(new_history)
return
def get_joint_history(self):
if len(self.training_history) != len(self.step_history):
LoggingUtils.log_and_raise(self.logger, f"Cannot join two mismatch history!", Exception)
# end if
joint_history: List[dict] = list()
for idx in range(len(self.training_history)):
if self.training_history[idx]["step"] != self.step_history[idx]["step"]:
LoggingUtils.log_and_raise(self.logger, f"Cannot join two mismatch history!", Exception)
# end if
joint_history.append({
"step": self.training_history[idx]["step"],
"elapsed_time": self.training_history[idx]["elapsed_time"],
"learning_rate": self.training_history[idx]["learning_rate"],
"train_accuracy": self.training_history[idx]["accuracy"],
"train_ppl": self.training_history[idx]["ppl"],
"train_xent": self.training_history[idx]["xent"],
"val_accuracy": self.step_history[idx]["accuracy"],
"val_ppl": self.step_history[idx]["ppl"],
"val_xent": self.step_history[idx]["xent"],
})
# end for
return joint_history
|