input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
labels for each record in fasta file found in test_dir
using the vowpal-wabbit model. Note that there must be a label file of the
same basename with matching ids for each of the fasta lines.
test_dir (string): must be a path to a directory with a single fasta
and label file
model_dir (string): must be a path to a directory with a vw model file
predict_dir (string):output directory of predictions
Unpacking args:
kmer_length (int): size of k-mers used
ncpus (int): number of cpus to be used
precise (flag): if set, two prediction files will be generated -- one with read
ids and labels that have probabilities above cutoff, other with
label probabilities for strong predictions and three top possible
labels with probabilities for weak predictions
cutoff (float): user-defined probability cut off for precise mode
Returns a tuple with (reffile, predicted_labels_file) for easy input
into evaluate_predictions.
'''
# Unpack args
kmer = args.kmer_length
ncpus = args.ncpus
cutoff = 0.0
if args.precise:
cutoff = args.cutoff
try:
fasta = [os.path.basename(x) for x in glob.glob(os.path.join(test_dir,'*.fasta'))][0]
except IndexError:
raise RuntimeError("Could not find fasta file in:" + test_dir)
safe_makedirs(predict_dir)
starttime = datetime.now()
print(
'''================================================
Predicting using Carnelian + vowpal-wabbit
{:%Y-%m-%d %H:%M:%S}
'''.format(starttime) + '''
k-mer length: {kmer}
------------------------------------------------
Fasta input: {fasta}
------------------------------------------------'''.format(
kmer=kmer,
fasta=fasta))
sys.stdout.flush()
if ncpus > 1:
p=Pool(ncpus)
tmp_path = os.path.join(predict_dir, 'tmp')
safe_makedirs(tmp_path)
infastapath = os.path.join(test_dir, fasta)
splits = sequtil.split_fasta2(infastapath, tmp_path, ncpus, my_env)
#splits = sequtil.split_fasta(infastapath, tmp_path, ncpus)
print(splits)
test_dirlist = [os.path.dirname(x) for x in glob.glob(os.path.join(tmp_path,'*/*.fasta'))]
pred_dirlist = [os.path.join(f,'predict') for f in test_dirlist]
arglist=[model_dir+'|'+test_dirlist[i]+'|'+pred_dirlist[i]+'|'+str(kmer) for i in range(ncpus)]
if args.precise:
arglist = [x+'|'+str(args.precise)+'|'+str(cutoff) for x in arglist]
p.map(predict_unpack, arglist)
filelist = [glob.glob(os.path.join(d,'*.label'))[0] for d in pred_dirlist]
#print(filelist)
prefix = sequtil.merge_files2(filelist,predict_dir,fasta,'label', my_env)
vw_prefix = sequtil.merge_files2(filelist,predict_dir,fasta,'vw', my_env)
if args.precise:
filelist = [glob.glob(os.path.join(d,'*.tsv'))[0] for d in pred_dirlist]
prob_file = sequtil.merge_files2(filelist,predict_dir,fasta,'tsv',my_env)
shutil.rmtree(tmp_path, ignore_errors=True)
else:
prefix = predictOne(model_dir, test_dir, predict_dir, kmer, args.precise, cutoff)
pred_file = os.path.join(os.path.dirname(prefix),fasta.split('.')[0]+'.label')
os.system("mv "+prefix+' '+pred_file)
orig_file = prefix.rsplit('.',1)[0]+'.vw'
vw_file = os.path.join(os.path.dirname(prefix),fasta.split('.')[0]+'.vw')
os.system("mv "+orig_file+' '+vw_file)
if args.precise:
prob_file = os.path.join(os.path.dirname(prefix),fasta.split('.')[0]+'.tsv')
orig_file = os.path.join(os.path.dirname(prefix),'label-probabilities.tsv')
os.system("mv "+orig_file+' '+prob_file)
prefix = pred_file
print('''------------------------------------------------
Predicted labels: {pl}
Total wall clock runtime (sec): {s}
================================================'''.format(
pl=prefix,
s=(datetime.now() - starttime).total_seconds()))
sys.stdout.flush()
return (prefix.strip())
def predict_unpack(args):
    '''Unpack a '|'-delimited argument string and run predictOne.

    Worker entry point for multiprocessing Pool.map, which can only pass a
    single picklable argument to each task.

    args (string): "model_dir|test_dir|predict_dir|kmer", optionally
        followed by "|precise|cutoff" when precise mode is enabled.
    '''
    s = args.split('|')
    model_dir = s[0]
    test_dir = s[1]
    predict_dir = s[2]
    k = int(s[3])
    precise = False
    cutoff = 0.0
    if len(s) > 5:
        # bool("False") is True (any non-empty string is truthy), so compare
        # against the literal produced by str(args.precise) instead.
        precise = (s[4] == 'True')
        cutoff = float(s[5])
    predictOne(model_dir, test_dir, predict_dir, k, precise, cutoff)
def predictOne(model_dir, test_dir, predict_dir, kmer, precise, cutoff=0.0):
    '''Predict labels for the single fasta file in test_dir using a trained
    vowpal-wabbit model.

    Pipes the output of the fasta2skm feature extractor straight into vw,
    echoing vw's progress output to stdout, then converts vw's numeric class
    predictions back to text labels via the model's dictionary file.

    model_dir (string): directory containing the trained vw model,
        "vw-dico.txt" and "patterns.txt"
    test_dir (string): directory containing exactly one *.fasta file
    predict_dir (string): output directory (created if needed)
    kmer (int): k-mer length used for feature extraction
    precise (bool): if True, request per-class probabilities from vw
    cutoff (float): probability cutoff; when > 0 the precise label
        converter (vw_class_to_label2) is used

    Returns the path of the ".preds.label" file that was written.
    '''
    # Don't need to get labels until eval
    #fasta, labels = get_fasta_and_label(test_dir)
    try:
        fasta = glob.glob(test_dir + "/*.fasta")[0]
    except IndexError:
        # Only an empty glob should map to this runtime error; the previous
        # bare "except:" would also have masked unrelated programming errors.
        raise RuntimeError("No fasta file found in: " + test_dir)
    model = get_final_model(model_dir)
    dico = os.path.join(model_dir, "vw-dico.txt")
    pattern_file = os.path.join(model_dir, "patterns.txt")
    safe_makedirs(predict_dir)
    prefix = os.path.join(predict_dir, "test.fragments-db")
    # get vw predictions: fasta2skm emits features, vw consumes them via a pipe
    fasta2skm_param_list = ["fasta2skm",
                            "-i", fasta,
                            "-k", str(kmer),
                            "-p", pattern_file]
    vw_param_list = ["vw", "-t",
                     "-i", model,
                     "-p", prefix + ".preds.vw"]
    if precise:
        vw_param_list.append("--probabilities")
    ps = subprocess.Popen(fasta2skm_param_list, env=my_env,
                          stdout=subprocess.PIPE)
    vwps = subprocess.Popen(vw_param_list, env=my_env,
                            stdin=ps.stdout, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    # Relay vw's progress/diagnostic lines until the process exits.
    while vwps.poll() is None:
        l = vwps.stdout.readline()
        sys.stdout.write(l)
        sys.stdout.flush()
    if cutoff > 0:
        vw_class_to_label2(prefix + '.preds.vw', dico, prefix + '.preds.label', cutoff)
    else:
        vw_class_to_label(prefix + '.preds.vw', dico, prefix + '.preds.label')
    return (prefix + '.preds.label')
def evaluatePreds(ref_file, pred_file):
    '''Evaluates how good a predicted list is compared to a reference gold standard.

    ref_file (string): path to a file with one gold-standard label per line
    pred_file (string): path to a file with one predicted label per line,
        line-aligned with ref_file (same number of lines)

    Prints micro/macro/median recall, precision and F1 scores (as
    percentages) to stdout and returns 0.
    '''
    with open(pred_file, "r") as fin:
        pred = fin.read().splitlines()
    with open(ref_file, "r") as fin:
        ref = fin.read().splitlines()
    # range() instead of Python-2-only xrange() so this also runs on Python 3.
    correct = [False for _ in ref]
    for i in range(len(ref)):
        if ref[i] == pred[i]:
            correct[i] = True
    perf = pd.DataFrame({"pred":pred, "ref":ref, "correct":correct})
    tmp = perf.groupby("ref")
    recall_i = tmp["correct"].agg(np.mean)
    micro_recall = np.mean(correct)
    macro_recall = np.mean(recall_i)
    median_recall = np.median(recall_i)
    tmp = perf.groupby("pred")
    precision_i = tmp["correct"].agg(np.mean)
    # With exactly one predicted label per read, micro precision equals
    # micro recall.
    micro_precision = micro_recall
    macro_precision = np.mean(precision_i)
    median_precision = np.median(precision_i)
    micro_f1 = 2*micro_precision*micro_recall/(micro_precision+micro_recall)
    # The epsilon keeps per-class F1 defined when a class has zero recall
    # and zero precision.
    f1score_i = 2*recall_i*(precision_i+0.00000001)/(recall_i+precision_i+0.00000001)
    macro_f1 = np.mean(f1score_i)
    print("micro recall = {:.2f}".format(micro_recall*100))
    print("macro recall = {:.2f}".format(macro_recall*100))
    print("median recall = {:.2f}".format(median_recall*100))
    print("micro precision = {:.2f}".format(micro_precision*100))
    print("macro precision = {:.2f}".format(macro_precision*100))
    print("median precision = {:.2f}".format(median_precision*100))
    print("micro F1 = {:.2f}".format(micro_f1*100))
    print("macro F1 = {:.2f}".format(macro_f1*100))
    sys.stdout.flush()
    return 0
def evaluatePredsPrecise(reffile, predfile):
    '''Evaluates precise-mode predictions against a reference gold standard.

    reffile (string): path to a file with one gold-standard label per line
    predfile (string): tab-separated lines of "<1-based read index> TAB
        <label>"; reads absent from this file are scored as 'NA' and thus
        counted incorrect (unless the reference label is literally 'NA')

    Prints micro/macro/median recall, precision and F1 scores (as
    percentages) to stdout and returns 0.
    '''
    with open(predfile, "r") as fin:
        pred_l = fin.read().splitlines()
    with open(reffile, "r") as fin:
        ref = fin.read().splitlines()
    pred = ['NA' for _ in ref]
    for l in pred_l:
        x = l.strip().split('\t')
        # "idx" avoids shadowing the builtin id(); file indices are 1-based.
        idx = int(x[0]) - 1
        pred[idx] = x[1]
    # range() instead of Python-2-only xrange() so this also runs on Python 3.
    correct = [False for _ in ref]
    for i in range(len(ref)):
        if ref[i] == pred[i]:
            correct[i] = True
    perf = pd.DataFrame({"pred":pred, "ref":ref, "correct":correct})
    tmp = perf.groupby("ref")
    recall_i = tmp["correct"].agg(np.mean)
    micro_recall = np.mean(correct)
    macro_recall = np.mean(recall_i)
    median_recall = np.median(recall_i)
    tmp = perf.groupby("pred")
    precision_i = tmp["correct"].agg(np.mean)
    # With one effective label per read, micro precision equals micro recall.
    micro_precision = micro_recall
    macro_precision = np.mean(precision_i)
    median_precision = np.median(precision_i)
    micro_f1 = 2*micro_precision*micro_recall/(micro_precision+micro_recall)
    # NOTE(review): unlike evaluatePreds, no epsilon here — a class with zero
    # recall and zero precision yields NaN for its F1; confirm intended.
    f1score_i = 2*recall_i*precision_i/(recall_i+precision_i)
    macro_f1 = np.mean(f1score_i)
    print("micro recall = {:.2f}".format(micro_recall*100))
    print("macro recall = {:.2f}".format(macro_recall*100))
    print("median recall = {:.2f}".format(median_recall*100))
    print("micro precision = {:.2f}".format(micro_precision*100))
    print("macro precision = {:.2f}".format(macro_precision*100))
    print("median precision = {:.2f}".format(median_precision*100))
    print("micro F1 = {:.2f}".format(micro_f1*100))
    print("macro F1 = {:.2f}".format(macro_f1*100))
    sys.stdout.flush()
    return 0
def calcAbundance(in_dir, out_dir, mapping_file, gs_file):
    '''
    Performs abundance estimation on the predictions in the directory in_dir
    and outputs the raw counts and effective counts matrices in directory out_dir.
    .mapping file is a tab separated file with three columns: <sample_id,group,fraglen>
    in_dir (string): must be a path to an input directory containing the
                     predicted labels for each sample in its own subdirectory
    out_dir (string): must be a path to an output directory
    mapping_file (string): must be a path to a tab separated file with three
                           columns: <sample_id,group,fraglen>. The first line
                           of the file should contain headers.
    gs_file (string): must be a path to a tab-separated input file containing the gold
                      standard protein labels with average protein lengths under each label

    Returns 0 on completion. Requires Rscript on PATH; the R script's own
    exit status is not checked.
    '''
    safe_makedirs(out_dir)
    starttime = datetime.now()
    print('''================================================
Performing abundance estimation with Carnelian and R
Creating raw counts matrix
{:%Y-%m-%d %H:%M:%S}'''.format(starttime))
    sys.stdout.flush()
    # Build the per-sample raw counts matrix (written as raw_counts.tsv).
    df = createAbundanceMatrix(in_dir, out_dir, gs_file)
    counts_file = os.path.join(out_dir,'raw_counts.tsv')
    print(" "+counts_file)
    # Hand off to the R script for the effective-counts computation.
    command = counts_file + ' ' + gs_file + ' ' + mapping_file + ' ' + out_dir
    # The R script lives next to this script's install location.
    rscript_path = os.path.dirname(script_loc)+'/scripts/abundance_estimation.R'
    print(rscript_path)
    # NOTE(review): os.system with string concatenation — paths containing
    # spaces or shell metacharacters would break or inject; consider
    # subprocess.run with an argument list.
    os.system('Rscript '+rscript_path+ ' ' + command)
    print('''------------------------------------------------
Total wall clock runtime (sec): {}
================================================'''.format(
        (datetime.now() - starttime).total_seconds()))
    sys.stdout.flush()
    return 0
# Carnelian main function
def main(argv):
parser = argparse.ArgumentParser(description='Perform functional binning of metagenomic sequences using gene annotations')
# Shared arguments
#parser.add_argument('--version', action='version', version='%(prog)s {version}'.format(version=__version__))
ncpus_arg = ArgClass('-n', dest='ncpus', default=1, help='Number of parallel processors to be used', type=int)
kmer_arg = ArgClass('-k', dest='kmer_length', default=8, help='length of k-mers used', type=int)
num_hash_arg = ArgClass('--num_hash', dest='num_hash', default=1, help='number of k-mer hashing functions to get features', type=int)
frag_length_arg = ArgClass('-l', dest='frag_length', default=16, help='length of fragments to be drawn from fasta', type=int)
coverage_arg = ArgClass('-c', dest='coverage', default=1.0, help='number/fraction of times a position in a fragment should be covered by the k-mer', type=float)
hierarchical_arg = ArgClass('--hweight', help='intermediate organization of positions chosen in the k-mer in row_weight; should be a multiple of row_weight and a divisor of k-mer length if set', type=int, default=-1)
row_weight_arg = ArgClass('--rweight', help='the number of positions that will be randomly chosen in the contiguous k-mer; k-mer length should be a multiple of row_weight', type=int, default=4)
num_batches_arg = ArgClass('--num-batches', help='Number of times to generate a random batch of training data for VW', type=int, default=1)
num_passes_arg = ArgClass('--num-passes', help='Number of VW passes in each training batch', type=int, default=1)
bits_arg = ArgClass('--bits', help='Number of bits used in VW model', type=int, default=31)
lambda1_arg = ArgClass('--lambda1', help='VW model lambda1 training parameter', type=float, default=0.)
lambda2_arg = ArgClass('--lambda2', help='VW model lambda2 training parameter', type=float, default=0.)
type_arg = ArgClass('--type', dest='type', default='nucleotide', help='Sequence type. "prot" if the reference fasta files are for amino acid sequences. "nucl" if nucleotide sequences. In the latter case the nucleotide sequences will be translated. (default: %(default)s)')
cutoff_arg = ArgClass('--cutoff', help='Probability cutoff for VW predictions', type=float, default=0.)
# Subparsers
subparsers = parser.add_subparsers(help='sub-commands', dest='mode')
# Translation Args
parser_translate = subparsers.add_parser('translate', help='Find genes in the input reads.fasta file and generate all possible ORFs', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_translate.add_argument('seq_dir', help='Input directory for fasta file to be translated')
parser_translate.add_argument('out_dir', help='Output directory for fasta translated fasta file')
parser_translate.add_argument('fgsp_loc', help='Path where FragGeneScan is installed')
parser_translate.add_argument(*ncpus_arg.args, **ncpus_arg.kwargs)
# Fragmentation Args
parser_frag = subparsers.add_parser('frag', help='Fragment a fasta file into substrings for training/testing', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_frag.add_argument('test_dir', help='Input directory for fasta and label files to be fragmented')
parser_frag.add_argument('frag_dir', help='Output directory for fasta fragments and corresponding labels')
parser_frag.add_argument(*frag_length_arg.args, **frag_length_arg.kwargs)
parser_frag.add_argument(*coverage_arg.args, **coverage_arg.kwargs)
parser_frag.add_argument(*type_arg.args, **type_arg.kwargs)
# Training Args
parser_train = subparsers.add_parser('train', help='Train a Vowpal Wabbit model using carnelian hash-based features', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_train.add_argument('train_dir', help='Input directory for training data')
parser_train.add_argument('model_dir', help='Output directory for VW model')
parser_train.add_argument('--precise', help='If set, will train model to output probabilities', action='store_true')
parser_train.add_argument(*frag_length_arg.args, **frag_length_arg.kwargs)
parser_train.add_argument(*coverage_arg.args, **coverage_arg.kwargs)
#parser_train.add_argument(*type_arg.args, **type_arg.kwargs)
parser_train.add_argument(*kmer_arg.args, **kmer_arg.kwargs)
parser_train.add_argument(*num_batches_arg.args, **num_batches_arg.kwargs)
parser_train.add_argument(*num_passes_arg.args, **num_passes_arg.kwargs)
parser_train.add_argument(*num_hash_arg.args, **num_hash_arg.kwargs)
parser_train.add_argument(*row_weight_arg.args, **row_weight_arg.kwargs)
parser_train.add_argument(*hierarchical_arg.args, **hierarchical_arg.kwargs)
parser_train.add_argument(*bits_arg.args, **bits_arg.kwargs)
parser_train.add_argument(*lambda1_arg.args, **lambda1_arg.kwargs)
parser_train.add_argument(*lambda2_arg.args, **lambda2_arg.kwargs)
parser_retrain = subparsers.add_parser("retrain", help="Incrementally retrain an existing | |
<gh_stars>10-100
import asyncio
import discord
import os
import time
from discord.ext import commands
from discord.ext.commands import BucketType
from humanfriendly import format_timespan as timeez
from data import dbconn
from utils import challonge_api, paginator, discord_, tournament_helper
from constants import BOT_INVITE, GITHUB_LINK, SERVER_INVITE, ADMIN_PRIVILEGE_ROLES
MAX_REGISTRANTS = 256
class Tournament(commands.Cog):
def __init__(self, client):
self.client = client
self.db = dbconn.DbConn()
self.api = challonge_api.ChallongeAPI(self.client)
def make_tournament_embed(self, ctx):
desc = "Information about Tournament related commands! **[use .tournament <command>]**\n\n"
handle = self.client.get_command('tournament')
for cmd in handle.commands:
desc += f"`{cmd.name}`: **{cmd.brief}**\n"
embed = discord.Embed(description=desc, color=discord.Color.dark_magenta())
embed.set_author(name="Lockout commands help", icon_url=ctx.me.avatar_url)
embed.set_footer(
text="Use the prefix . before each command. For detailed usage about a particular command, type .help <command>")
embed.add_field(name="GitHub repository", value=f"[GitHub]({GITHUB_LINK})",
inline=True)
embed.add_field(name="Bot Invite link",
value=f"[Invite]({BOT_INVITE})",
inline=True)
embed.add_field(name="Support Server", value=f"[Server]({SERVER_INVITE})",
inline=True)
return embed
    @commands.group(brief='Commands related to tournaments! Type .tournament for more details', invoke_without_command=True, aliases=['tourney'])
    async def tournament(self, ctx):
        # Bare ".tournament" (no sub-command) replies with the overview embed.
        await ctx.send(embed=self.make_tournament_embed(ctx))
@tournament.command(name="faq", brief="FAQ")
async def faq(self, ctx):
data = [["What formats are supported?",
"The bot currently supports 3 types of formats: Single Elimination, Double Elimination and Swiss"],
["Where will the tournament be held?",
"The tournament will be held on [Challonge](https://challonge.com). The bot will automatically setup the tournament and update brackets."],
["How to setup a tournament?",
"To setup a tournament type `.tournament setup x y` where x is an integer in the range 0 to 2 (Single Elim, Double Elim, Swiss) denoting tournament type and y is the name of the tournament."],
["How to register/unregister for the tournament?",
"To register type `.tournament register` and to unregister type `.tournament unregister`. Admins can forcefully unregister a user by typing `.tournament _unregister <handle>`."],
["How to start the tournament?",
"To start the tournament, type `.tournament begin`"],
["How to compete once the tournament has started?",
"To compete, use the `.round` command and challenge your opponent. The bot will automatically ask you if you want the result of the round to be counted in the tournament and update the tournament bracket once the round is complete."],
["What if my opponent doesn't show up or leaves the server without completing the matches?",
"You can ask an admin to use the command `.tournament forcewin <handle>` where handle is the winners codeforces handle."],
["What if the bot accidentally gives victory to the wrong user?",
"You can ask an admin to invalidate the match results by typing `.tournament match_invalidate x` where x is match number (can be accessed from challonge page of the tournament). This will also reset the subsequent matches whose result depends on this match"]]
embed = discord.Embed(description='\n\n'.join([f':small_red_triangle_down: **{x[0]}**\n:white_small_square: {x[1]}' for x in data]), color=discord.Color.dark_green())
embed.set_author(name="Frequently Asked Questions about tournaments")
await ctx.send(embed=embed)
@tournament.command(name="setup", brief="Setup a tournament.")
async def setup(self, ctx, tournament_type: int, *, tournament_name: str):
"""
**tournament_name:** Alpha-numeric string (Max 50 characters)
**tournament_type:** Integer (0: single elimination, 1: double elimination, 2: swiss)
"""
if not discord_.has_admin_privilege(ctx):
await discord_.send_message(ctx, f"{ctx.author.mention} you require 'manage server' permission or one of the "
f"following roles: {', '.join(ADMIN_PRIVILEGE_ROLES)} to use this command")
return
if len(tournament_name) not in range(1, 51):
await discord_.send_message(ctx, "The tournament name should be 50 character max")
return
if any([not ch.isalnum() and ch != ' ' for ch in tournament_name]):
await discord_.send_message(ctx, "The tournament name should contain only alpha-numeric characters")
return
if tournament_type not in range(0, 3):
await discord_.send_message(ctx, "Tournament type should be either 0, 1 or 2. (0: single elimination, 1: double elimination, 2: swiss)")
return
if self.db.get_tournament_info(ctx.guild.id):
await discord_.send_message(ctx, "A tournament is already in progress in this server!")
return
self.db.add_tournament(ctx.guild.id, tournament_name, tournament_type, 0, "-", 0)
types = ["Single Elimination", "Double Elimination", "Swiss"]
desc = f"""
Initialised a {types[tournament_type]} tournament.
To register, type `.tournament register` (Max registrations: **{MAX_REGISTRANTS}**)
To unregister, type `.tournament unregister`
To start the tournament, type `.tournament begin`
"""
embed = discord.Embed(description=desc, color=discord.Color.green())
embed.set_author(name=tournament_name)
await ctx.send(embed=embed)
@tournament.command(name="register", brief="Register for the tournament")
async def register(self, ctx):
tournament_info = self.db.get_tournament_info(ctx.guild.id)
if not tournament_info:
await discord_.send_message(ctx, "There is no ongoing tournament in the server currently")
return
if tournament_info.status != 0:
await discord_.send_message(ctx, "The tournament has already begun")
return
if not self.db.get_handle(ctx.guild.id, ctx.author.id):
await discord_.send_message(ctx, "Your handle is not set, set your handle first and try again")
return
handle_info = self.db.get_handle_info(ctx.guild.id, ctx.author.id)
registrants = self.db.get_registrants(ctx.guild.id)
if ctx.author.id in [x.discord_id for x in registrants]:
await discord_.send_message(ctx, "You have already registered for the tournament")
return
if handle_info[2] in [x.handle for x in registrants]:
await discord_.send_message(ctx, f"Someone has already registered for the tournament with handle `{handle_info[2]}`")
return
if len(registrants) == MAX_REGISTRANTS:
await discord_.send_message(ctx, "The tournament has already reached its max registrants limit!")
return
self.db.add_registrant(ctx.guild.id, ctx.author.id, handle_info[2], handle_info[3], 0)
await ctx.send(embed=discord.Embed(description=f"Successfully registered for the tournament. `{MAX_REGISTRANTS-len(registrants)-1}` slots left.",
color=discord.Color.green()))
@tournament.command(name="unregister", brief="Unregister from the tournament")
async def unregister(self, ctx):
tournament_info = self.db.get_tournament_info(ctx.guild.id)
if not tournament_info:
await discord_.send_message(ctx, "There is no ongoing tournament in the server currently")
return
if tournament_info.status != 0:
await discord_.send_message(ctx, "The tournament has already begun")
return
registrants = self.db.get_registrants(ctx.guild.id)
if ctx.author.id not in [x.discord_id for x in registrants]:
await discord_.send_message(ctx, "You have not registered for the tournament")
return
self.db.remove_registrant(ctx.guild.id, ctx.author.id)
await ctx.send(embed=discord.Embed(
description=f"Successfully unregistered from the tournament. `{MAX_REGISTRANTS - len(registrants) + 1}` slots left.",
color=discord.Color.green()))
@tournament.command(name="_unregister", brief="Forcefully unregister someone from the tournament")
async def _unregister(self, ctx, *, handle: str):
if not discord_.has_admin_privilege(ctx):
await discord_.send_message(ctx, f"{ctx.author.mention} you require 'manage server' permission or one of the "
f"following roles: {', '.join(ADMIN_PRIVILEGE_ROLES)} to use this command")
return
tournament_info = self.db.get_tournament_info(ctx.guild.id)
if not tournament_info:
await discord_.send_message(ctx, "There is no ongoing tournament in the server currently")
return
if tournament_info.status != 0:
await discord_.send_message(ctx, "The tournament has already begun")
return
registrants = self.db.get_registrants(ctx.guild.id)
res = self.db.remove_registrant_by_handle(ctx.guild.id, handle)
if not res:
await discord_.send_message(ctx, f"The user with handle `{handle}` has not registered for the tournament")
return
await ctx.send(embed=discord.Embed(
description=f"Successfully unregistered from the tournament. `{MAX_REGISTRANTS - len(registrants) + 1}` slots left.",
color=discord.Color.green()))
@tournament.command(name="registrants", brief="View the list of users who have registered the tournament")
async def registrants(self, ctx):
registrants = self.db.get_registrants(ctx.guild.id)
if not registrants:
await discord_.send_message(ctx, "No registrations yet")
return
await paginator.Paginator([[str(i+1), registrants[i].handle, str(registrants[i].rating)] for i in range(len(registrants))], ["S No.", "Handle", "Rating"], "Registrants for the Lockout tournament", 15).paginate(ctx, self.client)
@tournament.command(name="info", brief="Get basic information about the tournament")
async def info(self, ctx):
tournament_info = self.db.get_tournament_info(ctx.guild.id)
if not tournament_info:
await discord_.send_message(ctx, "There is no ongoing tournament in the server currently")
return
desc = ""
desc += f"**Tournament name**: {tournament_info.name}\n"
desc += f"**Tournament type**: {['Single Elimination', 'Double Elimination', 'Swiss'][tournament_info.type]}\n"
desc += f"**Registrations**: {len(self.db.get_registrants(ctx.guild.id))}\n"
desc += f"**Challonge link**: {'Tournament not started yet' if tournament_info.status == 0 else f'[link](https://challonge.com/{tournament_info.url})'}"
embed = discord.Embed(description=desc, color=discord.Color.dark_orange())
embed.set_author(name="Lockout tournament details")
await ctx.send(embed=embed)
@tournament.command(name="begin", brief="Begin the tournament", aliases=['start'])
@commands.cooldown(1, 120, BucketType.guild)
async def begin(self, ctx):
if not discord_.has_admin_privilege(ctx):
await discord_.send_message(ctx, f"{ctx.author.mention} you require 'manage server' permission or one of the "
f"following roles: {', '.join(ADMIN_PRIVILEGE_ROLES)} to use this command")
return
tournament_info = self.db.get_tournament_info(ctx.guild.id)
if not tournament_info:
await discord_.send_message(ctx, "There is no ongoing tournament in the server currently")
return
if tournament_info.status == 2:
await discord_.send_message(ctx, f"The tournament has already begun! Type `.tournament matches` or `.tournament info` to view details about the tournament")
return
registrants = self.db.get_registrants(ctx.guild.id)
if not registrants or len(registrants) < 2:
await discord_.send_message(ctx, "Not enough registrants for the tournament yet")
return
logging_channel = await self.client.fetch_channel(os.environ.get("LOGGING_CHANNEL"))
if tournament_info.status == 0:
resp = await discord_.get_time_response(self.client, ctx, "Are you sure you want to start the tournament? No new registrations will be allowed once the tournament has started. Type `1` for yes and `0` for no", 30, ctx.author, [0, 1])
if not resp[0] or resp[1] == 0:
ctx.command.reset_cooldown(ctx)
return
await ctx.send(f"Setting up tournament...")
tournament_resp = await self.api.add_tournament(tournament_info)
if not tournament_resp or 'errors' in tournament_resp:
ctx.command.reset_cooldown(ctx)
await discord_.send_message(ctx, "Some error occurred, try again later")
if tournament_resp and 'errors' in tournament_resp:
await logging_channel.send(f"Error in tournament setup: {ctx.guild.id} {tournament_resp['errors']}")
return
# api takes some time to register tournament id
await asyncio.sleep(5)
await ctx.send(f"Adding participants...")
participants_resp = await self.api.bulk_add_participants(tournament_resp['tournament']['id'], [{"name": f"{registrants[i].handle} ({registrants[i].rating})", "seed": i+1} for i in range(len(registrants))])
if not participants_resp or 'errors' in participants_resp:
ctx.command.reset_cooldown(ctx)
await discord_.send_message(ctx, "Some error occurred, try again later")
if participants_resp and 'errors' in participants_resp:
await logging_channel.send(f"Error in bulk | |
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
#
# Module imports and global state for the LISP implementation.
# De-obfuscated: the original "if N - N:" padding lines (conditions that are
# always 0, so their bodies can never execute) have been removed; every
# import and every assignment below is preserved in its original order.
#
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import Queue
import traceback
from Crypto.Cipher import AES
import ecdsa
import json
import commands
import copy
import chacha
import poly1305
from geopy.distance import vincenty
import curve25519

# Cipher selection, driven by environment variables at import time.
use_chacha = (os.getenv("LISP_USE_CHACHA") is not None)
use_poly = (os.getenv("LISP_USE_POLY") is not None)

# Debug switch for dumping the RLOC-probe list.
lisp_print_rloc_probe_list = False

# Identity and role flags for this lisp process.
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True

# Mapping-system and database state tables.
lisp_map_notify_queue = {}
lisp_map_servers_list = {}
lisp_ddt_map_requestQ = {}
lisp_db_list = []
lisp_group_mapping_list = {}
lisp_map_resolvers_list = {}
lisp_rtr_list = {}
lisp_elp_list = {}
lisp_rle_list = {}
lisp_geo_list = {}
lisp_json_list = {}
lisp_myrlocs = [None, None, None]
lisp_mymacs = {}

# Local interface bookkeeping.
lisp_myinterfaces = {}
lisp_iid_to_interface = {}
lisp_multi_tenant_interfaces = []

# Periodic timers (created elsewhere at runtime).
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None

# Registration counter.
lisp_registered_count = 0

# Info-source tables, indexed by address and by nonce.
lisp_info_sources_by_address = {}
lisp_info_sources_by_nonce = {}

# lisp-crypto key state and switches.
lisp_crypto_keys_by_nonce = {}
lisp_crypto_keys_by_rloc_encap = {}
lisp_crypto_keys_by_rloc_decap = {}
lisp_data_plane_security = False
lisp_search_decap_keys = True

# Logging switches.
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False

# Ephemeral port used for crypto exchanges (assigned at runtime).
lisp_crypto_ephem_port = None

# Proxy-ITR mode flag.
lisp_pitr = False

# Layer-2 overlay flag.
lisp_l2_overlay = False

# RLOC-probing state.
lisp_rloc_probing = False
lisp_rloc_probe_list = {}

# Whether to register to all RTRs.
lisp_register_all_rtrs = True

# Nonce-echo state.
lisp_nonce_echoing = False
lisp_nonce_echo_list = {}

# NAT-traversal flag.
lisp_nat_traversal = False

# Hardware-forwarding flag.
lisp_program_hardware = False

# Map-cache checkpointing.
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"

# External data-plane IPC.
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"

# IPC lock (created elsewhere at runtime).
lisp_ipc_lock = None

# Default instance-IDs.
lisp_default_iid = 0
lisp_default_secondary_iid = 0

# RTR list shared between MS and RTR roles — TODO confirm exact use.
lisp_ms_rtr_list = []
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import unittest
import logging
try:
import mock # Python 2
except ImportError:
from unittest import mock # Python 3
from nose.tools import ok_, eq_, raises
from ryu.lib.packet.bgp import BGPPathAttributeOrigin
from ryu.lib.packet.bgp import BGPPathAttributeAsPath
from ryu.lib.packet.bgp import BGP_ATTR_ORIGIN_IGP
from ryu.lib.packet.bgp import BGP_ATTR_TYPE_ORIGIN
from ryu.lib.packet.bgp import BGP_ATTR_TYPE_AS_PATH
from ryu.lib.packet.bgp import BGP_ATTR_TYPE_EXTENDED_COMMUNITIES
from ryu.lib.packet.bgp import IPAddrPrefix
from ryu.lib.packet.bgp import IP6AddrPrefix
from ryu.lib.packet.bgp import EvpnArbitraryEsi
from ryu.lib.packet.bgp import EvpnLACPEsi
from ryu.lib.packet.bgp import EvpnEthernetAutoDiscoveryNLRI
from ryu.lib.packet.bgp import EvpnMacIPAdvertisementNLRI
from ryu.lib.packet.bgp import EvpnInclusiveMulticastEthernetTagNLRI
from ryu.lib.packet.bgp import FlowSpecIPv4NLRI
from ryu.lib.packet.bgp import BGPPathAttributeExtendedCommunities
from ryu.services.protocols.bgp.bgpspeaker import EVPN_MAX_ET
from ryu.services.protocols.bgp.bgpspeaker import ESI_TYPE_LACP
from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_FAMILY_IPV4
from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_FAMILY_VPNV4
from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_TA_SAMPLE
from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_TA_TERMINAL
from ryu.services.protocols.bgp.core import BgpCoreError
from ryu.services.protocols.bgp.core_managers import table_manager
from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_IPV4
from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_IPV6
from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_L2_EVPN
from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_IPV4_FLOWSPEC
from ryu.services.protocols.bgp.utils.bgp import create_v4flowspec_actions
LOG = logging.getLogger(__name__)
class Test_TableCoreManager(unittest.TestCase):
"""
Test case for bgp.core_managers.table_manager.TableCoreManager
"""
@mock.patch(
    'ryu.services.protocols.bgp.core_managers.TableCoreManager.__init__',
    mock.MagicMock(return_value=None))
def _test_update_vrf_table(self, prefix_inst, route_dist, prefix_str,
                           next_hop, route_family, route_type,
                           is_withdraw=False, **kwargs):
    """Common driver: assert update_vrf_table() forwards exactly one
    insert_vrf_path() call carrying the expected keyword arguments."""
    manager = table_manager.TableCoreManager(None, None)
    table_mock = mock.MagicMock()
    manager._tables = {(route_dist, route_family): table_mock}
    # Exercise the API under test.
    manager.update_vrf_table(
        route_dist=route_dist,
        prefix=prefix_str,
        next_hop=next_hop,
        route_family=route_family,
        route_type=route_type,
        is_withdraw=is_withdraw,
        **kwargs)
    # Verify the single forwarded call.
    calls = table_mock.insert_vrf_path.call_args_list
    ok_(len(calls) == 1)  # insert_vrf_path must be called exactly once
    passed_args, passed_kwargs = calls[0]
    ok_(len(passed_args) == 0)  # everything must be passed by keyword
    eq_(str(prefix_inst), str(passed_kwargs['nlri']))
    eq_(is_withdraw, passed_kwargs['is_withdraw'])
    # Withdrawals carry no next-hop and must not generate MPLS labels.
    expected_next_hop = None if is_withdraw else next_hop
    eq_(expected_next_hop, passed_kwargs['next_hop'])
    eq_(not is_withdraw, passed_kwargs['gen_lbl'])
def test_update_vrf_table_ipv4(self):
    """An IPv4 VRF route is inserted with its NLRI and next-hop intact."""
    net, plen = '192.168.0.0', 24
    self._test_update_vrf_table(
        IPAddrPrefix(plen, net),       # expected NLRI
        '65000:100',                   # route distinguisher
        '{0}/{1}'.format(net, plen),   # prefix string given to the API
        '10.0.0.1',                    # next-hop
        VRF_RF_IPV4,
        None)                          # route_type is ignored for IPv4
def test_update_vrf_table_ipv6(self):
    """An IPv6 VRF route is inserted with its NLRI and next-hop intact."""
    net, plen = 'fe80::', 64
    self._test_update_vrf_table(
        IP6AddrPrefix(plen, net),      # expected NLRI
        '65000:100',                   # route distinguisher
        '{0}/{1}'.format(net, plen),   # prefix string given to the API
        'fe80::0011:aabb:ccdd:eeff',   # next-hop
        VRF_RF_IPV6,
        None)                          # route_type is ignored for IPv6
def test_update_vrf_table_l2_evpn_with_esi_int(self):
    """EVPN MAC/IP advertisement whose ESI is given as a plain integer."""
    route_dist = '65000:100'
    nlri_kwargs = {
        'ethernet_tag_id': 100,
        'mac_addr': 'aa:bb:cc:dd:ee:ff',
        'ip_addr': '192.168.0.1',
        'mpls_labels': [],  # not be used
    }
    # Build the NLRI expected to reach the VRF table.
    expected = EvpnMacIPAdvertisementNLRI(
        route_dist=route_dist,
        esi=EvpnArbitraryEsi(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00'),
        **nlri_kwargs)
    # The caller-facing API accepts the ESI as a bare integer.
    nlri_kwargs['esi'] = 0
    self._test_update_vrf_table(
        expected, route_dist, None, '10.0.0.1', VRF_RF_L2_EVPN,
        EvpnMacIPAdvertisementNLRI.ROUTE_TYPE_NAME, **nlri_kwargs)
def test_update_vrf_table_l2_evpn_with_esi_dict(self):
    """EVPN auto-discovery route whose ESI is given as a dict (LACP)."""
    route_dist = '65000:100'
    nlri_kwargs = {
        'ethernet_tag_id': EVPN_MAX_ET,
    }
    # Build the NLRI expected to reach the VRF table.
    expected = EvpnEthernetAutoDiscoveryNLRI(
        route_dist=route_dist,
        esi=EvpnLACPEsi(mac_addr='aa:bb:cc:dd:ee:ff', port_key=100),
        **nlri_kwargs)
    # The caller-facing API accepts the ESI in dict form.
    nlri_kwargs['esi'] = {
        'type': ESI_TYPE_LACP,
        'mac_addr': 'aa:bb:cc:dd:ee:ff',
        'port_key': 100,
    }
    self._test_update_vrf_table(
        expected, route_dist, None, '0.0.0.0', VRF_RF_L2_EVPN,
        EvpnEthernetAutoDiscoveryNLRI.ROUTE_TYPE_NAME, **nlri_kwargs)
def test_update_vrf_table_l2_evpn_without_esi(self):
    """EVPN inclusive-multicast route: no ESI is involved at all."""
    route_dist = '65000:100'
    nlri_kwargs = {
        'ethernet_tag_id': 100,
        'ip_addr': '192.168.0.1',
    }
    expected = EvpnInclusiveMulticastEthernetTagNLRI(
        route_dist=route_dist, **nlri_kwargs)
    self._test_update_vrf_table(
        expected, route_dist, None, '10.0.0.1', VRF_RF_L2_EVPN,
        EvpnInclusiveMulticastEthernetTagNLRI.ROUTE_TYPE_NAME,
        **nlri_kwargs)
@mock.patch(
    'ryu.services.protocols.bgp.core_managers.TableCoreManager.__init__',
    mock.MagicMock(return_value=None))
def test_update_vrf_table_l2_evpn_with_vni(self):
    """EVPN MAC/IP route over VXLAN: the VNI is carried through and no
    MPLS label may be generated (gen_lbl must be False)."""
    route_dist = '65000:100'
    nlri_kwargs = {
        'ethernet_tag_id': 100,
        'mac_addr': 'aa:bb:cc:dd:ee:ff',
        'ip_addr': '192.168.0.1',
        'vni': 500,
    }
    expected = EvpnMacIPAdvertisementNLRI(
        route_dist=route_dist,
        esi=EvpnArbitraryEsi(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00'),
        **nlri_kwargs)
    next_hop = '10.0.0.1'
    tunnel_type = 'vxlan'
    nlri_kwargs['esi'] = 0  # caller-facing integer form of the ESI
    manager = table_manager.TableCoreManager(None, None)
    table_mock = mock.MagicMock()
    manager._tables = {(route_dist, VRF_RF_L2_EVPN): table_mock}
    # Exercise the API under test.
    manager.update_vrf_table(
        route_dist=route_dist,
        prefix=None,  # ignored for EVPN routes
        next_hop=next_hop,
        route_family=VRF_RF_L2_EVPN,
        route_type=EvpnMacIPAdvertisementNLRI.ROUTE_TYPE_NAME,
        tunnel_type=tunnel_type,
        **nlri_kwargs)
    # Verify the single forwarded call.
    calls = table_mock.insert_vrf_path.call_args_list
    ok_(len(calls) == 1)  # insert_vrf_path must be called exactly once
    passed_args, passed_kwargs = calls[0]
    ok_(len(passed_args) == 0)  # keyword arguments only
    eq_(str(expected), str(passed_kwargs['nlri']))
    eq_(next_hop, passed_kwargs['next_hop'])
    eq_(False, passed_kwargs['gen_lbl'])  # VXLAN: no MPLS label
    eq_(tunnel_type, passed_kwargs['tunnel_type'])
def test_update_vrf_table_ipv4_withdraw(self):
    """Withdrawing an IPv4 VRF route propagates is_withdraw=True."""
    net, plen = '192.168.0.0', 24
    self._test_update_vrf_table(
        IPAddrPrefix(plen, net),
        '65000:100',
        '{0}/{1}'.format(net, plen),
        '10.0.0.1',
        VRF_RF_IPV4,
        None,              # route_type is ignored for IPv4
        is_withdraw=True)
@raises(BgpCoreError)
@mock.patch(
    'ryu.services.protocols.bgp.core_managers.TableCoreManager.__init__',
    mock.MagicMock(return_value=None))
def test_update_vrf_table_no_vrf(self):
    """update_vrf_table() must raise when no VRF table matches the RD."""
    manager = table_manager.TableCoreManager(None, None)
    manager._tables = {}  # deliberately empty: no VRF is configured
    manager.update_vrf_table(
        route_dist='65000:100',
        prefix='192.168.0.0/24',
        next_hop='10.0.0.1',
        route_family=VRF_RF_IPV4,
        route_type=None)  # ignored
@raises(BgpCoreError)
def test_update_vrf_table_invalid_next_hop(self):
    """A malformed next-hop address must be rejected with BgpCoreError."""
    net, plen = '192.168.0.0', 24
    self._test_update_vrf_table(
        IPAddrPrefix(plen, net),
        '65000:100',
        '{0}/{1}'.format(net, plen),
        'xxx.xxx.xxx.xxx',  # invalid next-hop triggers the error
        VRF_RF_IPV4,
        None)               # route_type is ignored for IPv4
@raises(BgpCoreError)
def test_update_vrf_table_invalid_ipv4_prefix(self):
    """A malformed IPv4 network must be rejected with BgpCoreError."""
    net, plen = 'xxx.xxx.xxx.xxx', 24  # invalid network address
    self._test_update_vrf_table(
        IPAddrPrefix(plen, net),
        '65000:100',
        '{0}/{1}'.format(net, plen),
        '10.0.0.1',
        VRF_RF_IPV4,
        None)  # route_type is ignored for IPv4
@raises(BgpCoreError)
def test_update_vrf_table_invalid_ipv6_prefix(self):
    """A malformed IPv6 network must be rejected with BgpCoreError."""
    net, plen = 'xxxx::', 64  # invalid network address
    self._test_update_vrf_table(
        IP6AddrPrefix(plen, net),
        '65000:100',
        '{0}/{1}'.format(net, plen),
        'fe80::0011:aabb:ccdd:eeff',
        VRF_RF_IPV6,
        None)  # route_type is ignored for IPv6
@raises(BgpCoreError)
def test_update_vrf_table_invalid_route_family(self):
    """An unknown route family must be rejected with BgpCoreError."""
    net, plen = '192.168.0.0', 24
    self._test_update_vrf_table(
        IPAddrPrefix(plen, net),
        '65000:100',
        '{0}/{1}'.format(net, plen),
        '10.0.0.1',
        'foobar',  # invalid route family triggers the error
        None)      # route_type is ignored here
@mock.patch(
    'ryu.services.protocols.bgp.core_managers.TableCoreManager.__init__',
    mock.MagicMock(return_value=None))
@mock.patch(
    'ryu.services.protocols.bgp.core_managers.TableCoreManager.learn_path')
def _test_update_global_table(self, learn_path_mock, prefix, next_hop,
                              is_withdraw, expected_next_hop):
    """Common driver: assert update_global_table() hands learn_path()
    a single positional Path carrying the expected attributes."""
    # Path attributes every generated path is expected to carry.
    expected_attrs = OrderedDict()
    expected_attrs[BGP_ATTR_TYPE_ORIGIN] = BGPPathAttributeOrigin(
        BGP_ATTR_ORIGIN_IGP)
    expected_attrs[BGP_ATTR_TYPE_AS_PATH] = BGPPathAttributeAsPath([[]])
    expected_attrs = str(expected_attrs)
    manager = table_manager.TableCoreManager(None, None)
    # Exercise the API under test.
    manager.update_global_table(
        prefix=prefix,
        next_hop=next_hop,
        is_withdraw=is_withdraw,
    )
    # Verify the single forwarded call.
    calls = learn_path_mock.call_args_list
    ok_(len(calls) == 1)  # learn_path must be called exactly once
    passed_args, passed_kwargs = calls[0]
    ok_(len(passed_kwargs) == 0)  # a single positional Path argument
    path = passed_args[0]
    eq_(None, path.source)
    eq_(prefix, path.nlri.prefix)
    eq_(expected_attrs, str(path.pathattr_map))
    eq_(expected_next_hop, path.nexthop)
    eq_(is_withdraw, path.is_withdraw)
def test_update_global_table_ipv4(self):
    """Adding an IPv4 route keeps its next-hop as given."""
    hop = '10.0.0.1'
    self._test_update_global_table(
        prefix='192.168.0.0/24',
        next_hop=hop,
        is_withdraw=False,
        expected_next_hop=hop,
    )
def test_update_global_table_ipv4_withdraw(self):
    """Withdrawing an IPv4 route keeps its next-hop as given."""
    hop = '10.0.0.1'
    self._test_update_global_table(
        prefix='192.168.0.0/24',
        next_hop=hop,
        is_withdraw=True,
        expected_next_hop=hop,
    )
def test_update_global_table_ipv4_no_next_hop(self):
    """A missing IPv4 next-hop defaults to the all-zero address."""
    self._test_update_global_table(
        prefix='192.168.0.0/24',
        next_hop=None,
        is_withdraw=True,
        expected_next_hop='0.0.0.0',  # IPv4 default next-hop
    )
def test_update_global_table_ipv6(self):
    """Adding an IPv6 route keeps its next-hop as given."""
    hop = 'fe80::0011:aabb:ccdd:eeff'
    self._test_update_global_table(
        prefix='fe80::/64',
        next_hop=hop,
        is_withdraw=False,
        expected_next_hop=hop,
    )
def test_update_global_table_ipv6_withdraw(self):
    """Withdrawing an IPv6 route keeps its next-hop as given."""
    hop = 'fe80::0011:aabb:ccdd:eeff'
    self._test_update_global_table(
        prefix='fe80::/64',
        next_hop=hop,
        is_withdraw=True,
        expected_next_hop=hop,
    )
def test_update_global_table_ipv6_no_next_hop(self):
    """A missing IPv6 next-hop defaults to the all-zero address."""
    self._test_update_global_table(
        prefix='fe80::/64',
        next_hop=None,
        is_withdraw=True,
        expected_next_hop='::',  # IPv6 default next-hop
    )
@mock.patch(
    'ryu.services.protocols.bgp.core_managers.TableCoreManager.__init__',
    mock.MagicMock(return_value=None))
def _test_update_flowspec_vrf_table(self, flowspec_family, route_family,
                                    route_dist, rules, prefix,
                                    is_withdraw, actions=None):
    """Common driver: assert update_flowspec_vrf_table() forwards exactly
    one insert_vrffs_path() call carrying the expected NLRI prefix."""
    manager = table_manager.TableCoreManager(None, None)
    table_mock = mock.MagicMock()
    manager._tables = {(route_dist, route_family): table_mock}
    # Exercise the API under test.
    manager.update_flowspec_vrf_table(
        flowspec_family=flowspec_family,
        route_dist=route_dist,
        rules=rules,
        actions=actions,
        is_withdraw=is_withdraw,
    )
    # Verify the single forwarded call.
    calls = table_mock.insert_vrffs_path.call_args_list
    ok_(len(calls) == 1)  # insert_vrffs_path must be called exactly once
    passed_args, passed_kwargs = calls[0]
    ok_(len(passed_args) == 0)  # keyword arguments only
    eq_(prefix, passed_kwargs['nlri'].prefix)
    eq_(is_withdraw, passed_kwargs['is_withdraw'])
def test_update_flowspec_vrf_table_vpnv4(self):
    """VPNv4 Flow Spec rule with a traffic-rate action is accepted."""
    rules = {
        'dst_prefix': '10.70.1.0/24',
    }
    actions = {
        'traffic_rate': {
            'as_number': 0,
            'rate_info': 100.0,
        },
    }
    self._test_update_flowspec_vrf_table(
        flowspec_family='vpnv4fs',
        route_family='ipv4fs',
        route_dist='65001:100',
        rules=rules,
        prefix='ipv4fs(dst_prefix:10.70.1.0/24)',
        is_withdraw=False,
        actions=actions,
    )
def test_update_flowspec_vrf_table_vpnv4_without_actions(self):
    """VPNv4 Flow Spec rule without any action is still accepted."""
    rules = {
        'dst_prefix': '10.70.1.0/24',
    }
    self._test_update_flowspec_vrf_table(
        flowspec_family='vpnv4fs',
        route_family='ipv4fs',
        route_dist='65001:100',
        rules=rules,
        prefix='ipv4fs(dst_prefix:10.70.1.0/24)',
        is_withdraw=False,  # actions defaults to None
    )
@raises(BgpCoreError)
def test_update_flowspec_vrf_table_vpnv4_invalid_actions(self):
flowspec_family = | |
normal target encoding when given test data.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
# Use target encoding from fit() if this is test data
if y is None:
return self._target_encoder.transform(X)
# Compute means for each fold
self._train_ix = []
self._test_ix = []
self._fit_tes = []
kf = KFold(n_splits=self.n_splits, shuffle=self.shuffle)
for train_ix, test_ix in kf.split(X):
self._train_ix.append(train_ix)
self._test_ix.append(test_ix)
te = TargetEncoder(cols=self.cols)
self._fit_tes.append(te.fit(X.iloc[train_ix,:], y.iloc[train_ix]))
# Apply means across folds
Xo = X.copy()
for ix in range(len(self._test_ix)):
test_ix = self._test_ix[ix]
Xo.iloc[test_ix,:] = self._fit_tes[ix].transform(X.iloc[test_ix,:])
# Return transformed DataFrame
return Xo
def fit_transform(self, X, y=None):
    """Fit the cross-fold target encoder, then transform the data.

    Parameters
    ----------
    X : pandas DataFrame of shape (n_samples, n_columns)
        Independent variable matrix with columns to encode
    y : pandas Series of shape (n_samples,)
        Dependent variable values.

    Returns
    -------
    pandas DataFrame
        Input DataFrame with transformed columns
    """
    fitted = self.fit(X, y)
    return fitted.transform(X, y)
class TargetEncoderLOO(BaseEstimator, TransformerMixin):
    """Leave-one-out target encoder.

    Replaces category values in categorical column(s) with the mean target
    (dependent variable) value for each category, using a leave-one-out
    strategy such that no sample's target value is used in computing the
    target mean which is used to replace that sample's category value.

    Can also optionally use a Bayesian estimation of the sample's mean target
    value, which sets a prior to the average of all encoding values, with the
    strength of that prior proportional to the ``bayesian_c`` parameter.
    """

    def __init__(self, cols=None, dtype='float64', nocol=None,
                 bayesian_c=None):
        """Leave-one-out target encoder.

        Parameters
        ----------
        cols : str or list of str
            Column(s) to target encode. Default is to target encode all
            categorical columns in the DataFrame.
        dtype : str
            Datatype to use for encoded columns. Default = 'float64'
        bayesian_c : float
            Prior strength (C) for the Bayesian average
            https://en.wikipedia.org/wiki/Bayesian_average
        nocol : None or str
            Action to take if a col in ``cols`` is not in the dataframe to
            transform. Valid values:

            * None - ignore cols in ``cols`` which are not in dataframe
            * 'warn' - issue a warning when a column is not in dataframe
            * 'err' - raise an error when a column is not in dataframe
        """
        # Check types
        if cols is not None and not isinstance(cols, (list, str)):
            raise TypeError('cols must be None, or a list or a string')
        if isinstance(cols, list):
            if not all(isinstance(c, str) for c in cols):
                raise TypeError('each element of cols must be a string')
        if not isinstance(dtype, str):
            # BUGFIX: message previously lacked the closing parenthesis
            raise TypeError('dtype must be a string (e.g. \'float64\')')
        if nocol is not None and nocol not in ('warn', 'err'):
            raise ValueError('nocol must be None, \'warn\', or \'err\'')
        if bayesian_c is not None and not isinstance(bayesian_c, (float, int)):
            raise TypeError('bayesian_c must be None or float or int')
        # Store parameters (a single column name is normalized to a list)
        if isinstance(cols, str):
            self.cols = [cols]
        else:
            self.cols = cols
        self.dtype = dtype
        self.nocol = nocol
        if isinstance(bayesian_c, int):
            self.bayesian_c = float(bayesian_c)
        else:
            self.bayesian_c = bayesian_c
        self.overall_mean = None  # set in fit(); prior mean for Bayesian mode

    def fit(self, X, y):
        """Fit leave-one-out target encoder to X and y.

        Parameters
        ----------
        X : pandas DataFrame of shape (n_samples, n_columns)
            Independent variable matrix with columns to encode
        y : pandas Series of shape (n_samples,)
            Dependent variable values.

        Returns
        -------
        TargetEncoderLOO
            Returns self, the fit object.
        """
        # Encode all categorical cols by default
        if self.cols is None:
            self.cols = [col for col in X if str(X[col].dtype) == 'object']
        # Check columns are in X, honoring the configured nocol policy
        if self.nocol == 'err':
            for col in self.cols:
                if col not in X:
                    raise ValueError('Column \''+col+'\' not in X')
        elif self.nocol == 'warn':
            for col in self.cols:
                if col not in X:
                    print('Column \''+col+'\' not in X')
        # Overall target mean: prior for the Bayesian average
        self.overall_mean = np.mean(y)
        # Per-column, per-category (target sum, count) pairs; storing sums
        # rather than means lets transform() subtract each sample's own y
        # for the leave-one-out estimate.
        self.sum_count = dict()
        for col in self.cols:
            self.sum_count[col] = dict()
            uniques = X[col].dropna().unique()
            for unique in uniques:
                ix = X[col] == unique
                self.sum_count[col][unique] = (y[ix].sum(), ix.sum())
        # Return the fit object
        return self

    def transform(self, X, y=None):
        """Perform the target encoding transformation.

        Uses leave-one-out target encoding when given training data (y is
        passed), and uses normal target encoding when given test data.

        Parameters
        ----------
        X : pandas DataFrame of shape (n_samples, n_columns)
            Independent variable matrix with columns to encode

        Returns
        -------
        pandas DataFrame
            Input DataFrame with transformed columns
        """
        # Create output dataframe
        Xo = X.copy()
        # Bayesian prior terms (C and C * prior mean)
        if self.bayesian_c is not None:
            C = self.bayesian_c
            Cm = C*self.overall_mean
        # Use means from training data if passed test data
        if y is None:
            for col in self.sum_count:
                # Unseen categories (and NaN) stay NaN
                vals = np.full(X.shape[0], np.nan)
                for cat, sum_count in self.sum_count[col].items():
                    if self.bayesian_c is None:
                        vals[X[col] == cat] = sum_count[0]/sum_count[1]
                    else:  # use bayesian mean
                        vals[X[col] == cat] = (Cm+sum_count[0])/(C+sum_count[1])
                # BUGFIX: honor the documented dtype parameter
                Xo[col] = vals.astype(self.dtype)
        # LOO target encode each column if this is training data
        else:
            for col in self.sum_count:
                vals = np.full(X.shape[0], np.nan)
                for cat, sum_count in self.sum_count[col].items():
                    ix = X[col] == cat
                    if sum_count[1] < 2:
                        # Singleton category: removing the sample itself
                        # leaves nothing to average over
                        vals[ix] = np.nan
                    else:
                        if self.bayesian_c is None:
                            vals[ix] = (sum_count[0]-y[ix])/(sum_count[1]-1)
                        else:  # use Bayesian mean
                            vals[ix] = ((Cm+sum_count[0]-y[ix])
                                        / (C+sum_count[1]-1))
                # BUGFIX: honor the documented dtype parameter
                Xo[col] = vals.astype(self.dtype)
        # Return encoded DataFrame
        return Xo

    def fit_transform(self, X, y=None):
        """Fit and transform the data with leave-one-out target encoding.

        Parameters
        ----------
        X : pandas DataFrame of shape (n_samples, n_columns)
            Independent variable matrix with columns to encode
        y : pandas Series of shape (n_samples,)
            Dependent variable values.

        Returns
        -------
        pandas DataFrame
            Input DataFrame with transformed columns
        """
        return self.fit(X, y).transform(X, y)
class MultiTargetEncoderLOO(BaseEstimator, TransformerMixin):
"""Leave-one-out target encoder which handles multiple classes per sample.
Replaces category values in categorical column(s) with the mean target
(dependent variable) value for each category, using a leave-one-out
strategy such that no sample's target value is used in computing the
target mean which is used to replace that sample's category value.
Can also optionally use a Bayesian estimation of the sample's mean target
value, which sets a prior to the average of all encoding values, with the
strength of that prior proportional to the ``bayesian_c`` parameter.
Parameters
----------
cols : str or list of str
Column(s) to target encode. Default is to target encode all
categorical columns in the DataFrame.
dtype : str
Datatype to use for encoded columns. Default = 'float64'
bayesian_c : float
Prior strength (C) for the Bayesian average
https://en.wikipedia.org/wiki/Bayesian_average
sep : str
Separator string which delimits the labels
nocol : None or str
Action to take if a col in ``cols`` is not in the dataframe to
transform. Valid values:
* None - (default) ignore cols which aren't in dataframe
* 'warn' - issue a warning when a column is not in dataframe
* 'err' - raise an error when a column is not in dataframe
"""
def __init__(self, cols=None, dtype='float64', nocol=None,
             bayesian_c=0.0, sep=','):
    """Validate constructor arguments and store the encoder configuration.

    See the class docstring for the meaning of each parameter.
    """
    # --- argument validation -------------------------------------------
    if cols is not None and not isinstance(cols, (list, str)):
        raise TypeError('cols must be None, or a list or a string')
    if isinstance(cols, list) and not all(
            isinstance(c, str) for c in cols):
        raise TypeError('each element of cols must be a string')
    if not isinstance(dtype, str):
        raise TypeError('dtype must be a string (e.g. \'float64\'')
    if nocol is not None and nocol not in ('warn', 'err'):
        raise ValueError('nocol must be None, \'warn\', or \'err\'')
    if not isinstance(bayesian_c, (float, int)):
        raise TypeError('bayesian_c must be float or int')
    if not isinstance(sep, str):
        raise TypeError('sep must be a str')
    # --- store configuration -------------------------------------------
    # A single column name is normalized to a one-element list.
    self.cols = [cols] if isinstance(cols, str) else cols
    self.dtype = dtype
    self.nocol = nocol
    self.bayesian_c = float(bayesian_c)
    self.sep = sep
    self.overall_mean = None  # computed later during fitting
def fit(self, X, y):
"""Fit leave-one-out target encoder to X and y.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
MultiTargetEncoderLOO
Returns self, the fit object.
"""
# Encode all categorical cols | |
unknowns, parallel_verbosity)
if report_level >= 1:
print('\nSummaries for empirical parameter distributions\n-----------------------------------------------')
print(pandas.DataFrame(repeated_estimates).describe().T)
if report_level >= 2:
_runtimes_min = [result[1]['runtime_min'] for result in results]
print(f'\nAverage runtime per estimation job was {numpy.mean(_runtimes_min):.2f} +/- {numpy.std(_runtimes_min, ddof=1):.2f} min')
return repeated_estimates, results
def get_sensitivities(self,
                      measurements:List[Measurement]=None, responses:list='all', parameters:list=None,
                      tfinal:float=None, abs_h:float=None, rel_h:float=1e-3,
                      handle_CVodeError:bool=True, verbosity_CVodeError:bool=False,
                      ) -> List[Sensitivity]:
    """
    Approximates sensitivities of model responses w.r.t. parameters using the central difference quotient: f'(x) = (f(x+h) - f(x-h)) / (2*h).

    Indicates how a model response (i.e., a state or observation) changes dynamically in time
    with a small change in a certain parameter (i.e., a model parameter, initial value, or observation parameter).

    Keyword arguments
    -----------------
    measurements : List[Measurement]
        Can provide a Measurement object for any model state or observation.
        Default is None, which implies that `tfinal` cannot be None.
    responses : list
        Specific model responses (state or observable), for which the sensitivities are requested.
        Default is `all`, which causes sensitivities for all model responses.
    parameters : list or dict
        The parameters for which the sensitivities are requested.
        In case a dict is provided, the corresponding values will be set.
        In case a list is provided, the corresponding values for the current mapping will be used.
        Default is None, which uses all current parameters.
    tfinal : float
        The final integration time.
        Default is None, which implies that `measurements` cannot be None.
    abs_h : float
        Absolute perturbation for central difference quotient. `rel_h` must be set to None for `abs_h` to take effect.
        Default is None.
    rel_h : float
        Relative perturbation for central difference quotient. Overrides use of abs_h.
        Absolute perturbation for each parametric sensitivity is then calculated according to: abs_h = rel_h * max(1, |p|).
        Default is 1e-3.
    handle_CVodeError : bool
        Catches CVodeError raised by the solver, in order to not interrupt the estimations for toxic parameter values.
        Default is True.
    verbosity_CVodeError : bool
        Enables informative output during handling CVodeErrors. Default is False.

    Returns
    -------
    sensitivities : List[Sensitivity]

    Raises
    ------
    TypeError
        Wrong type for kwarg `responses`.
    ValueError
        Non-unique (case-insensitive) responses given.
    ValueError
        Non-unique (case-insensitive) parameters given.
    ValueError
        Given parameters are not known according to the current parameter mapping.
    ValueError
        Neither measurements dict nor tfinal is provided.
    TypeError
        Wrong type for kwarg `parameters`.
    TypeError
        A list containing not only Measurement objects is provided.
    """
    # --- validate `responses` and `measurements` arguments ---
    if not isinstance(responses, list) and responses != 'all':
        raise TypeError('Responses must be either of type list or `all`')
    if responses != 'all':
        if not Helpers.has_unique_ids(responses):
            raise ValueError(Messages.non_unique_ids)
    if measurements is not None:
        for _item in measurements:
            if not isinstance(_item, Measurement):
                raise TypeError(f'Must provide a list of Measurement objects: {_item} in {measurements}')
    # timepoints for integration: union of all unique measurement
    # timepoints, extended by tfinal when it lies beyond them
    t = numpy.array([])
    if measurements is not None:
        t = numpy.append(t, Helpers.get_unique_timepoints(measurements))
    if tfinal is not None:
        tfinal = numpy.array(tfinal)
        if t.size == 0:
            t = numpy.append(t, tfinal)
        elif tfinal > max(t):
            t = numpy.append(t, tfinal)
    if t.size == 0:
        raise ValueError('Must provide either measurements or tfinal')
    if t.size == 1:
        # Only a single timepoint known: run a simulation to obtain a
        # dense time grid up to that point (verbosity=50 presumably
        # silences the solver output — confirm against simulate()).
        _simulations = self.simulate(t=t, verbosity=50)
        t = Helpers.get_unique_timepoints(_simulations)
    # set parameters if provided
    _parameter_names = self._get_valid_parameter_names()
    if parameters is not None:
        if not Helpers.has_unique_ids(parameters):
            raise ValueError(Messages.non_unique_ids)
        if not set(parameters).issubset(set(_parameter_names)):
            raise ValueError(f'Invalid parameters: {set(parameters).difference(set(_parameter_names))}. Valid parameters are: {_parameter_names}.')
        if isinstance(parameters, dict):
            # dict: apply the given parameter values to the model first
            self.set_parameters(parameters)
        elif isinstance(parameters, list):
            # list: look up the current values of the named parameters
            _parameters = self._get_all_parameters()
            parameters = {p : _parameters[p] for p in parameters}
    else:
        # default: sensitivities w.r.t. every parameter of the model
        parameters = self._get_all_parameters()
    # NOTE(review): handle_CVodeError and verbosity_CVodeError are accepted
    # and documented but are not forwarded to _get_sensitivities_parallel
    # in this call — confirm whether that helper obtains them elsewhere.
    sensitivities = []
    for _id in self.replicate_ids:
        sensitivities.extend(self._get_sensitivities_parallel(_id, parameters, rel_h, abs_h, t, responses))
    return sensitivities
def get_information_matrix(self,
                           measurements:List[Measurement], estimates:dict,
                           sensitivities:List[Sensitivity]=None, handle_CVodeError:bool=True, verbosity_CVodeError:bool=False,
                           ) -> numpy.ndarray:
    """
    Constructs the Fisher information matrix (FIM) by calculating a FIM at each distinct timepoint where at least one measurement was made.

    The time-varying FIMs are summed over all timepoints and replicates. FIM(t) are built using sensitivities,
    which are approximated using the central difference quotient method.
    FIM is of shape (n_estimated_parameters, n_estimated_parameters), with parameters sorted alphabetically (case-insensitive).
    A non-invertible FIM indicates that parameter(s) cannot be identified from the given measurements.

    Arguments
    ---------
    measurements : List[Measurement]
        Can provide a Measurement object for any model state or observation.
    estimates : dict
        The parameters (model parameters, initial values, observation parameters) that have been estimated previously.

    Keyword arguments
    -----------------
    sensitivities : List[Sensitivity]
        These may have been calculated previously using the method `get_sensitivities`.
        Default is None, which causes calculation of sensitivities.
    handle_CVodeError : bool
        Catches CVodeError raised by the solver, in order to not interrupt the estimations for toxic parameter values.
        Default is True.
    verbosity_CVodeError : bool
        Enables informative output during handling CVodeErrors. Default is False.

    Returns
    -------
    FIM : numpy.ndarray
        Fisher information matrix of shape (n_estimated_parameters, n_estimated_parameters),
        with values of rows and cols corresponding to the parameters (sorted alphabetically case-insensitive).

    Raises
    ------
    TypeError
        A list containing not only Measurement objects is provided.
    TypeError
        A list containing not only Sensitivity objects is provided.
    """
    # Validate input lists.
    for _item in measurements:
        if not isinstance(_item, Measurement):
            raise TypeError('Must provide a list of Measurement objects')
    if sensitivities is None:
        # Consistency fix: forward both CVode error-handling flags, as
        # get_parameter_uncertainties() does (verbosity_CVodeError was
        # previously accepted but silently dropped here).
        sensitivities = self.get_sensitivities(
            measurements=measurements,
            parameters=estimates,
            handle_CVodeError=handle_CVodeError,
            verbosity_CVodeError=verbosity_CVodeError,
        )
    else:
        for _item in sensitivities:
            if not isinstance(_item, Sensitivity):
                raise TypeError('Must provide a list of Sensitivity objects')
    # Sum the time-local FIMs over all unique measurement timepoints and
    # all replicates. Use a float zero matrix as accumulator (the previous
    # numpy.full(..., fill_value=0) produced an integer array); the dead
    # per-replicate FIMs dict that was never read is removed.
    all_t = Helpers.get_unique_timepoints(measurements)
    n_est = len(estimates)
    FIM = numpy.zeros((n_est, n_est))
    for _id in self.replicate_ids:
        for _t in all_t:
            FIM = FIM + self._get_information_matrix_at_t(
                t=_t, measurements=measurements, estimates=estimates,
                sensitivities=sensitivities, replicate_id=_id,
            )
    return FIM
def get_parameter_uncertainties(self,
estimates:dict,
measurements:List[Measurement],
sensitivities:List[Sensitivity]=None,
report_level:int=0,
handle_CVodeError:bool=True,
verbosity_CVodeError:bool=True,
) -> dict:
"""
Calculates uncertainties for estimated parameters, based on variance-covariance matrix derived from sensitivity-based Fisher information matrix.
NOTE: The parameter variance-covariance matrix represents a symmetric, linear approximation to the parameter (co)-variances.
Other methods such a Monte-Carlo sampling can discover non-linear correlations, but require significant computational load.
Arguments
---------
estimates : dict
Dictionary holding the previously estimated parameter values.
measurements : List[Measurement]
The measurements from which the parameters have been estimated.
Keyword arguments
-----------------
sensitivities : List[Sensitivity]
These may have been calculated previously using the method `get_sensitivities`.
Default is None, which causes calculation of sensitivities.
report_level : int
Controls depth of informative output, default is 0 which is no output.
handle_CVodeError : bool
Catches CVodeError raised by the solver, in order to not interrupt the estimations for toxic parameter values.
Default is True.
verbosity_CVodeError : bool
Enables informative output during handling CVodeErrors. Default is False.
Returns
-------
parameter_information : dict
A dictionary summarizing the parameters, their values and standard errors.
Raises
------
TypeError
A list containing not only Measurement objects is provided.
TypeError
A list containing not only Sensitivity objects is provided.
"""
for _item in measurements:
if not isinstance(_item, Measurement):
raise TypeError('Must provide a list of Measurement objects')
if sensitivities is None:
sensitivities = self.get_sensitivities(
measurements=measurements,
parameters=estimates,
handle_CVodeError=handle_CVodeError,
verbosity_CVodeError=verbosity_CVodeError,
)
else:
for _item in sensitivities:
if not isinstance(_item, Sensitivity):
raise TypeError('Must provide a list of Sensitivity objects')
matrices = self.get_parameter_matrices(measurements=measurements, estimates=estimates, sensitivities=sensitivities)
std_errs = numpy.sqrt(numpy.diag(matrices['Cov']))
if report_level>=1:
print('\nEstimated parameters:\n----------')
for _p, _err in zip(sorted(estimates.keys(), key=str.lower), std_errs):
print(f'{_p}: {estimates[_p]:.2e} +/- {_err:.2e} ({abs(_err/estimates[_p]*100):.2f} %)')
parameter_information = {}
parameter_information['Parameters'] = sorted(estimates.keys(), key=str.lower)
parameter_information['Values'] = numpy.array([estimates[_p] for _p in sorted(estimates.keys(), key=str.lower)])
parameter_information['StdErrs'] = std_errs
return parameter_information
def get_optimality_criteria(self, Cov:numpy.ndarray, report_level:int=0) -> dict:
"""
Calculates single-value optimality criteria from a parameter variance-covariance matrix.
Arguments
---------
Cov : numpy.ndarray
The parameter covariance matrix for the estimated parameters.
Keyword arguments
-----------------
report_level : int
Controls informative output on optimality criteria.
Default is 0, which is no print output.
Returns
-------
opt_criteria : dict
The calculated optimality criteria.
"""
criteria = ['A', 'D', 'E', 'E_mod']
cov_evaluator = CovOptimality()
opt_criteria = {_criterion : cov_evaluator.get_value(_criterion, Cov) for _criterion in criteria}
if report_level >=1:
print('\nOptimality criteria:\n----------')
for _criterion in criteria:
print(f'{_criterion}: {opt_criteria[_criterion]:.2e}')
return opt_criteria
def get_parameter_matrices(self,
estimates:dict,
measurements:List[Measurement],
sensitivities:List[Sensitivity]=None,
handle_CVodeError:bool=True,
) -> Dict[str, numpy.ndarray]:
"""
Calculate Fisher information matrix FIM, as well as corresponding variance-covariance matrix Cov and correlation matrix Corr.
Arguments
---------
estimates : dict
Dictionary | |
"""
This module contains methods used our implementation of the Asynchronously
Parallel Optimization Solver for finding Multiple Minima (APOSMM) method
described in detail in the paper
`https://doi.org/10.1007/s12532-017-0131-4 <https://doi.org/10.1007/s12532-017-0131-4>`_
"""
from __future__ import division
from __future__ import absolute_import
__all__ = ['aposmm_logic','initialize_APOSMM', 'decide_where_to_start_localopt', 'update_history_dist']
import sys, os, traceback
import numpy as np
# import scipy as sp
from scipy.spatial.distance import cdist
from mpi4py import MPI
from numpy.lib.recfunctions import merge_arrays
from math import log, gamma, pi, sqrt
from petsc4py import PETSc
import nlopt
def aposmm_logic(H,persis_info,gen_specs,_):
    """
    APOSMM as a libEnsemble generation function. Coordinates multiple local
    optimization runs, starting from points which do not have a better point
    nearby them. This generation function produces/requires the following
    fields in ``H``:
    - ``'x' [n floats]``: Parameters being optimized over
    - ``'x_on_cube' [n floats]``: Parameters scaled to the unit cube
    - ``'f' [float]``: Objective function being minimized
    - ``'local_pt' [bool]``: True if point from a local optimization run, false if it is a sample point
    - ``'dist_to_unit_bounds' [float]``: Distance to domain boundary
    - ``'dist_to_better_l' [float]``: Distance to closest better local optimization point
    - ``'dist_to_better_s' [float]``: Distance to closest better sample optimization point
    - ``'ind_of_better_l' [int]``: Index of point ``'dist_to_better_l``' away
    - ``'ind_of_better_s' [int]``: Index of point ``'dist_to_better_s``' away
    - ``'started_run' [bool]``: True if point has started a local optimization run
    - ``'num_active_runs' [int]``: Counts number of non-terminated local runs the point is in
    - ``'local_min' [float]``: True if point has been ruled a local minima
    and optionally
    - ``'priority' [float]``: Value quantifying a point's desirability
    - ``'f_i' [float]``: Value of ith objective component (if calculated one at a time)
    - ``'fvec' [m floats]``: All objective components (if calculated together)
    - ``'obj_component' [int]``: Index corresponding to value in ``'f_i``'
    - ``'pt_id' [int]``: Identify the point
    When using libEnsemble to do individual objective component evaluations,
    APOSMM will return ``gen_specs['components']`` copies of each point, but
    each component=0 version of the point will only be considered when
    - deciding where to start a run,
    - best nearby point,
    - storing the order of the points is the run
    - storing the combined objective function value
    - etc
    Necessary quantities in ``gen_specs`` are:
    - ``'lb' [n floats]``: Lower bound on search domain
    - ``'ub' [n floats]``: Upper bound on search domain
    - ``'initial_sample_size' [int]``: Number of uniformly sampled points that must be returned (with a non-nan value) before a local optimization run is started.
    - ``'localopt_method' [str]``: Name of an NLopt or PETSc/TAO method
    Optional ``gen_specs`` entries are:
    - ``'sample_points' [int]``: The points to be sampled (in the original domain)
    - ``'combine_component_func' [func]``: Function to combine objective components
    - ``'components' [int]``: Number of objective components
    - ``'dist_to_bound_multiple' [float in (0,1]]``: What fraction of the distance to the nearest boundary should the initial step size be in localopt runs
    - ``'high_priority_to_best_localopt_runs': [bool]``: True if localopt runs with smallest observed function value are given priority
    - ``'lhs_divisions' [int]``: Number of Latin hypercube sampling partitions (0 or 1 results in uniform sampling)
    - ``'min_batch_size' [int]``: Lower bound on the number of points given every time APOSMM is called
    - ``'mu' [float]``: Distance from the boundary that all localopt starting points must satisfy
    - ``'nu' [float]``: Distance from identified minima that all starting points must satisfy
    - ``'single_component_at_a_time' [bool]``: True if single objective components will be evaluated at a time
    - ``'rk_const' [float]``:
    And ``gen_specs`` convergence tolerances for NLopt and PETSc/TAO:
    - ``'fatol' [float]``:
    - ``'ftol_abs' [float]``:
    - ``'ftol_rel' [float]``:
    - ``'gatol' [float]``:
    - ``'grtol' [float]``:
    - ``'xtol_abs' [float]``:
    - ``'xtol_rel' [float]``:
    :Note:
        ``gen_specs['combine_component_func']`` must be defined when there are
        multiple objective components.
    :Note:
        APOSMM critically uses ``persis_info`` to store information about
        active runs, order of points in each run, etc. The allocation function
        must ensure it's always given.
    :See:
        ``libensemble/tests/regression_tests/test_branin_aposmm.py``
        for basic APOSMM usage.
    :See:
        ``libensemble/tests/regression_tests/test_chwirut_aposmm_one_residual_at_a_time.py``
        for an example of APOSMM coordinating multiple local optimization runs
        for an objective with more than one component.
    """
    """
    Description of intermediate variables in aposmm_logic:
    n: domain dimension
    c_flag: True if giving libEnsemble individual components of fvec to evaluate. (Note if c_flag is True, APOSMM will only use the com
    n_s: the number of complete evaluations (not just component evaluations)
    updated_inds: indices of H that have been updated (and so all their information must be sent back to libE manager to update)
    O: new points to be sent back to the history
    x_new: when re-running a local opt method to get the next point: stores the first new point requested by a local optimization method
    pt_in_run: when re-running a local opt method to get the next point: counts function evaluations to know when a new point is given
    total_pts_in_run: when re-running a local opt method to get the next point: total evaluations in run to be incremented
    starting_inds: indices where a runs should be started.
    active_runs: indices of active local optimization runs (currently saved to disk between calls to APOSMM)
    sorted_run_inds: indices of the considered run (in the order they were requested by the localopt method)
    x_opt: the reported minimum from a localopt run (disregarded unless exit_code isn't 0)
    exit_code: 0 if a new localopt point has been found, otherwise it's the NLopt/POUNDERS code
    samples_needed: counts the number of additional uniformly drawn samples needed
    """
    # Unpack problem dimension, sample count, component flag, output array and
    # the localopt-start heuristic parameters (see intermediate-variable notes above).
    n, n_s, c_flag, O, rk_const, lhs_divisions, mu, nu = initialize_APOSMM(H, gen_specs)
    # np.savez('H'+str(len(H)),H=H,gen_specs=gen_specs,persis_info=persis_info)
    if n_s < gen_specs['initial_sample_size']:
        # Still collecting the initial sample: no localopt machinery yet.
        updated_inds = set()
    else:
        global x_new, pt_in_run, total_pts_in_run # Used to generate a next local opt point
        updated_inds = update_history_dist(H, gen_specs, c_flag)
        starting_inds = decide_where_to_start_localopt(H, n_s, rk_const, lhs_divisions, mu, nu)
        updated_inds.update(starting_inds)
        for ind in starting_inds:
            # Find the run number
            if not np.any(H['started_run']):
                # First-ever run: initialize the persistent run bookkeeping.
                persis_info['active_runs'] = set()
                persis_info['run_order'] = {}
                persis_info['total_runs'] = 0
            new_run_num = persis_info['total_runs']
            H['started_run'][ind] = 1
            H['num_active_runs'][ind] += 1
            persis_info['run_order'][new_run_num] = [ind]
            persis_info['active_runs'].update([new_run_num])
            persis_info['total_runs'] +=1
        inactive_runs = set()
        # Find next point in any uncompleted runs using information stored in persis_info
        for run in persis_info['active_runs']:
            # Re-runs the localopt method over the run's history; x_new (module
            # global) receives the first new point the method requests.
            x_opt, exit_code, persis_info, sorted_run_inds = advance_localopt_method(H, gen_specs, c_flag, run, persis_info)
            if np.isinf(x_new).all():
                assert exit_code>0, "Exit code not zero, but no information in x_new.\n Local opt run " + str(run) + " after " + str(len(sorted_run_inds)) + " evaluations.\n Worker crashing!"
                # No new point was added. Hopefully at a minimum
                update_history_optimal(x_opt, H, sorted_run_inds)
                inactive_runs.add(run)
                updated_inds.update(sorted_run_inds)
            else:
                # Check whether the requested point is already queued in O.
                matching_ind = np.where(np.equal(x_new,O['x_on_cube']).all(1))[0]
                if len(matching_ind) == 0:
                    persis_info = add_points_to_O(O, x_new, H, gen_specs, c_flag, persis_info, local_flag=1, sorted_run_inds=sorted_run_inds, run=run)
                else:
                    assert len(matching_ind) == 1, "This point shouldn't have ended up in the O twice!"
                    persis_info['run_order'][run].append(O['sim_id'][matching_ind[0]])
        # Removal is deferred to here so the set is not mutated while iterating.
        for i in inactive_runs:
            persis_info['active_runs'].remove(i)
            persis_info['run_order'].pop(i) # Deletes any information about this run
    # Decide how many fresh sample points to request alongside localopt points.
    if len(H) == 0:
        samples_needed = gen_specs['initial_sample_size']
    elif 'min_batch_size' in gen_specs:
        samples_needed = gen_specs['min_batch_size'] - len(O)
    else:
        samples_needed = int(not bool(len(O))) # 1 if len(O)==0, 0 otherwise
    if samples_needed > 0:
        if 'sample_points' in gen_specs:
            # Draw the next unused user-supplied sample points.
            v = sum(H['local_pt'])
            x_new = gen_specs['sample_points'][v:v+samples_needed]
            on_cube = False # We assume the points are on the original domain, not unit cube
        else:
            x_new = persis_info['rand_stream'].uniform(0,1,(samples_needed,n))
            on_cube = True
        persis_info = add_points_to_O(O, x_new, H, gen_specs, c_flag, persis_info, on_cube=on_cube)
    # Prepend the rows of H that changed so the manager can update its copy.
    O = np.append(H[np.array(list(updated_inds),dtype=int)][[o[0] for o in gen_specs['out']]],O)
    return O, persis_info
def add_points_to_O(O, pts, H, gen_specs, c_flag, persis_info, local_flag=0, sorted_run_inds=[], run=[], on_cube=True):
"""
Adds points to O, the numpy structured array to be sent back to the manager
"""
assert not local_flag or len(pts) == 1, "add_points_to_O does not support this functionality"
original_len_O = len(O)
len_H = len(H)
ub = gen_specs['ub']
lb = gen_specs['lb']
if c_flag:
m = gen_specs['components']
assert len_H % m == 0, "Number of points in len_H not congruent to 0 mod 'components'"
pt_ids = np.sort(np.tile(np.arange((len_H+original_len_O)/m,(len_H+original_len_O)/m + len(pts)),(1,m)))
pts = np.tile(pts,(m,1))
num_pts = len(pts)
O.resize(len(O)+num_pts,refcheck=False) # Adds (num_pts) rows of zeros to O
if on_cube:
O['x_on_cube'][-num_pts:] = pts
O['x'][-num_pts:] = pts*(ub-lb)+lb
else:
O['x_on_cube'][-num_pts:] = (pts-lb)/(ub-lb)
O['x'][-num_pts:] = pts
O['sim_id'][-num_pts:] = np.arange(len_H+original_len_O,len_H+original_len_O+num_pts)
O['local_pt'][-num_pts:] = local_flag
O['dist_to_unit_bounds'][-num_pts:] = np.inf
O['dist_to_better_l'][-num_pts:] = np.inf
O['dist_to_better_s'][-num_pts:] = np.inf
O['ind_of_better_l'][-num_pts:] = -1
O['ind_of_better_s'][-num_pts:] = -1
if c_flag:
O['obj_component'][-num_pts:] = np.tile(range(0,m),(1,num_pts//m))
O['pt_id'][-num_pts:] = pt_ids
if local_flag:
O['num_active_runs'][-num_pts] += 1
# O['priority'][-num_pts:] = 1
# O['priority'][-num_pts:] = np.random.uniform(0,1,num_pts)
| |
grep "
"openpyxl'. If that is the case, fix with 'pip install openpyxl==2.4.8'")
raise e
data = save_virtual_workbook(xl)
r = Response(data,
mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
status=200)
return r
@app.route(nis_api_base + "/isession/regenerate_xlsx", methods=["POST"])
def regenerate_xlsx_file():
    """
    Receive an XLSX workbook in the request body, regenerate (rewrite) it and
    return the regenerated workbook as the response.
    """
    # Recover the InteractiveSession; a failure comes back as a ready Response
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # Only the raw workbook bytes ("buffer") are used by this endpoint
    generator_type, content_type, buffer, execute, register = receive_file_submission(request)
    try:
        workbook = openpyxl.load_workbook(io.BytesIO(buffer), data_only=True)
        rewrite_xlsx_file(workbook)
    except Exception as e:
        print("Exception rewriting XLSX. Is openpyxl==2.4.8 installed?. Check with 'pip freeze | grep openpyxl'. "
              "If that is the case, fix with 'pip install openpyxl==2.4.8'")
        raise e
    regenerated = save_virtual_workbook(workbook)
    return Response(regenerated,
                    mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
                    status=200)
@app.route(nis_api_base + "/commands_and_fields", methods=["GET"])
def obtain_commands_and_their_fields():
    """
    Enumerate all worksheet commands together with the allowed names of their fields.

    :return: {"commands": [{"name": <command name>, "fields": [<allowed names>, ...]}, ...]}
    """
    # Recover InteractiveSession
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # BUG FIX: the previous implementation reassigned j["name"] and j["fields"]
    # on every loop iteration, so the response only ever contained the LAST
    # command. Accumulate one entry per command instead.
    j = {"commands": [{"name": k, "fields": [f.allowed_names for f in v]}
                      for k, v in command_fields.items()]}
    return j
@app.route(nis_api_base + "/validate_command_record", methods=["POST"])
def validate_command_record():
    """
    On-line validation of command syntax, field by field or row by row (the
    client sends what the user just entered; the server answers None for a
    valid field and an error message otherwise).

    The input comes in a JSON field "content":
        {"command": "<command name>",
         "fields": {"<field name>": "<value>", ...}
        }

    :return: A dictionary with the same fields as the input dictionary, whose
             values are the diagnosis: None meaning everything-ok, or a string
             describing the problem.
    """
    # Recover InteractiveSession; a failure comes back as a ready Response
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # Read and validate the submitted record
    submitted = request.get_json()
    diagnosis, ok = validate_command(submitted)
    return build_json_response(diagnosis, 200 if ok else 400)
def get_misc_cmd_help(cmd_name):
    """
    Hand-written help entries for the few commands that are not described by
    the generic "command_fields" metadata: "metadata", "pedigree_matrix" and
    "datasetdata".

    :param cmd_name: Command name to look up.
    :return: dict with "type", "name", "template" and "examples" keys (plus
             "by_rows" for "metadata"), or None when cmd_name is not one of
             the three special commands.
    """
    # NOTE: the template/example strings below use backslash line continuation
    # INSIDE the string literals, so the continuation lines are flush left on
    # purpose -- their leading whitespace would become part of the string.
    if cmd_name == "metadata":
        return {"type": "Metadata", "by_rows": False, "name": "Metadata", "template":
            "Case study code\n\
Case study name\n\
Title\n\
Subject, topic and/or keywords\n\
Description\n\
Geographical level\n\
Dimensions\n\
Reference documentation\n\
Authors\n\
Date of elaboration\n\
Temporal situation\n\
Geographical location\n\
DOI\n\
Language\n\
Restriction level\n\
Version", "examples": [
            "Case study code\tCS3_R_WEF_P-0.1\n\
Case study name\n\
Title\tSoslaires\n\
Subject, topic and/or keywords\n\
Description\tA small scale system combining Energy, Water and Food\n\
Geographical level\tLocal\n\
Dimensions\tEnergy\tWater\tFood\n\
Reference documentation\n\
Authors\t<NAME>\<NAME>\t<NAME>\n\
Date of elaboration\t2016\n\
Temporal situation\t2016\n\
Geographical location\tGran Canaria\n\
DOI\n\
Language\tEnglish\n\
Restriction level\tPublic\n\
Version\tV0.1"]}
    elif cmd_name == "pedigree_matrix":
        # Pedigree matrix help: template plus three example matrices
        return {"type": "Metadata", "name": "Pedigree", "template":
            "Code\t<Phase name #1>\t<Phase name #2>\t<Phase name #3>\t...",
                "examples": [
                    "Code\tTheoreticalStructures\tDataInput\tPeerAcceptance\tColleagueConsensus\n\
4\tEstablishedTheory\tExperimentalData\tTotal\tAllButCranks\n\
3\tTheoreticallyBasedModel\tHistoricFieldData\tHigh\tAllButRebels\n\
2\tComputationalModel\tCalculatedData\tMedium\tCompetingSchools\n\
1\tStatisticalProcessing\tEducatedGuess\tLow\tEmbryonicField\n\
0\tDefinitions\tUneducatedGuess\tNone\tNoOpinion",
                    "Code\tModelStructure\tDataInput\tTesting\n\
4\tComprehensive\tReview\tCorroboration\n\
3\tFiniteElementApproximation\tHistoricField\tComparison\n\
2\tTransferFunction\tExperimental\tUncertaintyAnalysis\n\
1\tStatisticalProcessing\tCalculated\tSensitivityAnalysis\n\
0\tDefinitions\tExpertGuess\tNone",
                    "Code\tDefinitionsAndStandards\tDataCollectionAndAnalysis\tInstitutionalCulture\tReview\n\
5\tNegotiation\tTaskForce\tDialogue\tExternal\n\
4\tScience\tDirectSurvey\tAccomodation\tIndependent\n\
3\tConvenience\tIndirectEstimate\tObedience\tRegular\n\
2\tSymbolism\tEducatedGuess\tEvasion\tOccasional\n\
1\tInertia\tFiat\tNoContact\tNone\n\
0\tUnknown\tUnknown\tUnknown\tUnknown"
                ]
                }
    elif cmd_name == "datasetdata":
        # Dataset data help: template plus two example data tables
        return {"type": "Input", "name": "DatasetData", "template":
            "<Dataset concept #1>\t<Dataset concept #2>\t<Dataset concept #3>\t...",
                "examples": [
                    "Country\tYear\tWaterConsumption\n\
ES\t2015\t102\n\
ES\t2016\t110\n\
IT\t2015\t130\n\
IT\t2016\t140\n",
                    "Tech\tScale\tUnitEnergyConsumption\n\
Coal\tMiddle\t1.4\n\
Coal\tLarge\t1.3\n\
Coal\tVeryLarge\t1.2\n\
Nuclear\tLarge\t1.3\n\
Nuclear\tVeryLarge\t1.15\n"
                ]
                }
    else:
        # Not a specially-documented command
        return None
def get_regular_cmd_help(cmd: nexinfosys.Command):
    """
    Build the help entry (type, name, template, examples) for a regular
    worksheet command, i.e. one described by "command_fields".

    :param cmd: The command whose help entry is generated.
    :return: dict with "type", "name", "template" (tab-separated allowed field
             names, skipping "@"-prefixed and deprecated fields) and "examples".
    """
    cmdflds = command_fields.get(cmd.name, None)
    # Fixed: removed unused locals ("examples = cmd.direct_examples" and
    # "files = cmd.files") that were computed but never used.
    # NOTE(review): "examples" is deliberately(?) returned empty even though
    # cmd.direct_examples exists -- confirm before wiring examples in.
    return dict(type=str(cmd.cmd_type),
                name=cmd.allowed_names[0],
                template="\t".join([f.allowed_names[0] for f in cmdflds if "@" not in f.allowed_names[0] and not f.deprecated]),
                examples=[]
                )
@app.route(nis_api_base + "/commands_reference.json", methods=["GET"])
def obtain_commands_reference():
    """
    Produce the reference of all V2 commands as a JSON list, grouped by
    command type in a fixed presentation order.
    """
    # Recover InteractiveSession; a failure comes back as a ready Response
    isess = deserialize_isession_and_prepare_db_session()
    if isess and isinstance(isess, Response):
        return isess
    # Presentation order of the command types
    ordered_types = [nexinfosys.CommandType.core,
                     nexinfosys.CommandType.input,
                     nexinfosys.CommandType.analysis,
                     nexinfosys.CommandType.metadata,
                     nexinfosys.CommandType.convenience,
                     nexinfosys.CommandType.misc]
    entries = []
    for ctype in ordered_types:
        for cmd in commands:
            if not (cmd.is_v2 and cmd.cmd_type == ctype):
                continue
            # Specially-documented commands first; fall back to the generic help
            misc_help = get_misc_cmd_help(cmd.name)
            if misc_help:
                entries.append(misc_help)
            elif command_fields.get(cmd.name, None):
                entries.append(get_regular_cmd_help(cmd))
    return build_json_response([e for e in entries if e])
"""
d = [
{"type": "External dataset", "name": "Mapping", "template":
"<Source dimension from external dataset>\t<Target internal taxonomy>\t<Weight (optional, default 1 (many-to-one), <1 for many-to-many mappings)>",
"examples": [
"nrg_110a.PRODUCT\tMuSIASEM_EC\n\
2100\tHeat\n\
2200\tHeat\n\
2410\tHeat\n\
3214\tHeat\n\
3215\tHeat\n\
3215\tFeedstock\n\
3220\tHeat\n\
3234\tFuel\n\
3235\tFuel\n\
3244\tFuel\n\
3246\tFuel\n\
3247\tFuel\n\
3250\tFeedstock\n\
3260\tFuel\n\
3270A\tHeat\n\
3280\tFeedstock\n\
3285\tHeat\n\
4000\tHeat\n\
5532\tHeat\n\
5541\tHeat\n\
5542\tHeat\n\
55431\tHeat\n\
55432\tHeat\n\
5544\tHeat\n\
5545\tFuel\n\
5550\tHeat\n\
6000\tElectricity\n\
7100\tHeat\n\
7200\tHeat\n\
",
"nrg_110a.INDIC_NRG\tMuSIASEM_Sector\n\
B_101300\tES\n\
B_101825\tMQ\n\
B_102030\tAFO\n\
B_102020\tFI\n\
B_101805\tIS\n\
B_101810\tNF\n\
B_101815\tCP\n\
B_101820\tNM\n\
B_101830\tFT\n\
B_101835\tTL\n\
B_101840\tPPP\n\
B_101846\tTE\n\
B_101847\tMA\n\
B_101851\tWWP\n\
B_101852\tCO\n\
B_101853\tNS\n\
B_102035\tSG"
]
},
{"type": "External dataset", "name": "Parameters", "template":
"Name\tValue\tType\tGroup\tDescription",
"examples": [
"Name\tValue\tType\tGroup\tDescription\n\
p1\t3\tnumber\t\tParameter # 1\n\
p2\t3.5\tnumber\t\tParameter two"
]
},
{"type": "Specification", "by_rows": False, "name": "Metadata", "template":
"Case study code\n\
Case study name\n\
Title\n\
Subject, topic and/or keywords\n\
Description\n\
Geographical level\n\
Dimensions\n\
Reference documentation\n\
Authors\n\
Date of elaboration\n\
Temporal situation\n\
Geographical location\n\
DOI\n\
Language\n\
Restriction level\n\
Version", "examples": [
"Case study code\tCS3_R_WEF_P-0.1\n\
Case study name\n\
Title\tSoslaires\n\
Subject, topic and/or keywords\n\
Description\tA small scale system combining Energy, Water and Food\n\
Geographical level\tLocal\n\
Dimensions\tEnergy\tWater\tFood\n\
Reference documentation\n\
Authors\t<NAME>\<NAME>\t<NAME>\n\
Date of elaboration\t2016\n\
Temporal situation\t2016\n\
Geographical location\tGran Canaria\n\
DOI\n\
Language\tEnglish\n\
Restriction level\tPublic\n\
Version\tV0.1"]},
{"type": "Specification", "name": "Processors", "template":
"Name\tLevel\tFF_TYPE\tVAR\tVALUE\tUNIT\tRELATIVE TO\tUNCERTAINTY\tASSESSMENT\tPEDIGREE\\nMATRIX\tPEDIGREE\tTIME\tGEO\tSCALE\tSOURCE\tCOMMENTS",
"examples": [
"Name\tLevel\tFF_TYPE\tVAR\tVALUE\tUNIT\tRELATIVE TO\tUNCERTAINTY\tASSESSMENT\tPEDIGREE\\nMATRIX\tPEDIGREE\tTIME\tGEO\tSCALE\tSOURCE\tCOMMENTS\n\
WindFarm\tN-1\tInt_In_Fund\tHA\t660\thours\t\t\t\t\t\tYear\t\t\t\t\n\
WindFarm\tN-1\tInt_In_Fund\tHA_cost\t1800\t€\t\t\t\t\t\t2016\t\t\t\t\n\
WindFarm\tN-1\tInt_Out_Flow\tWindElectricity\t9.28\tGWh\t\t\t\t\t\tYear\t\t\t\t11,8% Energy transformation efficiency from wind to electricity\n\
ElectricGrid\tN\tExt_In_Flow\tGridElectricity\t6.6\tGWh\t\t\t\t\t\tYear\t\t\t\t0.429 M€ income from energy sale"]},
{"type": "Specification", "name": "Upscale", "template":
"<factor name>\t\n\
<child processor type> / <parent processor type>\t<one or more codes from predefined categories. One or more rows allowed, from this row upwards>\n\
<one or more codes from predefined categories. One or more columns allowed, from this column to the left>\
",
"examples": [
"LU\tGH\tGH\tOF\n\
Farm / AgrarianRegion\tMCR1\tMCR2\tMCR1\n\
AR1\t0.00\t0.06\t0.94\n\
AR2\t0.15\t0.85\t0.00\n\
AR3\t0.19\t0.77\t0.04\n\
AR4\t0.03\t0.05\t0.92\n\
AR5\t0.00\t0.00\t1.00\n\
AR6\t0.00\t0.87\t0.13"
]
},
{"type": "Specification", "name": "Structure", "template":
"Origin\tRelation\tDestination\tDestination\tDestination",
"examples": [
"Origin\tRelation\tDestination\tDestination\tDestination\tDestination\tDestination\tDestination\tDestination\tDestination\n\
WindFarm:WindElectricity\t>\t1/(0.5*p1)>DesalinationPlant:WindElectricity\tElectricGrid\t\t\t\t\t\t\n\
ElectricGrid\t>\tDesalinationPlant:GridElectricity\t\t\t\t\t\t\t\n\
DesalinationPlant:DesalinatedWater\t>\tFarm:BlueWater\t\t\t\t\t\t\t\n\
Farm\t|\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm:LU\t>\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm:HA\t>\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm:IrrigationCapacity\t>\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm:BlueWater\t>\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm:Agrochemicals\t>\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm:Fuel\t>\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm:GreenWater\t<\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm:MaterialWaste\t<\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm:DiffusivePollution\t<\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm:CO2\t<\tCantaloupe\tWatermelon\tTomato\tZucchini\tBeans\tPumpkin\tBanana\tMoringa\n\
Farm\t<\tCantaloupe:Cantaloupe\tWatermelon:Watermelon\tTomato:Tomato\tZucchini:Zucchini\tBeans:Beans\tPumpkin:Pumpkin\tBanana:Banana\tMoringa:Moringa"
]
},
{"type": "Specification", "name": "Taxonomy_F", "template":
"Code\tDescription\tCode\tDescription\tExpression",
"examples": [
"Code\tDescription\tCode\tDescription\tExpression\n\
Vegetables\tAll kinds of vegetables\n\
\t\tCantaloupe\n\
\t\tWatermelon\t\n\
\t\tTomato\n\
\t\tZucchini\n\
\t\tBeans\n\
\t\tPumpkin\n\
\t\tBanana\n\
\t\tMoringa"
]
},
{"type": "Specification", "name": "Pedigree", "template":
"Code\t<Phase name #1>\t<Phase name #2>\t<Phase name #3>\t...",
"examples": [
"Code\tTheoreticalStructures\tDataInput\tPeerAcceptance\tColleagueConsensus\n\
4\tEstablishedTheory\tExperimentalData\tTotal\tAllButCranks\n\
3\tTheoreticallyBasedModel\tHistoricFieldData\tHigh\tAllButRebels\n\
2\tComputationalModel\tCalculatedData\tMedium\tCompetingSchools\n\
1\tStatisticalProcessing\tEducatedGuess\tLow\tEmbryonicField\n\
0\tDefinitions\tUneducatedGuess\tNone\tNoOpinion",
"Code\tModelStructure\tDataInput\tTesting\n\
4\tComprehensive\tReview\tCorroboration\n\
3\tFiniteElementApproximation\tHistoricField\tComparison\n\
2\tTransferFunction\tExperimental\tUncertaintyAnalysis\n\
1\tStatisticalProcessing\tCalculated\tSensitivityAnalysis\n\
0\tDefinitions\tExpertGuess\tNone",
"Code\tDefinitionsAndStandards\tDataCollectionAndAnalysis\tInstitutionalCulture\tReview\n\
5\tNegotiation\tTaskForce\tDialogue\tExternal\n\
4\tScience\tDirectSurvey\tAccomodation\tIndependent\n\
3\tConvenience\tIndirectEstimate\tObedience\tRegular\n\
2\tSymbolism\tEducatedGuess\tEvasion\tOccasional\n\
1\tInertia\tFiat\tNoContact\tNone\n\
0\tUnknown\tUnknown\tUnknown\tUnknown"
]
},
{"type": "Specification", "name": "Composition_P", "template":
"Code\tDescription\tCode\tDescription",
"examples": [
"Code\tDescription\tCode\tDescription\tCode\tDescription\tCode\tDescription\tCode\tDescription\n\
Society\tEncompassess the human realm\n\
\t\tHH\tHousehold Sector\n\
\t\tPW\tPaid Work Sector\n\
\t\t\t\tSG\tService & Government\n\
\t\t\t\tPS\tPrimary & Secondary\n\
\t\t\t\t\t\tBM\tBuilding & Manufacturing\n\
\t\t\t\t\t\tPF\tPrimary flows\n\
\t\t\t\t\t\t\t\tAG\tAgriculture\n\
\t\t\t\t\t\t\t\tEM\tEnergy & Mining"
]
},
{"type": "Specification", "name": "Taxonomy_C", "template":
"Code\tDescription\tCode\tDescription\tExpression",
"examples": [
]
},
{"type": "Specification", "name": "References", "template":
"ref_id\t<list of columns depending on the type reference (bibliographic, geographic, provenance, see examples)>",
"examples": [
"ref_id\tTitle\tDate\tBoundingBox\tTopicCategory\tDescription\tMetadataPointOfContact\tAnnote\tDataLocation",
"ref_id\tEntry_Type\tAddress\tAnnote\tBookTitle\tChapter\tCrossRef\tEdition\tEditor\tHowPublished\tInstitution\tJournal\tKey\tMonth\tNote\tNumber\tOrganization\tPages\tPublisher\tSchool\tSeries\tTitle\tType\tURL\tVolume\tYear",
"ref_id\tAgentType\tAgent\tActivities\tEntities"
]
},
{"type": "Specification", "name": "Scale", "template":
"<A matrix having as row starts the origin factor type names, as column headers the target factor type names",
"examples": []
},
{"type": "Analysis", "name": "Indicators", "template":
"Name\tFormula\tDescription\tBenchmark\tBenchmark\tBenchmark\tBenchmark",
"examples": []
}
]
"""
@app.route(nis_api_base + "/command_reference.json", methods=["POST"])
def command_help():
    """
    On-line help for a single command.

    The input comes in a JSON field "content":
        {"command": "<command name>"}

    :return: A dictionary with the same fields passed in the input dictionary,
             whose values are the help divided in sections: explanation,
             allowed_values, formal syntax and examples.
    """
    # Read the request and delegate to the help resolver
    payload = request.get_json()
    help_result, http_status = comm_help(payload)
    return build_json_response(help_result, http_status)
@app.route(nis_api_base + "/command_fields_reference.json", methods=["POST"])
def command_fields_help():
    """
    On-line, field by field help for a command.

    The input comes in a JSON field "content":
        {"command": "<command name>",
         "fields": ["<field name>", "<field name>"]
        }

    :return: A dictionary with the same fields passed in the input dictionary,
             whose values are the help divided in sections: explanation,
             allowed_values, formal syntax and examples.
    """
    # Read the request and delegate to the per-field help resolver
    payload = request.get_json()
    help_result, ok = command_field_help(payload)
    return build_json_response(help_result, 200 if ok else 400)
# @app.route(nis_api_base + "/sources/<id>/databases/<database_id>/datasets/<dataset_id>", methods=["GET"])
# def data_source_database_dataset_query(id, database_id, dataset_id):
# """
# This is the most powerful data method, allowing to
#
# :param id:
# :param database_id:
# :param dataset_id:
# :return:
# """
# # Recover InteractiveSession
# isess = deserialize_isession_and_prepare_db_session()
# if isess and isinstance(isess, Response):
# return isess
def data_processes():
    # TODO: placeholder endpoint-helper; not implemented yet.
    pass
def nusap_data_pedigree():
    # TODO: placeholder for NUSAP pedigree data handling; not implemented yet.
    pass
def grammars():
    # TODO: placeholder for grammar management; not implemented yet.
    pass
def mappings():
    """
    Placeholder: map dimensions of an external dataset to internal categories.
    Not implemented yet.
    :return: None
    """
    pass
def hierarchies():
    """Placeholder for hierarchy management; not implemented yet."""
    # Fixed: removed a dead local assignment ("a = 6") that had no effect.
    pass
# -- Test --
@app.route('/test', methods=['GET'])
@app.route(nis_api_base + '/test', methods=['GET'])
def hello():
    """Trivial liveness endpoint; also exercises the logger."""
    logger.debug("LOG!!!")
    payload = {"hello": "world"}
    return build_json_response(payload)
if __name__ == '__main__':
# xl = openpyxl.load_workbook("/home/rnebot/Dropbox/nis-internal-tests/issue_report.xlsx", data_only=True)
# rewrite_xlsx_file(xl)
# xl.save("/home/rnebot/Downloads/borrame.xlsx")
# sys.exit(0)
# from tasks import add
# from celery.task.control import inspect
# import time
# def f():
# t = []
# for i in range(10):
# t.append(add.delay(i, i + 1))
# i = inspect()
# st = [ti.ready() for ti in t]
# while not all(st):
# print(f"Completos: {sum(st)}; quedan {len(st)-sum(st)}")
# print(i.active())
# time.sleep(1)
# st = [ti.ready() for ti in t]
# f()
# 1) GUNICORN
# (start REDIS first at localhost:6379. E.g.: docker run --rm --name redis-local -p 6379:6379 redis:alpine)
#
# cd ~/AA_MAGIC/nis-nexinfosys
# export MAGIC_NIS_SERVICE_CONFIG_FILE=/home/rnebot/Dropbox/nis-nexinfosys-config/nis_local.conf
# gunicorn --bind 0.0.0.0:8080 --workers 3 nexinfosys.restful_service.service_main:app
# 2) DOCKER. BASIC DEPLOYMENT
#
# PREVIOUSLY, COMPILE FRONTEND
# cd ~/GoogleDrive/AA_MAGIC/nis-frontend
# npm install
# rm dist -fr
# node --max_old_space_size=8192 node_modules/@angular/cli/bin/ng build --prod -c production_local --aot --base-href /nis_client/
# rm ~/GoogleDrive/AA_MAGIC/nis-nexinfosys/frontend/* -fr
# cp -r ~/GoogleDrive/AA_MAGIC/nis-frontend/dist/* ~/GoogleDrive/AA_MAGIC/nis-nexinfosys/frontend
#
# 2) (continuation) DOCKER COMMANDS (example)
# docker network create nis-net
# docker run --rm --name redis-local --net nis-net -p 6379:6379 redis:alpine
# docker create --name nis-local --net nis-net -p 5000:80 -v /home/rnebot/DATOS/docker_magic_nis:/srv -e MAGIC_NIS_SERVICE_CONFIG_FILE="nis_local_redis_docker.conf" magic-nis
# cs = CaseStudy()
# vs1 = CaseStudyVersion()
# vs1.case_study = cs
# vs2 = CaseStudyVersion()
# vs2.case_study = cs
#
# lst = [cs, vs1, vs2]
# d_list = serialize(lst)
# lst2 = deserialize(d_list)
# sys.exit(1)
# >>>>>>>>>> IMPORTANT <<<<<<<<<
# For debugging in local mode, prepare an environment variable "MAGIC_NIS_SERVICE_CONFIG_FILE", with value "./nis_local.conf"
# >>>>>>>>>> IMPORTANT <<<<<<<<<
# >>>>>>>>>> IMPORTANT <<<<<<<<<
# "cannot connect to X server" error when remote debugging?
# Execute "Xvfb :99 -ac -noreset" in the remote server and uncomment the following | |
# dev/Tools/build/waf-1.7.13/lmbrwaflib/packaging.py
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import subprocess, os, shutil, plistlib
from utils import *
from branch_spec import spec_modules
from gems import Gem
from qt5 import QT5_LIBS
from waflib import Context, Build, Utils, Logs, TaskGen
from waflib.Task import Task, ASK_LATER, RUN_ME, SKIP_ME
from waf_branch_spec import PLATFORMS, CONFIGURATIONS, PLATFORM_CONFIGURATION_FILTER
from contextlib import contextmanager
from lumberyard_sdks import get_dynamic_lib_extension, get_platform_lib_prefix
from waflib.Utils import Timer
from waflib.Scripting import run_command
def run_xcode_build(pkg, target, destination):
    """Invoke xcodebuild to wrap the built target into an App Bundle.

    No-op unless we are packaging for an Apple platform and the
    'run_xcode_for_packaging' option is enabled. Errors from xcodebuild are
    logged rather than raised so packaging can continue.
    """
    apple_platforms = ('darwin_x64', 'ios', 'applettv')
    if pkg.platform not in apple_platforms or not pkg.is_option_true('run_xcode_for_packaging'):
        Logs.debug("package: Not running xcode because either we are not on a macOS platform or the command line option disabled it")
        return

    Logs.info("Running xcode build command to create App Bundle")

    # Project-location options for darwin builds are registered under 'mac'.
    platform = "mac" if 'darwin' in pkg.platform else pkg.platform

    project_path = "{}/{}.xcodeproj".format(
        getattr(pkg.options, platform + "_project_folder", None),
        getattr(pkg.options, platform + "_project_name", None))
    command = [
        "xcodebuild",
        "-project", project_path,
        "-target", target,
        "-quiet",
        "RUN_WAF_BUILD=NO",
        "CONFIGURATION=" + pkg.config,
        "CONFIGURATION_BUILD_DIR=" + destination.abspath(),
    ]
    try:
        result = subprocess.check_output(command)
        Logs.debug("package: xcode result is {}".format(result))
    except Exception as err:
        Logs.error("Can't run xcode {}".format(err))
def should_copy_and_not_link(pkg):
    """Return True when assets must be copied into the package rather than
    symlinked: on win32, for release configurations, or when copying was
    explicitly requested via option or flag."""
    if Utils.is_win32:
        return True
    if 'release' in pkg.config:
        return True
    return pkg.is_option_true('copy_assets') or pkg.force_copy_of_assets
class package_task(Task):
"""
Package an executable and any resources and assets it uses into a platform specific format.
Extended `Task`
"""
color = 'GREEN'
optional = False
def __init__(self, *k, **kw):
"""
Extended Task.__init__ to store the kw pairs passed in as attributes on the class and assign executable_name, task_gen_name, executable-task_gen and destination_node attributes to values based on the kw args.
:param target: name of the target/executable to be packaged.
:type target: string
:param task_gen_name: [Optional] Name of the task_gen that will build the target/executable. Needed for launchers where the target will be the game project (like StarterGame) but the task gen name will include the platform and 'Launcher' word (i.e. StarterGameWinLauncher).
:type task_gen_name: string
:param destination: [Optional] Path to the place where the packaged executable should be
:type destination: string
:param include_all_libs: [Optional] Force the task to include all libs that are in the same directory as the built executable to be part of the package
:type include_all_libs: boolean
:param include_spec_dependencies: [Optional] Include dependencies that are found by inspecting modules included in the spec
:type include_spec_dependencies: boolean
:param spec: [Optional] The spec to use to get module dependencies. Defaults to game_and_engine if not specified
:type spec: string
:param gem_types: [Optional] Types of gem modules to include as dependencies. Defaults to a list containing GameModule if not specified
:type gem_types: list of Gem.Module.Type
:param resources: [Optional] Files that should be copied to the resource directory. Resource directory is determined by calling get_resource_node
:type resources: list of strings
:param dir_resources: [Optional] Directories that contain resources required for the executable (such as QtLibs). These directories will either be linked or copied into the location of the executable
:type dir_resources: list of strings
:param assets_path: [Optional] Path to where the assets for the executbale are located. They will be either copied into the package or a symlink will be created to them.
:type assets_path: string
:param use_pak_files: [Optional] If pak files should be used instead of assets. This takes precendence over assets_path parameter if both are specified.
:type use_pak_files: boolean
:param pak_file_path: [Optional] Location of the pak files. if not specified will default to "[project name]_[asset platform name]_paks"
:type pak_file_path: string
:param finalize_func: [Optional] A function to execute when the package task has finished. The package context and node containing the destination of the executable is passed into the function.
:type finalize_func: function
"""
super(package_task, self).__init__(self, *k, **kw)
for key, val in kw.items():
setattr(self, key, val)
self.executable_name = kw['target']
self.task_gen_name = kw.get('task_gen_name', self.executable_name)
self.executable_task_gen = self.bld.get_tgen_by_name(self.task_gen_name)
self.destination_node = kw.get('destination', None)
def scan(self):
"""
Overrided scan to check for extra dependencies.
This function inspects the task_generator for its dependencies and
if include_spec_dependencies has been specified to include modules that
are specified in the spec and are potentially part of the project.
"""
spec_to_use = getattr(self, 'spec', 'game_and_engine')
gem_types = getattr(self, 'gem_types', [Gem.Module.Type.GameModule])
include_all_libs = getattr(self, 'include_all_libs', False)
if include_all_libs and spec_to_use != 'all':
spec_to_use = 'all'
self.dependencies = get_dependencies_recursively_for_task_gen(self.bld, self.executable_task_gen)
self.dependencies.update(get_spec_dependencies(self.bld, spec_to_use, gem_types))
# get_dependencies_recursively_for_task_gen will not pick up all the
# gems so if we want all libs add all the gems to the
# dependencies as well
if include_all_libs:
for gem in GemManager.GetInstance(self.bld).gems:
for module in gem.modules:
gem_module_task_gen = self.bld.get_tgen_by_name(module.target_name)
self.dependencies.update(get_dependencies_recursively_for_task_gen(self.bld, gem_module_task_gen))
return (list(self.dependencies), [])
def run(self):
Logs.info("Running package task for {}".format(self.executable_name))
executable_source_node = self.inputs[0]
if not self.destination_node:
# destination not specified so assume we are putting the package
# where the built executable is located, which is the input's
# parent since the input node is the actual executable
self.destination_node = self.inputs[0].parent
Logs.debug("package: packaging {} to destination {}".format(executable_source_node.abspath(), self.destination_node.abspath()))
if 'darwin' in self.bld.platform:
run_xcode_build(self.bld, self.task_gen_name, self.destination_node)
self.process_executable()
self.process_qt()
self.process_resources()
self.process_assets()
    def process_executable(self):
        """Install the built executable and its dependent libraries into the
        package destination, then run the optional finalize callback."""
        executable_source_node = self.inputs[0]
        executable_source_location_node = executable_source_node.parent

        # Primary place to look for dependencies: the engine's output folder
        # for the current platform/config.
        dependency_source_location_nodes = [self.bld.engine_node.make_node(self.bld.get_output_folders(self.bld.platform, self.bld.config)[0].name)]

        # NOTE(review): this compares a *list* against a single node, so the
        # condition is always True and the executable's folder is always
        # appended. Presumably the intent was to avoid a duplicate entry
        # when both locations coincide — confirm before changing.
        if dependency_source_location_nodes != executable_source_location_node:
            dependency_source_location_nodes.append(executable_source_location_node)

        executable_dest_node = self.outputs[0].parent
        executable_dest_node.mkdir()

        Logs.info("Putting final packaging into base output folder {}, executable folder {}".format(self.destination_node.abspath(), executable_dest_node.abspath()))

        # Only copy/link when source and destination differ; otherwise the
        # executable is already in place.
        if executable_source_location_node != executable_dest_node:
            self.bld.install_files(executable_dest_node.abspath(), self.executable_name, cwd=executable_source_location_node, chmod=Utils.O755, postpone=False)
            if getattr(self, 'include_all_libs', False):
                self.bld.symlink_libraries(executable_source_location_node, executable_dest_node.abspath())
            else:
                # self.dependencies comes from the scan function
                self.bld.symlink_dependencies(self.dependencies, dependency_source_location_nodes, executable_dest_node.abspath())
        else:
            Logs.debug("package: source {} = dest {}".format(executable_source_location_node.abspath(), executable_dest_node.abspath()))

        # Optional per-package hook supplied at construction time.
        if getattr(self, 'finalize_func', None):
            self.finalize_func(self.bld, executable_dest_node)
def process_qt(self):
"""
Process Qt libraries for packaging for macOS.
This function will copy the Qt framework/libraries that an application
needs into the specific location for app bundles (Frameworks directory)
and perform any cleanup on the copied framework to conform to Apple's
framework bundle structure. This is required so that App bundles can
be properly code signed.
"""
if 'darwin' not in self.bld.platform or 'qtlibs' not in getattr(self, 'dir_resources', []):
return
# Don't need the process_resources method to process the qtlibs folder
# since we are handling it
self.dir_resources.remove('qtlibs')
executable_dest_node = self.outputs[0].parent
output_folder_node = self.bld.get_output_folders(self.bld.platform, self.bld.config)[0]
qt_plugin_source_node = output_folder_node.make_node("qtlibs/plugins")
qt_plugins_dest_node = executable_dest_node.make_node("qtlibs/plugins")
# To be on the safe side check if the destination qtlibs is a link and
unlink it before we create the plugins copy/link
if os.path.islink(qt_plugins_dest_node.parent.abspath()):
os.unlink(qt_plugins_dest_node.parent.abspath())
self.bld.create_symlink_or_copy(qt_plugin_source_node, qt_plugins_dest_node.abspath(), postpone=False)
qt_libs_source_node = output_folder_node.make_node("qtlibs/lib")
# Executable dest node will be something like
# Application.app/Contents/MacOS. The parent will be Contents, which
# needs to contain the Frameworks folder according to macOS Framework
# bundle structure
frameworks_node = executable_dest_node.parent.make_node("Frameworks")
frameworks_node.mkdir()
def post_copy_cleanup(dst_framework_node):
# Apple does not like any file in the top level directory of an
# embedded framework. In 5.6 Qt has perl scripts for their build in the
# top level directory so we will just delete them from the embedded
# framework since we won't be building anything.
pearl_files = dst_framework_node.ant_glob("*.prl")
for file in pearl_files:
file.delete()
# on macOS there is not a clean way to get Qt dependencies on itself,
# so we have to scan the lib using otool and then add any of those Qt
# dependencies to our set.
qt_frameworks_to_copy = set()
qt5_vars = Utils.to_list(QT5_LIBS)
for i in qt5_vars:
uselib = i.upper()
if uselib in self.dependencies:
# QT for darwin does not have '5' in the name, so we need to remove it
darwin_adjusted_name = i.replace('Qt5','Qt')
framework_name = darwin_adjusted_name + ".framework"
src = qt_libs_source_node.make_node(framework_name).abspath()
if os.path.exists(src):
qt_frameworks_to_copy.add(framework_name)
# otool -L will generate output like this:
# @rpath/QtWebKit.framework/Versions/5/QtWebKit (compatibility version 5.6.0, current version 5.6.0)
# cut -d ' ' -f 1 will slice the line by spaces and returns the first field. That results in: @rpath/QtWebKit.framework/Versions/5/QtWebKit
# grep @rpath will make sure we only have QtLibraries and not system libraries
# cut | |
to
:return: Keycloak server response
"""
params_path = {"realm-name": self.realm_name, "id": user_id, "group-id": group_id}
data_raw = self.raw_put(
urls_patterns.URL_ADMIN_USER_GROUP.format(**params_path), data=None
)
return raise_error_from_response(data_raw, KeycloakPutError, expected_codes=[204])
def group_user_remove(self, user_id, group_id):
"""
Remove user from group (user_id and group_id)
:param user_id: id of user
:param group_id: id of group to remove from
:return: Keycloak server response
"""
params_path = {"realm-name": self.realm_name, "id": user_id, "group-id": group_id}
data_raw = self.raw_delete(urls_patterns.URL_ADMIN_USER_GROUP.format(**params_path))
return raise_error_from_response(data_raw, KeycloakDeleteError, expected_codes=[204])
def delete_group(self, group_id):
"""
Deletes a group in the Realm
:param group_id: id of group to delete
:return: Keycloak server response
"""
params_path = {"realm-name": self.realm_name, "id": group_id}
data_raw = self.raw_delete(urls_patterns.URL_ADMIN_GROUP.format(**params_path))
return raise_error_from_response(data_raw, KeycloakDeleteError, expected_codes=[204])
def get_clients(self):
"""
Returns a list of clients belonging to the realm
ClientRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_clientrepresentation
:return: Keycloak server response (ClientRepresentation)
"""
params_path = {"realm-name": self.realm_name}
data_raw = self.raw_get(urls_patterns.URL_ADMIN_CLIENTS.format(**params_path))
return raise_error_from_response(data_raw, KeycloakGetError)
def get_client(self, client_id):
"""
Get representation of the client
ClientRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_clientrepresentation
:param client_id: id of client (not client-id)
:return: Keycloak server response (ClientRepresentation)
"""
params_path = {"realm-name": self.realm_name, "id": client_id}
data_raw = self.raw_get(urls_patterns.URL_ADMIN_CLIENT.format(**params_path))
return raise_error_from_response(data_raw, KeycloakGetError)
def get_client_id(self, client_name):
"""
Get internal keycloak client id from client-id.
This is required for further actions against this client.
:param client_name: name in ClientRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_clientrepresentation
:return: client_id (uuid as string)
"""
clients = self.get_clients()
for client in clients:
if client_name == client.get("name") or client_name == client.get("clientId"):
return client["id"]
return None
def get_client_authz_settings(self, client_id):
"""
Get authorization json from client.
:param client_id: id in ClientRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_clientrepresentation
:return: Keycloak server response
"""
params_path = {"realm-name": self.realm_name, "id": client_id}
data_raw = self.raw_get(
urls_patterns.URL_ADMIN_CLIENT_AUTHZ_SETTINGS.format(**params_path)
)
return raise_error_from_response(data_raw, KeycloakGetError)
def create_client_authz_resource(self, client_id, payload, skip_exists=False):
"""
Create resources of client.
:param client_id: id in ClientRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_clientrepresentation
:param payload: ResourceRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_resourcerepresentation
:return: Keycloak server response
"""
params_path = {"realm-name": self.realm_name, "id": client_id}
data_raw = self.raw_post(
urls_patterns.URL_ADMIN_CLIENT_AUTHZ_RESOURCES.format(**params_path),
data=json.dumps(payload),
)
return raise_error_from_response(
data_raw, KeycloakPostError, expected_codes=[201], skip_exists=skip_exists
)
def get_client_authz_resources(self, client_id):
"""
Get resources from client.
:param client_id: id in ClientRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_clientrepresentation
:return: Keycloak server response
"""
params_path = {"realm-name": self.realm_name, "id": client_id}
data_raw = self.raw_get(
urls_patterns.URL_ADMIN_CLIENT_AUTHZ_RESOURCES.format(**params_path)
)
return raise_error_from_response(data_raw, KeycloakGetError)
def create_client_authz_role_based_policy(self, client_id, payload, skip_exists=False):
"""
Create role-based policy of client.
:param client_id: id in ClientRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_clientrepresentation
:param payload: No Document
:return: Keycloak server response
Payload example::
payload={
"type": "role",
"logic": "POSITIVE",
"decisionStrategy": "UNANIMOUS",
"name": "Policy-1",
"roles": [
{
"id": id
}
]
}
"""
params_path = {"realm-name": self.realm_name, "id": client_id}
data_raw = self.raw_post(
urls_patterns.URL_ADMIN_CLIENT_AUTHZ_ROLE_BASED_POLICY.format(**params_path),
data=json.dumps(payload),
)
return raise_error_from_response(
data_raw, KeycloakPostError, expected_codes=[201], skip_exists=skip_exists
)
def create_client_authz_resource_based_permission(self, client_id, payload, skip_exists=False):
"""
Create resource-based permission of client.
:param client_id: id in ClientRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_clientrepresentation
:param payload: PolicyRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_policyrepresentation
:return: Keycloak server response
Payload example::
payload={
"type": "resource",
"logic": "POSITIVE",
"decisionStrategy": "UNANIMOUS",
"name": "Permission-Name",
"resources": [
resource_id
],
"policies": [
policy_id
]
"""
params_path = {"realm-name": self.realm_name, "id": client_id}
data_raw = self.raw_post(
urls_patterns.URL_ADMIN_CLIENT_AUTHZ_RESOURCE_BASED_PERMISSION.format(**params_path),
data=json.dumps(payload),
)
return raise_error_from_response(
data_raw, KeycloakPostError, expected_codes=[201], skip_exists=skip_exists
)
def get_client_authz_scopes(self, client_id):
"""
Get scopes from client.
:param client_id: id in ClientRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_clientrepresentation
:return: Keycloak server response
"""
params_path = {"realm-name": self.realm_name, "id": client_id}
data_raw = self.raw_get(urls_patterns.URL_ADMIN_CLIENT_AUTHZ_SCOPES.format(**params_path))
return raise_error_from_response(data_raw, KeycloakGetError)
def get_client_authz_permissions(self, client_id):
"""
Get permissions from client.
:param client_id: id in ClientRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_clientrepresentation
:return: Keycloak server response
"""
params_path = {"realm-name": self.realm_name, "id": client_id}
data_raw = self.raw_get(
urls_patterns.URL_ADMIN_CLIENT_AUTHZ_PERMISSIONS.format(**params_path)
)
return raise_error_from_response(data_raw, KeycloakGetError)
def get_client_authz_policies(self, client_id):
"""
Get policies from client.
:param client_id: id in ClientRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_clientrepresentation
:return: Keycloak server response
"""
params_path = {"realm-name": self.realm_name, "id": client_id}
data_raw = self.raw_get(
urls_patterns.URL_ADMIN_CLIENT_AUTHZ_POLICIES.format(**params_path)
)
return raise_error_from_response(data_raw, KeycloakGetError)
def get_client_service_account_user(self, client_id):
"""
Get service account user from client.
:param client_id: id in ClientRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_clientrepresentation
:return: UserRepresentation
"""
params_path = {"realm-name": self.realm_name, "id": client_id}
data_raw = self.raw_get(
urls_patterns.URL_ADMIN_CLIENT_SERVICE_ACCOUNT_USER.format(**params_path)
)
return raise_error_from_response(data_raw, KeycloakGetError)
def create_client(self, payload, skip_exists=False):
"""
Create a client
ClientRepresentation:
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_clientrepresentation
:param skip_exists: If true then do not raise an error if client already exists
:param payload: ClientRepresentation
:return: Client ID
"""
if skip_exists:
client_id = self.get_client_id(client_name=payload["name"])
if client_id is not None:
return client_id
params_path = {"realm-name": self.realm_name}
data_raw = self.raw_post(
urls_patterns.URL_ADMIN_CLIENTS.format(**params_path), data=json.dumps(payload)
)
raise_error_from_response(
data_raw, KeycloakPostError, expected_codes=[201], skip_exists=skip_exists
)
_last_slash_idx = data_raw.headers["Location"].rindex("/")
return data_raw.headers["Location"][_last_slash_idx + 1 :] # noqa: E203
def update_client(self, client_id, payload):
"""
Update a client
:param client_id: Client id
:param payload: ClientRepresentation
:return: Http response
"""
params_path = {"realm-name": self.realm_name, "id": client_id}
data_raw = self.raw_put(
urls_patterns.URL_ADMIN_CLIENT.format(**params_path), data=json.dumps(payload)
)
return raise_error_from_response(data_raw, KeycloakPutError, expected_codes=[204])
def delete_client(self, client_id):
"""
Get representation of the client
ClientRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_clientrepresentation
:param client_id: keycloak client id (not oauth client-id)
:return: Keycloak server response (ClientRepresentation)
"""
params_path = {"realm-name": self.realm_name, "id": client_id}
data_raw = self.raw_delete(urls_patterns.URL_ADMIN_CLIENT.format(**params_path))
return raise_error_from_response(data_raw, KeycloakDeleteError, expected_codes=[204])
def get_client_installation_provider(self, client_id, provider_id):
"""
Get content for given installation provider
Related documentation:
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_clients_resource
Possible provider_id list available in the ServerInfoRepresentation#clientInstallations
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_serverinforepresentation
:param client_id: Client id
:param provider_id: provider id to specify response format
"""
params_path = {"realm-name": self.realm_name, "id": client_id, "provider-id": provider_id}
data_raw = self.raw_get(
urls_patterns.URL_ADMIN_CLIENT_INSTALLATION_PROVIDER.format(**params_path)
)
return raise_error_from_response(data_raw, KeycloakGetError, expected_codes=[200])
def get_realm_roles(self):
"""
Get all roles for the realm or client
RoleRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_rolerepresentation
:return: Keycloak server response (RoleRepresentation)
"""
params_path = {"realm-name": self.realm_name}
data_raw = self.raw_get(urls_patterns.URL_ADMIN_REALM_ROLES.format(**params_path))
return raise_error_from_response(data_raw, KeycloakGetError)
def get_realm_role_members(self, role_name, query=None):
"""
Get role members of realm by role name.
:param role_name: Name of the role.
:param query: Additional Query parameters
(see https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_roles_resource)
:return: Keycloak Server Response (UserRepresentation)
"""
query = query or dict()
params_path = {"realm-name": self.realm_name, "role-name": role_name}
return self.__fetch_all(
urls_patterns.URL_ADMIN_REALM_ROLES_MEMBERS.format(**params_path), query
)
def get_client_roles(self, client_id):
"""
Get all roles for the client
:param client_id: id of client (not client-id)
RoleRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_rolerepresentation
:return: Keycloak server response (RoleRepresentation)
"""
params_path = {"realm-name": self.realm_name, "id": client_id}
data_raw = self.raw_get(urls_patterns.URL_ADMIN_CLIENT_ROLES.format(**params_path))
return raise_error_from_response(data_raw, KeycloakGetError)
def get_client_role(self, client_id, role_name):
"""
Get client role id by name
This is required for further actions with this role.
:param client_id: id of client (not client-id)
:param role_name: role’s name (not id!)
RoleRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_rolerepresentation
:return: role_id
"""
params_path = {"realm-name": self.realm_name, "id": client_id, "role-name": role_name}
data_raw = self.raw_get(urls_patterns.URL_ADMIN_CLIENT_ROLE.format(**params_path))
return raise_error_from_response(data_raw, KeycloakGetError)
def get_client_role_id(self, client_id, role_name):
"""
Warning: Deprecated
Get client role id by name
This is required for further actions with this role.
:param client_id: id of client (not client-id)
:param role_name: role’s name (not id!)
RoleRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_rolerepresentation
:return: role_id
"""
role = self.get_client_role(client_id, role_name)
return role.get("id")
def create_client_role(self, client_role_id, payload, skip_exists=False):
"""
Create a client role
RoleRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_rolerepresentation
:param client_role_id: id of client (not client-id)
:param payload: RoleRepresentation
:param skip_exists: If true then do not raise an error if client role already exists
:return: Client role name
"""
if skip_exists:
try:
res = self.get_client_role(client_id=client_role_id, role_name=payload["name"])
return res["name"]
except KeycloakGetError:
pass
params_path = {"realm-name": self.realm_name, "id": client_role_id}
data_raw = self.raw_post(
urls_patterns.URL_ADMIN_CLIENT_ROLES.format(**params_path), data=json.dumps(payload)
)
raise_error_from_response(
data_raw, KeycloakPostError, expected_codes=[201], skip_exists=skip_exists
)
_last_slash_idx = data_raw.headers["Location"].rindex("/")
return data_raw.headers["Location"][_last_slash_idx + 1 :] # noqa: E203
def add_composite_client_roles_to_role(self, client_role_id, role_name, roles):
"""
Add composite roles to client role
:param client_role_id: id of client (not client-id)
:param role_name: The name of the role
:param roles: roles list or role (use RoleRepresentation) to be updated
:return: Keycloak server response
"""
payload = roles if isinstance(roles, list) else [roles]
params_path = {"realm-name": self.realm_name, "id": client_role_id, "role-name": role_name}
data_raw = self.raw_post(
urls_patterns.URL_ADMIN_CLIENT_ROLES_COMPOSITE_CLIENT_ROLE.format(**params_path),
data=json.dumps(payload),
)
return raise_error_from_response(data_raw, KeycloakPostError, expected_codes=[204])
def update_client_role(self, client_role_id, role_name, payload):
"""
Update a client role
RoleRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_rolerepresentation
:param client_role_id: id of client (not client-id)
:param role_name: role's name (not id!)
:param payload: RoleRepresentation
"""
params_path = {"realm-name": self.realm_name, "id": client_role_id, "role-name": role_name}
data_raw = self.raw_put(
urls_patterns.URL_ADMIN_CLIENT_ROLE.format(**params_path), data=json.dumps(payload)
)
return raise_error_from_response(data_raw, KeycloakPutError, expected_codes=[204])
def delete_client_role(self, client_role_id, role_name):
"""
Delete a client role
RoleRepresentation
https://www.keycloak.org/docs-api/18.0/rest-api/index.html#_rolerepresentation
:param client_role_id: id of client (not client-id)
:param role_name: role's name (not id!)
"""
params_path = {"realm-name": self.realm_name, "id": client_role_id, "role-name": role_name}
data_raw = self.raw_delete(urls_patterns.URL_ADMIN_CLIENT_ROLE.format(**params_path))
return raise_error_from_response(data_raw, KeycloakDeleteError, expected_codes=[204])
def assign_client_role(self, user_id, client_id, roles):
"""
Assign a client role to a user
:param user_id: id of user
:param client_id: id of client (not client-id)
:param roles: roles list or role (use RoleRepresentation)
:return: Keycloak server response
"""
payload = roles if isinstance(roles, list) else | |
numpy.arange(N + 1, dtype=float) + 0.5
index = index2[0:-1]
K1 = special.kv(index2, kappa * r1)
K1p = index / (kappa * r1) * K1[0:-1] - K1[1:]
k1 = special.kv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
k1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r1)**(3 / 2.)) * special.kv(
index, kappa * r1) + numpy.sqrt(pi / (2 * kappa * r1)) * K1p
a0_inf = -sigma0 / (epsilon * kappa * k1p[0])
U1_inf = a0_inf * k1[0]
C1 = 2 * pi * sigma0 * r1 * r1
C0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)
E = C0 * C1 * U1_inf
return E
def constant_potential_twosphere_dissimilar(phi01, phi02, r1, r2, R, kappa,
                                            epsilon):
    """
    It computes the interaction energy for dissimilar spheres at constant
    potential, immersed in water.

    Solves the linearized Poisson-Boltzmann problem with a truncated
    multipole expansion (N terms) about each sphere center, then takes the
    energy difference relative to infinite separation.

    Arguments
    ----------
    phi01  : float, constant potential on the surface of the sphere 1.
    phi02  : float, constant potential on the surface of the sphere 2.
    r1     : float, radius of sphere 1.
    r2     : float, radius of sphere 2.
    R      : float, distance center to center.
    kappa  : float, reciprocal of Debye length.
    epsilon: float, water dielectric constant.

    Returns
    --------
    E_inter: float, interaction energy.
    """
    N = 20  # Number of terms in expansion

    # Physical constants used in the unit-conversion factor C0 below.
    qe = 1.60217646e-19    # elementary charge
    Na = 6.0221415e23      # Avogadro's number
    E_0 = 8.854187818e-12  # vacuum permittivity
    cal2J = 4.184          # calorie-to-Joule conversion

    # Half-integer orders for modified Bessel functions; index2 carries one
    # extra order so derivatives can be formed from the recurrences below.
    index2 = numpy.arange(N + 1, dtype=float) + 0.5
    index = index2[0:-1]

    # k1/k2: modified *spherical* Bessel functions of the second kind at
    # each sphere surface (cylindrical kv times the sqrt(pi/(2x)) factor);
    # k1p/k2p are their derivatives.
    K1 = special.kv(index2, kappa * r1)
    K1p = index / (kappa * r1) * K1[0:-1] - K1[1:]
    k1 = special.kv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
    k1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r1)**(3 / 2.)) * special.kv(
        index, kappa * r1) + numpy.sqrt(pi / (2 * kappa * r1)) * K1p

    K2 = special.kv(index2, kappa * r2)
    K2p = index / (kappa * r2) * K2[0:-1] - K2[1:]
    k2 = special.kv(index, kappa * r2) * numpy.sqrt(pi / (2 * kappa * r2))
    k2p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r2)**(3 / 2.)) * special.kv(
        index, kappa * r2) + numpy.sqrt(pi / (2 * kappa * r2)) * K2p

    # i1/i2: modified spherical Bessel functions of the first kind and
    # their derivatives, analogous to the above.
    I1 = special.iv(index2, kappa * r1)
    I1p = index / (kappa * r1) * I1[0:-1] + I1[1:]
    i1 = special.iv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
    i1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r1)**(3 / 2.)) * special.iv(
        index, kappa * r1) + numpy.sqrt(pi / (2 * kappa * r1)) * I1p

    I2 = special.iv(index2, kappa * r2)
    I2p = index / (kappa * r2) * I2[0:-1] + I2[1:]
    i2 = special.iv(index, kappa * r2) * numpy.sqrt(pi / (2 * kappa * r2))
    i2p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r2)**(3 / 2.)) * special.iv(
        index, kappa * r2) + numpy.sqrt(pi / (2 * kappa * r2)) * I2p

    # B[n, m]: re-expansion (coupling) coefficients between the two
    # expansion centers, built from gamma/factorial weights and kv at the
    # center-to-center distance R.
    B = numpy.zeros((N, N), dtype=float)
    for n in range(N):
        for m in range(N):
            for nu in range(N):
                if n >= nu and m >= nu:
                    g1 = gamma(n - nu + 0.5)
                    g2 = gamma(m - nu + 0.5)
                    g3 = gamma(nu + 0.5)
                    g4 = gamma(m + n - nu + 1.5)
                    f1 = factorial(n + m - nu)
                    f2 = factorial(n - nu)
                    f3 = factorial(m - nu)
                    f4 = factorial(nu)
                    Anm = g1 * g2 * g3 * f1 * (n + m - 2 * nu + 0.5) / (
                        pi * g4 * f2 * f3 * f4)
                    kB = special.kv(n + m - 2 * nu + 0.5, kappa *
                                    R) * numpy.sqrt(pi / (2 * kappa * R))
                    B[n, m] += Anm * kB

    # Assemble the 2N x 2N linear system enforcing the constant-potential
    # boundary condition on both spheres at once; identity on the diagonal
    # blocks, coupling terms on the off-diagonal blocks.
    M = numpy.zeros((2 * N, 2 * N), float)
    for j in range(N):
        for n in range(N):
            M[j, n + N] = (2 * j + 1) * B[j, n] * i1[j] / k2[n]
            M[j + N, n] = (2 * j + 1) * B[j, n] * i2[j] / k1[n]
            if n == j:
                M[j, n] = 1
                M[j + N, n + N] = 1

    # Only the monopole terms of the RHS are non-zero (uniform potentials).
    RHS = numpy.zeros(2 * N)
    RHS[0] = phi01
    RHS[N] = phi02

    coeff = linalg.solve(M, RHS)

    # Expansion coefficients for sphere 1 (a) and sphere 2 (b).
    a = coeff[0:N] / k1
    b = coeff[N:2 * N] / k2

    a0 = a[0]
    a0_inf = phi01 / k1[0]  # isolated-sphere (infinite separation) limit
    b0 = b[0]
    b0_inf = phi02 / k2[0]

    # Surface derivative terms at separation R (U*_h) and at infinite
    # separation (U*_inf).
    U1_inf = a0_inf * k1p[0]
    U1_h = a0 * k1p[0] + i1p[0] * numpy.sum(b * B[:, 0])
    U2_inf = b0_inf * k2p[0]
    U2_h = b0 * k2p[0] + i2p[0] * numpy.sum(a * B[:, 0])

    # Interaction energy = difference w.r.t. infinite separation, scaled by
    # the unit-conversion factor C0.
    C1 = 2 * pi * kappa * phi01 * r1 * r1 * epsilon
    C2 = 2 * pi * kappa * phi02 * r2 * r2 * epsilon
    C0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)
    E_inter = C0 * (C1 * (U1_h - U1_inf) + C2 * (U2_h - U2_inf))

    return E_inter
def constant_charge_twosphere_dissimilar(sigma01, sigma02, r1, r2, R, kappa,
epsilon):
"""
It computes the interaction energy between two dissimilar spheres at
constant charge, immersed in water.
Arguments
----------
sigma01: float, constant charge on the surface of the sphere 1.
sigma02: float, constant charge on the surface of the sphere 2.
r1 : float, radius of sphere 1.
r2 : float, radius of sphere 2.
R : float, distance center to center.
kappa : float, reciprocal of Debye length.
epsilon: float, water dielectric constant.
Returns
--------
E_inter: float, interaction energy.
"""
N = 20 # Number of terms in expansion
qe = 1.60217646e-19
Na = 6.0221415e23
E_0 = 8.854187818e-12
cal2J = 4.184
index2 = numpy.arange(N + 1, dtype=float) + 0.5
index = index2[0:-1]
K1 = special.kv(index2, kappa * r1)
K1p = index / (kappa * r1) * K1[0:-1] - K1[1:]
k1 = special.kv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
k1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r1)**(3 / 2.)) * special.kv(
index, kappa * r1) + numpy.sqrt(pi / (2 * kappa * r1)) * K1p
K2 = special.kv(index2, kappa * r2)
K2p = index / (kappa * r2) * K2[0:-1] - K2[1:]
k2 = special.kv(index, kappa * r2) * numpy.sqrt(pi / (2 * kappa * r2))
k2p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r2)**(3 / 2.)) * special.kv(
index, kappa * r2) + numpy.sqrt(pi / (2 * kappa * r2)) * K2p
I1 = special.iv(index2, kappa * r1)
I1p = index / (kappa * r1) * I1[0:-1] + I1[1:]
i1 = special.iv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
i1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r1)**(3 / 2.)) * special.iv(
index, kappa * r1) + numpy.sqrt(pi / (2 * kappa * r1)) * I1p
I2 = special.iv(index2, kappa * r2)
I2p = index / (kappa * r2) * I2[0:-1] + I2[1:]
i2 = special.iv(index, kappa * r2) * numpy.sqrt(pi / (2 * kappa * r2))
i2p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r2)**(3 / 2.)) * special.iv(
index, kappa * r2) + numpy.sqrt(pi / (2 * kappa * r2)) * I2p
B = numpy.zeros((N, N), dtype=float)
for n in range(N):
for m in range(N):
for nu in range(N):
if n >= nu and m >= nu:
g1 = gamma(n - nu + 0.5)
g2 = gamma(m - nu + 0.5)
g3 = gamma(nu + 0.5)
g4 = gamma(m + n - nu + 1.5)
f1 = factorial(n + m - nu)
f2 = factorial(n - nu)
f3 = factorial(m - nu)
f4 = factorial(nu)
Anm = g1 * g2 * g3 * f1 * (n + m - 2 * nu + 0.5) | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 28 16:11:52 2018
Functions for LARFP_lumen_segmentation_CGV.py
@author: clauvasq
"""
# import packages
import numpy as np
import matplotlib.pyplot as plt
import skimage.io as io
io.use_plugin('tifffile')
from skimage.filters import threshold_otsu
from skimage import filters
from skimage import morphology
from scipy import ndimage
import cv2
pw_desktop = '/Users/clauvasq/Desktop/'
def med_filter(stack, med_sel=7):
    """
    Median-filter an image stack slice by slice.

    Each z-slice is filtered independently with a disk-shaped structuring
    element.

    Parameters
    ----------
    stack : ndarray
        ndarray of cyst with dimensions [Z, Y, X]
    med_sel : int, default = 7
        radius of the disk selem used by the median filter

    Returns
    -------
    med_stack : ndarray
        median filtered copy of the input stack
    """
    # build the structuring element once instead of per slice
    selem = morphology.disk(med_sel)
    med_stack = stack.copy()
    for z, z_slice in enumerate(stack):
        med_stack[z, :, :] = filters.median(z_slice, selem=selem)
    return med_stack
def otsu_morph_seg(stack, hole_size=2048, opt=1):
    """
    Segment a z-stack on a per-slice basis, with three morphology variants.

    Intended to reduce lumen-segmentation errors caused by actin loops
    inside the lumen.

    Parameters
    ----------
    stack : ndarray
        ndarray of cyst with dimensions [Z, Y, X], otsu-thresholded
    hole_size : int, default = 2048
        maximum hole area filled in the segmentation
    opt : 1, 2, 3
        option 1: otsu threshold each slice, then remove small holes
        option 2: binary closing w/ selem=disk(9), then remove small holes
        option 3: for squiggly lumens - closing, remove small holes,
        erosion with disk(5), then opening with disk(2)

    Returns
    -------
    bin_stack : ndarray
        stack of cyst, with segmented lumen, hopefully
    """
    bin_stack = stack.copy()
    for z, z_slice in enumerate(stack):
        # empty slices stay empty
        if not np.count_nonzero(z_slice):
            bin_stack[z, :, :] = np.zeros(np.shape(z_slice))
            continue
        if opt == 1:
            mask = z_slice > threshold_otsu(z_slice)
            result = np.array(morphology.remove_small_holes(mask, hole_size),
                              dtype=np.uint8)
        elif opt == 2:
            mask = morphology.binary_closing(z_slice, selem=morphology.disk(9))
            result = np.array(morphology.remove_small_holes(mask, hole_size),
                              dtype=np.uint8)
        else:
            closed = morphology.binary_closing(z_slice)
            filled = morphology.remove_small_holes(closed, hole_size)
            eroded = morphology.binary_erosion(filled, selem=morphology.disk(5))
            result = morphology.binary_opening(eroded, selem=morphology.disk(2))
        bin_stack[z, :, :] = result
    return bin_stack
def two_cell_seg(stack, hole_size=128, disk_size=3, obj_size=100):
    """
    Segment a z-stack with 2 cells and bright actin enrichment/lumen.

    Otsu-thresholds the max projection of the stack, then per slice fills
    small holes, applies a morphological closing, and removes small objects.

    Parameters
    ----------
    stack : ndarray
        ndarray of cyst with dimensions [Z, Y, X]
    hole_size : int, default = 128
        maximum hole area filled before the closing step
    disk_size : int, default = 3
        radius of the disk selem for the morphological closing
    obj_size : int, default = 100
        size of minimum sized object, anything smaller will be removed

    Returns
    -------
    bin_stack : ndarray
        uint8 stack of cyst, with segmented lumen, hopefully
    """
    bin_stack = stack.copy()
    # single global threshold from the max projection
    threshold = threshold_otsu(np.max(stack, 0))
    otsu_stack = np.array(stack > threshold, dtype=np.uint8)
    closing_sel = morphology.disk(disk_size)
    for z in range(len(stack)):
        no_holes = morphology.remove_small_holes(otsu_stack[z, :, :], hole_size)
        closed = morphology.closing(no_holes, selem=closing_sel)
        bin_stack[z, :, :] = morphology.remove_small_objects(closed,
                                                             min_size=obj_size)
    return np.array(bin_stack, dtype=np.uint8)
def dim_signal_seg(stack, med_sel=5, otsu_factor=1.5, hole_size=1024, obj_size=500):
    """
    Segment a z-stack of cells with dim signal.

    Thresholds the stack at ``otsu_factor`` times the otsu value of the max
    projection, then per slice fills small holes and removes small objects.

    Parameters
    ----------
    stack : ndarray
        ndarray of cyst with dimensions [Z, Y, X]
    med_sel : int, default = 5
        radius of disk selem for a per-slice median filter; currently
        unused (the median-filter step is disabled)
    otsu_factor : float, default = 1.5
        multiplier for the otsu value used as threshold
    hole_size : int, default = 1024
        maximum hole area filled per slice
    obj_size : int, default = 500
        size of minimum sized object, anything smaller will be removed

    Returns
    -------
    bin_stack : ndarray
        uint8 stack of cyst, with segmented lumen, hopefully
    """
    bin_stack = stack.copy()
    # single global threshold, scaled, from the max projection
    threshold = otsu_factor * threshold_otsu(np.max(stack, 0))
    otsu_stack = np.array(stack > threshold, dtype=np.uint8)
    for z in range(len(stack)):
        filled = morphology.remove_small_holes(otsu_stack[z, :, :], hole_size)
        bin_stack[z, :, :] = morphology.remove_small_objects(filled,
                                                             min_size=obj_size)
    return np.array(bin_stack, dtype=np.uint8)
def eight_bit_seg(stack, hole_size=2048):
    """
    Segment the lumen of an **8-bit** z-stack on a per-slice basis.

    Each slice is dilated, has small holes filled, and is then eroded with
    a disk of radius 4.

    Parameters
    ----------
    stack : ndarray
        ndarray of cyst with dimensions [Z, Y, X], otsu-thresholded
    hole_size : int, default = 2048
        maximum hole area filled in the segmentation

    Returns
    -------
    bin_stack : ndarray
        stack of cyst, with segmented lumen, hopefully
    """
    erosion_sel = morphology.disk(4)
    bin_stack = stack.copy()
    for z, z_slice in enumerate(stack):
        dilated = morphology.binary_dilation(z_slice)
        filled = morphology.remove_small_holes(dilated, hole_size)
        bin_stack[z, :, :] = morphology.binary_erosion(filled, selem=erosion_sel)
    return bin_stack
def eight_bit_cyst_seg(stack, disk_size=7):
    """
    Segment the whole cyst from an 8-bit z-stack on a per-slice basis.

    Thresholds each slice at its mean intensity, then closes, fills holes,
    erodes, and removes small objects.

    Parameters
    ----------
    stack : ndarray
        ndarray of cyst with dimensions [Z, Y, X]
    disk_size : int, default = 7
        radius of the disk selem for the erosion step

    Returns
    -------
    bin_stack : ndarray
        stack of cyst, with segmented cyst
    """
    closing_sel = morphology.disk(3)
    erosion_sel = morphology.disk(disk_size)
    bin_stack = stack.copy()
    for z, z_slice in enumerate(stack):
        mask = z_slice > z_slice.mean()
        closed = morphology.binary_closing(mask, selem=closing_sel)
        # NOTE(review): `min_size` is the deprecated keyword for
        # remove_small_holes (newer skimage uses `area_threshold`) --
        # confirm against the pinned skimage version before changing.
        filled = morphology.remove_small_holes(closed, min_size=8192)
        eroded = morphology.binary_erosion(filled, selem=erosion_sel)
        bin_stack[z, :, :] = morphology.remove_small_objects(eroded, 2048)
    return bin_stack
def lumen_post(stack, disk_size=5):
    """
    Erode every z-slice of a binary stack with a disk selem.

    Use after contour finding on the lumen segmentation, occasionally.

    Parameters
    ----------
    stack : ndarray
        binary stack with dimensions [Z, Y, X]
    disk_size : int, default = 5
        radius of the disk selem used for the erosion

    Returns
    -------
    post_stack : ndarray
        eroded copy of the input stack
    """
    selem = morphology.disk(disk_size)
    post_stack = np.copy(stack)
    for z, z_slice in enumerate(stack):
        post_stack[z, :, :] = morphology.binary_erosion(z_slice, selem=selem)
    return post_stack
def cyst_edge(stack, low_pct=0.01, hi_pct=0.99, plot=False):
    """
    Determine the z-slice range that contains the cyst.

    Projects the stack along Y (and X), takes the mean along the remaining
    spatial axis to get a per-z intensity profile, and uses the cumulative
    sum of that profile: the first z index whose cumulative fraction exceeds
    low_pct is the lower bound, the first exceeding hi_pct the upper bound.
    Uses the minimum of the Y/X estimates for the lower bound and the
    maximum for the upper bound.

    Parameters
    ----------
    stack : ndarray
        ndarray of cyst with dimensions [Z, Y, X]
    low_pct : float, 0-1
        lower bound of area under intensity curve
    hi_pct : float, 0-1
        upper bound of area under intensity curve
    plot : bool, default = False
        if True, then plots out projections, mean line projection, and cumsum

    Returns
    -------
    z_lower, z_upper : int, int
        bounds, inclusive, of z-slices that include cyst
    """
    # project image along Y and X, respectively
    im_projY = stack.sum(1)
    im_projX = stack.sum(2)
    # take mean along X and Y, respectively, giving per-z intensity profiles
    lineProjY = np.mean(im_projY, 1)
    lineProjX = np.mean(im_projX, 1)
    # determine edges of peak: find area under curve and where the
    # cumulative sum reaches the requested fractions of the total
    lineProjY_csum = np.cumsum(lineProjY)
    lineProjX_csum = np.cumsum(lineProjX)
    Y_csum = lineProjY_csum[-1]
    X_csum = lineProjX_csum[-1]
    z_fromY = [np.where(lineProjY_csum > low_pct*Y_csum)[0][0],
               np.where(lineProjY_csum > hi_pct*Y_csum)[0][0]]
    z_fromX = [np.where(lineProjX_csum > low_pct*X_csum)[0][0],
               np.where(lineProjX_csum > hi_pct*X_csum)[0][0]]
    # BUGFIX: the upper bound previously used min(); the docstring (and the
    # intent of bracketing the cyst) call for max(). Both profiles are the
    # per-z total intensity up to a constant factor, so the two estimates
    # coincide in practice, but max() is the stated/conservative behavior.
    z_lower = min(z_fromY[0], z_fromX[0])
    z_upper = max(z_fromY[1], z_fromX[1])
    # optional diagnostic plots
    if plot == True:
        fig, ax = plt.subplots(nrows=2, ncols=3)
        ax[0, 0].imshow(im_projY)
        ax[1, 0].imshow(im_projX)
        ax[0, 1].plot(lineProjY)
        ax[1, 1].plot(lineProjX)
        ax[0, 2].plot(lineProjY_csum)
        ax[1, 2].plot(lineProjX_csum)
    return z_lower, z_upper
def bgsub_zyx_morph(stack, sel_e=7, hole_size=2048, obj_size=512, sel_e2=5, opt=2):
"""
Segmentation of whole cyst via background subtraction in z direction,
y direction, and x direction.
(1) median filters
(2) background subtractions
(3) morphological operations to clean up
(4) medain filter again to smooth segmentation
Parameters
----------
stack : ndarray
ndarray of cyst with dimensions [Z, Y, X]
sel_e : int, default = 7
size of selem in disk for first morphological erosion
hole_size : int, default = 2048
size of holes to remove
obj_size : int, default = 512
size of objects to remove
sel_e2 : int, defualt = 5
size of selem in disk for second morphological erosion
opt : 1 or 2, defualt = 2
different order of morphological operations, option 2 seems to work
better...
Returns
-------
med_stack : ndarray
ndarry of cyst, with segmented cyst
"""
# median filter
med_stack = med_filter(stack, med_sel=3)
Z, Y, X = stack.shape
z_fgm = np.copy(stack)
y_fgm = np.copy(stack)
x_fgm = np.copy(stack)
# initialize bacground subtraction
# go through each z_slice, bkg subtract
fgbg = cv2.createBackgroundSubtractorMOG2()
for z in range(Z):
frame = med_stack[z, :, :]
fgmask = fgbg.apply(frame)
fgmask_2 = np.array(fgmask > 0, dtype=np.uint8)
z_fgm[z, :, :] = fgmask_2
# go through each y-slice, bkg subtract
fgbg = cv2.createBackgroundSubtractorMOG2()
for y in range(Y):
frame = med_stack[:, y, :]
fgmask = fgbg.apply(frame)
fgmask_2 = np.array(fgmask > 0, dtype=np.uint8)
| |
"""
@author sanjeethr, oligoglot
Implements SGDClassifier using FeatureUnions for Sentiment Classification of text
It also has code to experiment with hyper tuning parameters of the classifier
"""
from __future__ import print_function
import numpy as np
import pickle
import json
from pprint import pprint
from time import time
import sys, os
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer, HashingVectorizer, CountVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import uniform
from sklearn.model_selection import GridSearchCV
from libindic.soundex import Soundex
from lib.feature_utils import load_docs, get_emojis_from_text, get_doc_len_range
sys.path.append(os.path.join(os.path.dirname(sys.path[0]),'extern', 'indic_nlp_library'))
from indicnlp.normalize.indic_normalize import BaseNormalizer
sys.path.append(os.path.join(os.path.dirname(sys.path[0]),'extern'))
import deepchar
sys.path.append(os.path.join(os.path.dirname(sys.path[0]),'extern', 'solthiruthi-sothanaikal'))
from symspellpy import SymSpell, Verbosity
try:
from indictrans import Transliterator
except ImportError:
print('Please install indic-trans from git: https://github.com/libindic/indic-trans')
class ItemSelector(BaseEstimator, TransformerMixin):
    """Select a single column of feature-grouped data by key.

    Expects data stored feature-major: ``data[key]`` yields all samples for
    one feature, i.e. ``len(data[key]) == n_samples``. Note this is the
    transpose of the usual scikit-learn sample-major feature matrix.

    Any collection implementing ``__getitem__`` works: a dict of lists, a
    2D numpy array, a pandas DataFrame, a numpy record array, etc.

    >> data = {'a': [1, 5, 2, 5, 2, 8],
               'b': [9, 4, 1, 4, 1, 3]}
    >> ds = ItemSelector(key='a')
    >> data['a'] == ds.transform(data)

    ItemSelector is not designed to handle data grouped by sample (e.g. a
    list of dicts); for that, consider a transformer along the lines of
    `sklearn.feature_extraction.DictVectorizer`.

    Parameters
    ----------
    key : hashable, required
        The key corresponding to the desired value in a mappable.
    """

    def __init__(self, key):
        self.key = key

    def fit(self, x, y=None):
        # stateless transformer: nothing to learn
        return self

    def transform(self, data_dict):
        # just index the mappable with the configured key
        return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
    """Turn each document into simple length stats for DictVectorizer."""

    def fit(self, x, y=None):
        # stateless transformer: nothing to learn
        return self

    def transform(self, reviews):
        # one dict of ad hoc numeric features per document
        stats = []
        for text in reviews:
            stats.append({'length': len(text),
                          'num_sentences': text.count('.')})
        return stats
class FeatureExtractor(BaseEstimator, TransformerMixin):
    """Extract review text, emojis and emoji sentiment.
    Takes a sequence of strings and produces a record array of values.
    Fields are `review`, `emojis`, `emoji_sentiment`, `lang_tag`,
    `len_range`, and `soundexes`.
    """
    def __init__(self, lang = 'ta'):
        # lang: language code ('ta' or 'ml') controlling normalization,
        # transliteration and spell lookup
        self.lang = lang
        self.normalizer = BaseNormalizer(lang)
        # This language map was created using Google's googletrans module. Create the file alltextlang.txt by calling
        # detect_lang_and_store in feature_utils.py
        self.lmap = self.load_language_maps( os.path.join(os.path.dirname(sys.path[0]),'../resources/data/alltextslang.txt'))
        self.soundexer = Soundex()
        # transliterators from Latin script to Tamil / Malayalam script
        self.ta_trans = Transliterator(source='eng', target='tam', build_lookup=True)
        self.ml_trans = Transliterator(source='eng', target='mal', build_lookup=True)
        # spell checker backed by the etymdict vocabulary
        self.sym_spell = SymSpell(max_dictionary_edit_distance=2, prefix_length=7)
        self.sym_spell.load_dictionary('../../src/extern/data/etymdict.csv.vocab.tsv.gz',
                                       term_index=0,
                                       count_index=1,
                                       separator="\t")
        super().__init__()
    def load_language_maps(self, mapfile):
        # Parse the precomputed TSV of (text, detected language, confidence)
        # into a dict keyed by the raw text.
        lmap = {}
        with open(mapfile, 'r') as mapf:
            for line in mapf:
                text, lang, conf = line.rstrip().split('\t')
                lmap[text] = (lang, float(conf))
        return lmap
    def get_language_tag(self, text):
        # Fall back to ('unknown', 0.0) for text absent from the map.
        return self.lmap.get(text, ('unknown', 0.0))
    def fit(self, x, y=None):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, reviews):
        # One record per review; every field holds a Python object.
        features = np.recarray(shape=(len(reviews),),
                               dtype=[('review', object), ('emojis', object), ('emoji_sentiment', object),
                                      ('lang_tag', object), ('len_range', object), ('soundexes', object),],)
        for i, review in enumerate(reviews):
            features['review'][i] = self.normalizer.normalize(text = review)
            emojis, sentiment = get_emojis_from_text(review)
            features['emojis'][i] = ' '.join(emojis)
            features['emoji_sentiment'][i] = sentiment
            # Agreement score between the configured language and the
            # precomputed Google detection for this review.
            lang, conf = self.get_language_tag(review.strip())
            if lang == self.lang or lang == (self.lang + 'en'):
                # google agrees with some confidence
                agreement = 1
            elif conf < 0.5:
                # google says not-tamil, but weakly
                agreement = 0.5
            else:
                # google clearly says not-tamil
                agreement = 0
            features['lang_tag'][i] = {'lang': lang, 'agreement': agreement}
            features['len_range'][i] = get_doc_len_range(review)
            # Transliterate to native script before soundexing; the symspell
            # loop below only prints close matches (debug/diagnostic output,
            # does not alter the features).
            if self.lang == 'ta':
                review_trans = self.ta_trans.transform(review)
                for word in review_trans.split():
                    suggestions = self.sym_spell.lookup(word, Verbosity.CLOSEST, max_edit_distance=2, include_unknown=True)
                    if len(suggestions) > 0 and suggestions[0].distance < 3:
                        print(word, suggestions[0].term)
                    # no match with dictionary, we need a more comprehensive dictionary plus phonetic similarity
            elif self.lang == 'ml':
                review_trans = self.ml_trans.transform(review)
            else:
                review_trans = review
            # TODO: introduce spell correct here for added normalisation
            # print(lang, review_trans)
            features['soundexes'][i] = ' '.join([self.soundexer.soundex(word) for word in review_trans.split()])
        return features
def fit_predict_measure(mode, train_file, test_file, inputfile, lang = 'ta'):
    """Train the pipeline and evaluate / experiment / predict per mode.

    Parameters
    ----------
    mode : str
        'experiment' runs hyper-parameter tuning; 'test' fits on the train
        split, prints per-review right/wrong lines and a classification
        report; 'predict' loads a pickled pipeline, refits, and writes a
        TSV of predictions.
    train_file, test_file : str
        Paths to the train and test data files (see load_docs).
    inputfile : str
        JSON parameter file ('experiment') or pickled pipeline ('predict').
    lang : str, default 'ta'
        Language code, 'ta' or 'ml'.
    """
    print(train_file, test_file)
    data_train = load_docs(train_file, mode='train')
    data_test = load_docs(test_file, mode=mode)
    print('Data Loaded')
    target_names = data_train['target_names']
    if mode == 'experiment':
        perform_hyper_param_tuning(data_train, data_test, inputfile, lang)
    if mode == 'test':
        pipeline = get_pipeline(lang, len(data_train['data']))
        pipeline.fit(data_train['data'], data_train['target_names'])
        """ params = pipeline.get_params(deep=True)
        print(params['rsrch__estimator__alpha'], params['rsrch__estimator__penalty']) """
        y = pipeline.predict(data_test['data'])
        print(len(y))
        assert(len(data_test['data'])==len(y))
        # TODO: TypeError: can't pickle module objects.
        # pickle.dump(pipeline, open(inputfile, 'wb'))
        idx = 0
        for v in data_test['data']:
            if (y[idx] == data_test['target_names'][idx]):
                print("Right : {} -> Prediction : {} -> Original : {}".format(v, y[idx], data_test['target_names'][idx]))
            else:
                print("Wrong : {} -> Prediction : {} -> Original : {}".format(v, y[idx], data_test['target_names'][idx]))
            idx += 1
        # BUGFIX: classification_report expects (y_true, y_pred); the
        # prediction vector was previously passed first, which swaps
        # precision and recall in the report. Now consistent with
        # perform_hyper_param_tuning.
        print(classification_report(data_test['target_names'], y))
    if mode == 'predict':
        pipeline = pickle.load(open(inputfile, 'rb'))
        pipeline.fit(data_train['data'], data_train['target_names'])
        """ params = pipeline.get_params(deep=True)
        print(params['rsrch__estimator__alpha'], params['rsrch__estimator__penalty']) """
        y = pipeline.predict(data_test['data'])
        print(len(y))
        assert(len(data_test['data'])==len(y))
        with open(f'theedhumnandrum_{lang}.tsv', 'w') as outf:
            outf.write('id\ttext\tlabel\n')
            for idx, review, label in zip(data_test['ids'], data_test['data'], y):
                print(idx)
                outf.write('\t'.join((idx, review, label)) + '\n')
        print(f'predict data written to theedhumnandrum_{lang}.tsv')
# Perform tuning of hyper parameters by passing in the field you want to
# tune as a json input file. You can find sample files in the config directory
def perform_hyper_param_tuning(data_train, data_test, input_file, lang = 'ta'):
    """Grid-search pipeline hyper-parameters described in a JSON file.

    Fits GridSearchCV on the train split, prints the best score and
    parameters, the per-candidate CV scores, and a classification report
    on the test split.
    """
    pipeline = get_pipeline(lang, len(data_train['data']))
    # the parameter grid comes from a JSON file (samples in config dir)
    with open(input_file) as param_file:
        parameters = json.load(param_file)
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1, scoring='accuracy')
    print("Performing grid search...")
    print("pipeline:", [name for name, _ in pipeline.steps])
    print("parameters:")
    pprint(parameters)
    t0 = time()
    grid_search.fit(data_train['data'], data_train['target_names'])
    print("done in %0.3fs" % (time() - t0))
    print()
    print("Best score: %0.3f" % grid_search.best_score_)
    print("Best parameters set:")
    best_parameters = grid_search.best_estimator_.get_params()
    for param_name in sorted(parameters.keys()):
        print("\t%s: %r" % (param_name, best_parameters[param_name]))
    print("Grid scores on development set:")
    print()
    cv_results = grid_search.cv_results_
    for mean, std, params in zip(cv_results['mean_test_score'],
                                 cv_results['std_test_score'],
                                 cv_results['params']):
        print("%0.3f (+/-%0.03f) for %r"
              % (mean, std * 2, params))
    print()
    print("Detailed classification report:")
    print()
    print("The model is trained on the full development set.")
    print("The scores are computed on the full evaluation set.")
    print()
    y_true, y_pred = data_test["target_names"], grid_search.predict(data_test["data"])
    print(classification_report(y_true, y_pred))
    print()
# Get the tranformer weights for a language. Use the experiment mode of the script
# to find the right hypertuning parameters
def get_transformer_weights(lang = 'ta'):
    """Return the FeatureUnion transformer weights tuned for a language.

    Use the experiment mode of the script to find the right hypertuning
    parameters.

    Parameters
    ----------
    lang : str, default 'ta'
        Language code; 'ta' and 'ml' are supported.

    Returns
    -------
    dict
        Mapping of transformer name to its weight in the FeatureUnion.
    """
    ta_weights = {
        'emoji_sentiment': 0.6,
        'emojis': 0.8,  # higher value seems to improve negative ratings
        'review_bow': 0.0,
        'review_ngram': 1.0,
        'lang_tag': 0.6,
        'len_range': 0.0,
        'soundexes_bow': 0.5,
    }
    ml_weights = {
        'emoji_sentiment': 0.6,
        'emojis': 0.8,  # higher value seems to improve negative ratings
        'review_bow': 0.0,
        'review_ngram': 1.0,
        'lang_tag': 0.7,
        'len_range': 0.5,
        'soundexes_bow': 0.5,
    }
    return {'ta': ta_weights, 'ml': ml_weights}[lang]
# The core function that returns the Pipeline. This is a FeatureUnion of
# a SGD Classifier. Borrows from https://scikit-learn.org/0.18/auto_examples/hetero_feature_union.html
def get_pipeline(lang = 'ta', datalen = 1000):
chosen_weights = get_transformer_weights(lang)
print(chosen_weights)
""" distributions = dict(
penalty=['l1', 'l2', 'elasticnet'],
alpha=uniform(loc=1e-6, scale=1e-4)
) """
pipeline = Pipeline([
# Extract the review text & emojis
('reviewfeatures', FeatureExtractor(lang)),
# Use FeatureUnion to combine the features from emojis and text
('union', FeatureUnion(
transformer_list=[
# Pipeline for emojis handled like a bag of words
('emojis', Pipeline([
('selector', ItemSelector(key='emojis')),
('tfidf', TfidfVectorizer(token_pattern=r'[^\s]+', stop_words=None, max_df=0.4, min_df=2, max_features=10)),
])),
# Pipeline for pulling features from the post's emoji sentiment
('emoji_sentiment', Pipeline([
('selector', ItemSelector(key='emoji_sentiment')),
('vect', HashingVectorizer()),
])),
# Pipeline for length of doc feature
('len_range', Pipeline([
('selector', ItemSelector(key='len_range')),
('vect', HashingVectorizer()),
])),
# Pipeline for standard bag-of-words model for soundexes
('soundexes_bow', Pipeline([
('selector', ItemSelector(key='soundexes')),
# Best Tamil Configuration
# ('tfidf', TfidfVectorizer( input='content', stop_words=None, sublinear_tf=True, max_df=0.4, min_df=1, max_features=200))
('tfidf', TfidfVectorizer(token_pattern=r'[^\s]+', input='content', stop_words=None, sublinear_tf=True, max_df=0.4, min_df=1, max_features=200)),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for standard bag-of-words model for review
('review_bow', Pipeline([
('selector', ItemSelector(key='review')),
# Best Tamil Configuration
# ('tfidf', TfidfVectorizer( input='content', stop_words=None, sublinear_tf=True, max_df=0.4, min_df=1, max_features=200))
('tfidf', TfidfVectorizer( input='content', stop_words=None, sublinear_tf=True, max_df=0.4, min_df=1, max_features=200)),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from review text
('review_stats', Pipeline([
('selector', ItemSelector(key='review')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
# Pipeline for ngram model for review
('review_ngram', Pipeline([
('selector', ItemSelector(key='review')),
#tamil - best config
# ('tfidf', CountVectorizer(ngram_range=(1, 4))),
('tfidf', CountVectorizer(ngram_range=(1, 4))),
#('tfidf', TfidfVectorizer(ngram_range=(2, 4), max_df=0.4, min_df=2, norm='l2', sublinear_tf=True)),
])),
# Pipeline for pulling | |
<reponame>t-kimber/kinoml<filename>kinoml/modeling/OEModeling.py
import logging
from typing import List, Set, Union, Iterable
from openeye import oechem, oegrid, oespruce, oequacpac, oeomega, oeshape
import pandas as pd
def read_smiles(smiles: str) -> oechem.OEGraphMol:
    """
    Read molecule from a smiles string.
    Parameters
    ----------
    smiles: str
        Smiles string.
    Returns
    -------
    molecule: oechem.OEGraphMol
        A molecule as OpenEye molecules.
    """
    input_stream = oechem.oemolistream()
    input_stream.SetFormat(oechem.OEFormat_SMI)
    input_stream.openstring(smiles)
    molecules = [oechem.OEGraphMol(mol) for mol in input_stream.GetOEMols()]
    # only the first molecule of the stream is returned
    return molecules[0]
def read_molecules(path: str) -> List[oechem.OEGraphMol]:
    """
    Read molecules from a file.
    Parameters
    ----------
    path: str
        Path to molecule file.
    Returns
    -------
    molecules: list of oechem.OEGraphMol
        A List of molecules as OpenEye molecules.
    """
    from pathlib import Path

    path = str(Path(path).expanduser().resolve())
    suffix = path.split(".")[-1]
    molecules = []
    with oechem.oemolistream(path) as ifs:
        if suffix == "pdb":
            # keep PDB data sections and alternate locations when reading
            pdb_flavor = (
                oechem.OEIFlavor_PDB_Default
                | oechem.OEIFlavor_PDB_DATA
                | oechem.OEIFlavor_PDB_ALTLOC
            )
            ifs.SetFlavor(oechem.OEFormat_PDB, pdb_flavor)
        # add more flavors here if behavior should be different from `Default`
        for molecule in ifs.GetOEGraphMols():
            molecules.append(oechem.OEGraphMol(molecule))
    # TODO: returns empty list if something goes wrong
    return molecules
def read_electron_density(path: str) -> Union[oegrid.OESkewGrid, None]:
    """
    Read electron density from a file.
    Parameters
    ----------
    path: str
        Path to electron density file.
    Returns
    -------
    electron_density: oegrid.OESkewGrid or None
        The electron density grid, or None if reading failed.
    """
    from pathlib import Path

    resolved_path = str(Path(path).expanduser().resolve())
    grid = oegrid.OESkewGrid()
    # TODO: support map formats other than MTZ
    if oegrid.OEReadMTZ(resolved_path, grid, oegrid.OEMTZMapType_Fwt):
        return grid
    # TODO: returns None if something goes wrong
    return None
def write_molecules(molecules: List[oechem.OEGraphMol], path: str):
    """
    Save molecules to file.
    Parameters
    ----------
    molecules: list of oechem.OEGraphMol
        A list of OpenEye molecules for writing.
    path: str
        File path for saving molecules.
    """
    from pathlib import Path

    destination = str(Path(path).expanduser().resolve())
    with oechem.oemolostream(destination) as ofs:
        for mol in molecules:
            oechem.OEWriteMolecule(ofs, mol)
def select_chain(molecule, chain_id):
    """
    Select a chain from an OpenEye molecule.
    Parameters
    ----------
    molecule: oechem.OEGraphMol
        An OpenEye molecule holding a molecular structure.
    chain_id: str
        Chain identifier.
    Returns
    -------
    selection: oechem.OEGraphMol
        An OpenEye molecule holding the selected chain.
    """
    # work on a copy so the input molecule is untouched
    selection = molecule.CreateCopy()
    # drop every atom whose residue lies on a different chain
    for atom in selection.GetAtoms():
        if oechem.OEAtomGetResidue(atom).GetChainID() != chain_id:
            selection.DeleteAtom(atom)
    return selection
def select_altloc(molecule, altloc_id):
    """
    Select an alternate location from an OpenEye molecule.
    Parameters
    ----------
    molecule: oechem.OEGraphMol
        An OpenEye molecule holding a molecular structure.
    altloc_id: str
        Alternate location identifier.
    Returns
    -------
    selection: oechem.OEGraphMol
        An OpenEye molecule holding the selected alternate location.
    """
    # work on a copy so the input molecule is untouched
    selection = molecule.CreateCopy()
    # atoms without an altloc (" ") are always kept
    allowed_altloc_ids = [" ", altloc_id]
    for atom in selection.GetAtoms():
        residue = oechem.OEAtomGetResidue(atom)
        altloc = oechem.OEResidue.GetAlternateLocation(residue)
        # drop atoms belonging to any other alternate location
        if altloc not in allowed_altloc_ids:
            selection.DeleteAtom(atom)
    return selection
def remove_non_protein(
    molecule: oechem.OEGraphMol,
    exceptions: Union[None, List[str]] = None,
    remove_water: bool = False,
) -> oechem.OEGraphMol:
    """
    Remove non-protein atoms from an OpenEye molecule.
    Parameters
    ----------
    molecule: oechem.OEGraphMol
        An OpenEye molecule holding a molecular structure.
    exceptions: None or list of str
        Residue names of het groups that should not be removed.
    remove_water: bool
        If water should be removed.
    Returns
    -------
    selection: oechem.OEGraphMol
        An OpenEye molecule holding the filtered structure.
    """
    # BUGFIX: copy the caller's list instead of appending to it; the old
    # code mutated `exceptions` in place, so repeated calls with the same
    # list accumulated "HOH" entries.
    exceptions = [] if exceptions is None else list(exceptions)
    if remove_water is False:
        exceptions.append("HOH")
    # do not change input mol
    selection = molecule.CreateCopy()
    for atom in selection.GetAtoms():
        residue = oechem.OEAtomGetResidue(atom)
        # het atoms are removed unless their residue name is excepted
        if residue.IsHetAtom():
            if residue.GetName() not in exceptions:
                selection.DeleteAtom(atom)
    return selection
def _OEFixBuiltLoopFragmentNumbers(protein):
    """
    Temporary fix, thanks to Jesper!

    Propagate the fragment number of the preceding non-modeled residue to
    residues of loops built by Spruce, so built loops do not keep fragment
    numbers of their own.
    """
    # fragment number of the last non-modeled residue seen; -1 = none yet
    prev_fn = -1
    # Checking for CA atoms, since this will avoid messing with the caps and built sidechains,
    # since this is only a built loop problem
    builtPred = oespruce.OEIsModeledAtom()
    for atom in protein.GetAtoms(oechem.OEIsCAlpha()):
        res = oechem.OEAtomGetResidue(atom)
        fn = res.GetFragmentNumber()
        if builtPred(atom) and prev_fn != -1:
            # modeled residue: overwrite the fragment number of every atom
            # in this residue with the remembered one
            for ra in oechem.OEGetResidueAtoms(atom):
                r = oechem.OEAtomGetResidue(ra)
                r.SetFragmentNumber(prev_fn)
                oechem.OEAtomSetResidue(ra, r)
        else:
            # non-modeled residue (or first residue overall): remember its
            # fragment number for any modeled residues that follow
            prev_fn = fn
def _OEFixWaterFragmentNumbers(solvent):
    """
    Temporary fix, thanks to Jesper!

    Reassign all water atoms to the fragment number that currently holds
    the most water atoms (counting only waters without insert codes).
    """
    # count water atoms per fragment number, skipping insert-code residues
    fragment_counter = {}
    for atom in solvent.GetAtoms(oechem.OEIsWater()):
        res = oechem.OEAtomGetResidue(atom)
        if res.GetInsertCode() != " ":
            continue
        fn = res.GetFragmentNumber()
        fragment_counter[fn] = fragment_counter.get(fn, 0) + 1
    if not fragment_counter:
        # nothing counted, nothing to fix
        return
    # fragment number with the highest atom count (first wins on ties)
    largest_solvent_fn = max(fragment_counter, key=fragment_counter.get)
    # move every water atom (including insert-code ones) into that fragment
    for atom in solvent.GetAtoms(oechem.OEIsWater(True)):
        res = oechem.OEAtomGetResidue(atom)
        res.SetFragmentNumber(largest_solvent_fn)
        oechem.OEAtomSetResidue(atom, res)
def _OEFixConnectionNH(protein):
    """
    Temporary fix, thanks to Jesper!

    Rebuild the backbone amide hydrogen on modeled (built) backbone
    nitrogens that carry an unexpected hydrogen count.
    """
    # only modeled nitrogen atoms are considered
    for atom in protein.GetAtoms(
        oechem.OEAndAtom(oespruce.OEIsModeledAtom(), oechem.OEIsNitrogen())
    ):
        if oechem.OEGetPDBAtomIndex(atom) == oechem.OEPDBAtomName_N:
            # backbone N normally carries one H; proline carries none
            expected_h_count = 1
            if oechem.OEGetResidueIndex(atom) == oechem.OEResidueIndex_PRO:
                expected_h_count = 0
            if atom.GetTotalHCount() != expected_h_count:
                # strip and rebuild the hydrogen(s) with proper 3D geometry
                # NOTE(review): the implicit H count is set to 1 even when
                # expected_h_count is 0 (proline) -- confirm this is
                # intended before changing.
                oechem.OESuppressHydrogens(atom)
                atom.SetImplicitHCount(1)
                oechem.OEAddExplicitHydrogens(protein, atom)
                for nbr in atom.GetAtoms(oechem.OEIsHydrogen()):
                    oechem.OESet3DHydrogenGeom(protein, nbr)
def _OEFixLoopIssues(du):
    """
    Temporary fix, thanks to Jesper!

    Apply the loop-related fixes to a design unit: fragment numbers of
    built loops, backbone NH atoms, PDB atom order, and water fragment
    numbers.
    """
    impl = du.GetImpl()
    # fix the protein component
    protein = impl.GetProtein()
    _OEFixBuiltLoopFragmentNumbers(protein)
    _OEFixConnectionNH(protein)
    oechem.OEPDBOrderAtoms(protein)
    # fix the solvent component
    _OEFixWaterFragmentNumbers(impl.GetSolvent())
def _OEFixMissingOXT(du):
    """
    Temporary fix, thanks to Jesper!

    Turn stray H' atoms in the design unit's protein into terminal OXT
    oxygens with a -1 formal charge.
    """
    protein = du.GetImpl().GetProtein()
    for atom in protein.GetAtoms():
        if "H'" not in atom.GetName():
            continue
        atom.SetAtomicNum(8)
        atom.SetName("OXT")
        atom.SetFormalCharge(-1)
def _prepare_structure(
    structure: oechem.OEGraphMol,
    has_ligand: bool = False,
    electron_density: Union[oegrid.OESkewGrid, None] = None,
    loop_db: Union[str, None] = None,
    ligand_name: Union[str, None] = None,
    cap_termini: bool = True,
    real_termini: Union[List[int], None] = None,
) -> Union[oechem.OEDesignUnit, None]:
    """
    Prepare an OpenEye molecule holding a protein ligand complex for docking.
    Parameters
    ----------
    structure: oechem.OEGraphMol
        An OpenEye molecule holding a structure with protein and optionally a ligand.
    has_ligand: bool
        If structure contains a ligand that should be used in design unit generation.
    electron_density: oegrid.OESkewGrid
        An OpenEye grid holding the electron density.
    loop_db: str or None
        Path to OpenEye Spruce loop database. You can request a copy at
        https://www.eyesopen.com/database-downloads. A testing subset (3TPP) is available
        at https://docs.eyesopen.com/toolkits/python/sprucetk/examples_make_design_units.html.
    ligand_name: str or None
        The name of the ligand located in the binding pocket of interest.
    cap_termini: bool
        If termini should be capped with ACE and NME.
    real_termini: list of int or None
        Residue numbers of biologically real termini will not be capped with ACE and NME.
    Returns
    -------
    design_unit: oechem.OEDesignUnit or None
        An OpenEye design unit holding the prepared structure, or None if
        no design unit could be generated.
    """
    def _has_residue_number(atom, residue_numbers=real_termini):
        """Return True if atom matches any given residue number."""
        residue = oechem.OEAtomGetResidue(atom)
        return any(
            [
                True if residue.GetResidueNumber() == residue_number else False
                for residue_number in residue_numbers
            ]
        )
    # set design unit options
    structure_metadata = oespruce.OEStructureMetadata()
    design_unit_options = oespruce.OEMakeDesignUnitOptions()
    # disable capping entirely when requested
    if cap_termini is False:
        design_unit_options.GetPrepOptions().GetBuildOptions().SetCapCTermini(False)
        design_unit_options.GetPrepOptions().GetBuildOptions().SetCapNTermini(False)
    # point the loop builder at the provided Spruce loop database
    if loop_db is not None:
        from pathlib import Path

        loop_db = str(Path(loop_db).expanduser().resolve())
        design_unit_options.GetPrepOptions().GetBuildOptions().GetLoopBuilderOptions().SetLoopDBFilename(
            loop_db
        )
    # cap all termini but biologically real termini
    if real_termini is not None and cap_termini is True:
        oespruce.OECapTermini(structure, oechem.PyAtomPredicate(_has_residue_number))
        # already capped, preserve biologically real termini
        design_unit_options.GetPrepOptions().GetBuildOptions().SetCapCTermini(False)
        design_unit_options.GetPrepOptions().GetBuildOptions().SetCapNTermini(False)
    # make design units
    if has_ligand:
        if electron_density is None:
            design_units = list(
                oespruce.OEMakeDesignUnits(structure, structure_metadata, design_unit_options)
            )
            # filter design units for ligand of interest
            if ligand_name is not None:
                design_units = [
                    design_unit
                    for design_unit in design_units
                    if ligand_name in design_unit.GetTitle()
                ]
        else:
            # density-guided variant of design unit generation
            design_units = list(
                oespruce.OEMakeDesignUnits(
                    structure, electron_density, structure_metadata, design_unit_options
                )
            )
    else:
        # apo structure: build biological design units instead
        design_units = list(
            oespruce.OEMakeBioDesignUnits(structure, structure_metadata, design_unit_options)
        )
    # keep the first design unit of the list
    # NOTE(review): assumes the first returned design unit is the highest
    # quality one -- confirm against the Spruce TK documentation.
    if len(design_units) >= 1:
        design_unit = design_units[0]
    else:
        # TODO: Returns None if something goes wrong
        return None
    # fix loop issues and missing OXT
    _OEFixLoopIssues(design_unit)
    _OEFixMissingOXT(design_unit)
    return design_unit
def prepare_complex(
    protein_ligand_complex: oechem.OEGraphMol,
    electron_density: Union[oegrid.OESkewGrid, None] = None,
    loop_db: Union[str, None] = None,
    ligand_name: Union[str, None] = None,
    cap_termini: bool = True,
    real_termini: Union[List[int], None] = None,
) -> Union[oechem.OEDesignUnit, None]:
    """
    Prepare a protein-ligand complex for docking.
    Thin wrapper around _prepare_structure with has_ligand forced to True.
    Parameters
    ----------
    protein_ligand_complex: oechem.OEGraphMol
        An OpenEye molecule holding a structure with protein and ligand.
    electron_density: oegrid.OESkewGrid
        An OpenEye grid holding the electron density.
    loop_db: str or None
        Path to OpenEye Spruce loop database.
    ligand_name: str or None
        The name of the ligand located in the binding pocket of interest.
    cap_termini: bool
        If termini should be capped with ACE and NME.
    real_termini: list of int or None
        Residue numbers of biologically real termini will not be capped with ACE and NME.
    Returns
    -------
    design_unit: oechem.OEDesignUnit or None
        An OpenEye design unit holding the prepared structure with the highest
        quality among all identified design units.
    """
    return _prepare_structure(
        protein_ligand_complex,
        True,
        electron_density,
        loop_db,
        ligand_name,
        cap_termini,
        real_termini,
    )
def prepare_protein(
protein: oechem.OEGraphMol,
loop_db: Union[str, None] = None,
cap_termini: bool = True,
real_termini: Union[List[int], None] = None,
) -> Union[oechem.OEDesignUnit, None]:
"""
Prepare an OpenEye molecule holding | |
# pylint:disable=too-many-lines
import copy
import ipaddress
import struct
import sortedcontainers
import thrift.protocol.TBinaryProtocol
import thrift.transport.TTransport
import common.ttypes
import constants
import encoding.ttypes
import encoding.constants
import key
import utils
# 16-bit magic value at the start of every RIFT message envelope on the wire.
RIFT_MAGIC = 0xA1F7
class PacketInfo:
    """Everything known about a single RIFT message (received or to be sent).

    Bundles the decoded protocol packet, its Thrift encoding, the parsed
    envelope and security envelope headers, and - if decoding or
    authentication failed - one of the ERR_* error strings below.
    """
    # Decode errors: the message could not be parsed at all.
    ERR_MSG_TOO_SHORT = "Message too short"
    ERR_WRONG_MAGIC = "Wrong magic value"
    ERR_WRONG_MAJOR_VERSION = "Wrong major version"
    ERR_TRIFT_DECODE = "Thrift decode error"
    ERR_TRIFT_VALIDATE = "Thrift validate error"
    # Authentication errors: the message parsed but failed security checks.
    ERR_MISSING_OUTER_SEC_ENV = "Missing outer security envelope"
    ERR_ZERO_OUTER_KEY_ID_NOT_ACCEPTED = "Zero outer key id not accepted"
    ERR_NON_ZERO_OUTER_KEY_ID_NOT_ACCEPTED = "Non-zero outer key id not accepted"
    ERR_INCORRECT_OUTER_FINGERPRINT = "Incorrect outer fingerprint"
    ERR_MISSING_ORIGIN_SEC_ENV = "Missing TIE origin security envelope"
    ERR_ZERO_ORIGIN_KEY_ID_NOT_ACCEPTED = "Zero TIE origin key id not accepted"
    ERR_NON_ZERO_ORIGIN_KEY_ID_NOT_ACCEPTED = "Non-zero TIE origin key id not accepted"
    ERR_UNEXPECTED_ORIGIN_SEC_ENV = "Unexpected TIE origin security envelope"
    ERR_INCONSISTENT_ORIGIN_KEY_ID = "Inconsistent TIE origin key id and fingerprint"
    ERR_INCORRECT_ORIGIN_FINGERPRINT = "Incorrect TIE origin fingerprint"
    ERR_REFLECTED_NONCE_OUT_OF_SYNC = "Reflected nonce out of sync"
    DECODE_ERRORS = [
        ERR_MSG_TOO_SHORT,
        ERR_WRONG_MAGIC,
        ERR_WRONG_MAJOR_VERSION,
        ERR_TRIFT_DECODE,
        ERR_TRIFT_VALIDATE]
    AUTHENTICATION_ERRORS = [
        ERR_MISSING_OUTER_SEC_ENV,
        ERR_ZERO_OUTER_KEY_ID_NOT_ACCEPTED,
        ERR_NON_ZERO_OUTER_KEY_ID_NOT_ACCEPTED,
        ERR_INCORRECT_OUTER_FINGERPRINT,
        ERR_MISSING_ORIGIN_SEC_ENV,
        ERR_ZERO_ORIGIN_KEY_ID_NOT_ACCEPTED,
        ERR_NON_ZERO_ORIGIN_KEY_ID_NOT_ACCEPTED,
        ERR_UNEXPECTED_ORIGIN_SEC_ENV,
        ERR_INCONSISTENT_ORIGIN_KEY_ID,
        ERR_INCORRECT_ORIGIN_FINGERPRINT,
        ERR_REFLECTED_NONCE_OUT_OF_SYNC]
    def __init__(self):
        """Initialize an empty PacketInfo; the encode/decode functions in this
        module fill in the fields incrementally."""
        # Where was the message received from?
        self.rx_intf = None
        self.address_family = None
        self.from_addr_port_str = None
        # RIFT model object
        self.protocol_packet = None
        self.encoded_protocol_packet = None
        self.packet_type = None
        # Error string (None if decode was successful)
        self.error = None
        self.error_details = None
        # Envelope header (magic and packet number)
        self.env_header = None
        self.packet_nr = None
        # Outer security envelope header
        self.outer_sec_env_header = None
        self.outer_key_id = None
        self.nonce_local = None
        self.nonce_remote = None
        self.remaining_tie_lifetime = None
        self.outer_fingerprint_len = None
        self.outer_fingerprint = None
        # Origin security envelope header
        self.origin_sec_env_header = None
        self.origin_key_id = None
        self.origin_fingerprint_len = None
        self.origin_fingerprint = None
    def __str__(self):
        """Render all known fields as a space-separated key=value string."""
        result_str = ""
        if self.packet_nr is not None:
            result_str += "packet-nr={} ".format(self.packet_nr)
        if self.outer_key_id is not None:
            result_str += "outer-key-id={} ".format(self.outer_key_id)
        if self.nonce_local is not None:
            result_str += "nonce-local={} ".format(self.nonce_local)
        if self.nonce_remote is not None:
            result_str += "nonce-remote={} ".format(self.nonce_remote)
        if self.remaining_tie_lifetime is not None:
            # NOTE(review): the label says "lie-lifetime" but the field is the
            # TIE lifetime -- looks like a typo, but log consumers may depend
            # on the current spelling; confirm before changing.
            if self.remaining_tie_lifetime == 0xffffffff:
                result_str += "remaining-lie-lifetime=all-ones "
            else:
                result_str += "remaining-lie-lifetime={} ".format(self.remaining_tie_lifetime)
        if self.outer_fingerprint_len is not None:
            result_str += "outer-fingerprint-len={} ".format(self.outer_fingerprint_len)
        if self.origin_key_id is not None:
            result_str += "origin-key-id={} ".format(self.origin_key_id)
        if self.origin_fingerprint_len is not None:
            result_str += "origin-fingerprint-len={} ".format(self.origin_fingerprint_len)
        if self.protocol_packet is not None:
            result_str += "protocol-packet={}".format(self.protocol_packet)
        return result_str
    def message_parts(self):
        """Return the encoded message as a list of byte strings in wire order."""
        assert self.env_header
        assert self.outer_sec_env_header
        assert self.encoded_protocol_packet
        if self.origin_sec_env_header:
            return [self.env_header,
                    self.outer_sec_env_header,
                    self.origin_sec_env_header,
                    self.encoded_protocol_packet]
        else:
            return [self.env_header,
                    self.outer_sec_env_header,
                    self.encoded_protocol_packet]
    def update_env_header(self, packet_nr):
        """Re-pack the 4-byte envelope header (magic value + packet number)."""
        self.packet_nr = packet_nr
        self.env_header = struct.pack("!HH", RIFT_MAGIC, packet_nr)
    def update_outer_sec_env_header(self, outer_key, nonce_local, nonce_remote,
                                    remaining_lifetime=None):
        """Re-pack the outer security envelope header and its fingerprint.

        outer_key may be None, in which case a zero key id and an empty
        fingerprint are used. A missing remaining_lifetime is encoded as
        all-ones (0xffffffff).
        """
        # NOTE(review): a remaining_lifetime of 0 is falsy and is therefore
        # replaced by all-ones here -- confirm callers never pass 0.
        if remaining_lifetime:
            remaining_tie_lifetime = remaining_lifetime
        else:
            remaining_tie_lifetime = 0xffffffff
        post = struct.pack("!HHL", nonce_local, nonce_remote, remaining_tie_lifetime)
        if outer_key:
            self.outer_key_id = outer_key.key_id
            self.outer_fingerprint = outer_key.padded_digest(
                [post, self.origin_sec_env_header, self.encoded_protocol_packet])
            # Fingerprint length is expressed in 32-bit words on the wire.
            self.outer_fingerprint_len = len(self.outer_fingerprint) // 4
        else:
            self.outer_key_id = 0
            self.outer_fingerprint = b''
            self.outer_fingerprint_len = 0
        self.nonce_local = nonce_local
        self.nonce_remote = nonce_remote
        self.remaining_tie_lifetime = remaining_tie_lifetime
        reserved = 0
        major_version = encoding.constants.protocol_major_version
        pre = struct.pack("!BBBB", reserved, major_version, self.outer_key_id,
                          self.outer_fingerprint_len)
        self.outer_sec_env_header = pre + self.outer_fingerprint + post
    def update_origin_sec_env_header(self, origin_key):
        """Re-pack the TIE origin security envelope header and fingerprint.

        origin_key may be None, in which case a zero key id and an empty
        fingerprint are used.
        """
        if origin_key:
            self.origin_key_id = origin_key.key_id
            self.origin_fingerprint = origin_key.padded_digest([self.encoded_protocol_packet])
            # Fingerprint length is expressed in 32-bit words on the wire.
            self.origin_fingerprint_len = len(self.origin_fingerprint) // 4
        else:
            self.origin_key_id = 0
            self.origin_fingerprint = b''
            self.origin_fingerprint_len = 0
        # The origin key id is 24 bits wide on the wire, packed as 3 bytes.
        byte1 = (self.origin_key_id >> 16) & 0xff
        byte2 = (self.origin_key_id >> 8) & 0xff
        byte3 = self.origin_key_id & 0xff
        pre = struct.pack("!BBBB", byte1, byte2, byte3, self.origin_fingerprint_len)
        self.origin_sec_env_header = pre + self.origin_fingerprint
def ipv4_prefix_tup(ipv4_prefix):
    """Return a hashable (address, prefixlen) tuple for an IPv4 prefix."""
    return ipv4_prefix.address, ipv4_prefix.prefixlen
def ipv6_prefix_tup(ipv6_prefix):
    """Return a hashable (address, prefixlen) tuple for an IPv6 prefix."""
    return ipv6_prefix.address, ipv6_prefix.prefixlen
def ip_prefix_tup(ip_prefix):
    """Return a hashable (family, (address, prefixlen)) tuple for an IP prefix.

    Exactly one of ipv4prefix / ipv6prefix must be set.
    """
    v4 = ip_prefix.ipv4prefix
    v6 = ip_prefix.ipv6prefix
    # Exactly one of the two address families may be present.
    assert (v4 is None) != (v6 is None)
    if v4:
        return (4, (v4.address, v4.prefixlen))
    return (6, (v6.address, v6.prefixlen))
def tie_id_tup(tie_id):
    """Return a hashable (direction, originator, tietype, tie_nr) tuple."""
    return tuple(getattr(tie_id, f) for f in ('direction', 'originator', 'tietype', 'tie_nr'))
def tie_header_tup(tie_header):
    """Return a hashable (tieid, seq_nr, origination_time) tuple for a TIE header."""
    return tuple(getattr(tie_header, f) for f in ('tieid', 'seq_nr', 'origination_time'))
def link_id_pair_tup(link_id_pair):
    """Return a hashable (local_id, remote_id) tuple for a link ID pair."""
    return link_id_pair.local_id, link_id_pair.remote_id
def timestamp_tup(timestamp):
    """Return a hashable (AS_sec, AS_nsec) tuple for an IEEE 802.1AS timestamp."""
    return timestamp.AS_sec, timestamp.AS_nsec
def add_missing_methods_to_thrift():
    """Monkey-patch __hash__/__eq__/__lt__/__str__ onto generated Thrift types.

    The Thrift-generated classes lack these methods, but this module stores
    the types in sets, dicts, and sorted containers, which need them.
    See http://bit.ly/thrift-missing-hash for details about why this is needed.
    """
    common.ttypes.IPv4PrefixType.__hash__ = (
        lambda self: hash(ipv4_prefix_tup(self)))
    common.ttypes.IPv4PrefixType.__eq__ = (
        lambda self, other: ipv4_prefix_tup(self) == ipv4_prefix_tup(other))
    common.ttypes.IPv6PrefixType.__hash__ = (
        lambda self: hash(ipv6_prefix_tup(self)))
    common.ttypes.IPv6PrefixType.__eq__ = (
        lambda self, other: ipv6_prefix_tup(self) == ipv6_prefix_tup(other))
    common.ttypes.IPPrefixType.__hash__ = (
        lambda self: hash(ip_prefix_tup(self)))
    common.ttypes.IPPrefixType.__eq__ = (
        lambda self, other: ip_prefix_tup(self) == ip_prefix_tup(other))
    common.ttypes.IPPrefixType.__str__ = ip_prefix_str
    common.ttypes.IPPrefixType.__lt__ = (
        lambda self, other: ip_prefix_tup(self) < ip_prefix_tup(other))
    common.ttypes.IEEE802_1ASTimeStampType.__hash__ = (
        lambda self: hash(timestamp_tup(self)))
    common.ttypes.IEEE802_1ASTimeStampType.__eq__ = (
        lambda self, other: timestamp_tup(self) == timestamp_tup(other))
    encoding.ttypes.TIEID.__hash__ = (
        lambda self: hash(tie_id_tup(self)))
    encoding.ttypes.TIEID.__eq__ = (
        lambda self, other: tie_id_tup(self) == tie_id_tup(other))
    encoding.ttypes.TIEID.__lt__ = (
        lambda self, other: tie_id_tup(self) < tie_id_tup(other))
    encoding.ttypes.TIEHeader.__hash__ = (
        lambda self: hash(tie_header_tup(self)))
    encoding.ttypes.TIEHeader.__eq__ = (
        lambda self, other: tie_header_tup(self) == tie_header_tup(other))
    encoding.ttypes.TIEHeaderWithLifeTime.__hash__ = (
        lambda self: hash((tie_header_tup(self.header), self.remaining_lifetime)))
    encoding.ttypes.TIEHeaderWithLifeTime.__eq__ = (
        lambda self, other: (tie_header_tup(self.header) == tie_header_tup(other.header)) and
        self.remaining_lifetime == other.remaining_lifetime)
    encoding.ttypes.LinkIDPair.__hash__ = (
        lambda self: hash(link_id_pair_tup(self)))
    encoding.ttypes.LinkIDPair.__eq__ = (
        lambda self, other: link_id_pair_tup(self) == link_id_pair_tup(other))
    # (A duplicate assignment of LinkIDPair.__hash__ that appeared here has
    # been removed; it re-assigned the identical lambda.)
    encoding.ttypes.LinkIDPair.__lt__ = (
        lambda self, other: link_id_pair_tup(self) < link_id_pair_tup(other))
def encode_protocol_packet(protocol_packet, origin_key):
    """Build and encode a PacketInfo for the given protocol packet.

    origin_key is used to sign the TIE origin security envelope (only
    relevant when the packet carries a TIE).
    """
    packet_info = PacketInfo()
    packet_info.protocol_packet = protocol_packet
    content = protocol_packet.content
    # Derive the packet type from whichever content field is present.
    for present, packet_type in [(content.lie, constants.PACKET_TYPE_LIE),
                                 (content.tie, constants.PACKET_TYPE_TIE),
                                 (content.tide, constants.PACKET_TYPE_TIDE),
                                 (content.tire, constants.PACKET_TYPE_TIRE)]:
        if present:
            packet_info.packet_type = packet_type
            break
    reencode_packet_info(packet_info, origin_key)
    return packet_info
def reencode_packet_info(packet_info, origin_key):
    """Thrift-encode packet_info.protocol_packet into encoded_protocol_packet.

    Thrift has no unsigned integers, so a deep copy of the packet is made
    first and its unsigned fields are shifted into the signed range before
    encoding. The deep copy is unavoidable: transient messages (e.g. LIEs)
    hold direct or indirect references to persistent objects (TIEs stored in
    the database, TIDEs encoded once and sent many times), so fixing fields
    in place proved to be a bountiful source of bugs.

    For TIE packets the origin security envelope is refreshed here as well,
    since it only depends on the encoded payload. The envelope header and the
    outer security header remain the caller's responsibility: the former
    changes with every packet number, the latter with every nonce or
    remaining-TIE-lifetime change.
    """
    to_encode = copy.deepcopy(packet_info.protocol_packet)
    fix_prot_packet_before_encode(to_encode)
    transport = thrift.transport.TTransport.TMemoryBuffer()
    to_encode.write(thrift.protocol.TBinaryProtocol.TBinaryProtocol(transport))
    packet_info.encoded_protocol_packet = transport.getvalue()
    if packet_info.protocol_packet.content.tie:
        packet_info.update_origin_sec_env_header(origin_key)
    return packet_info
def decode_message(rx_intf, from_info, message, active_outer_key, accept_outer_keys,
                   active_origin_key, accept_origin_keys):
    """Decode and authenticate a received RIFT message.

    Always returns a PacketInfo; on failure its error / error_details fields
    describe what went wrong and decoding stops at that point.
    """
    packet_info = PacketInfo()
    record_source_info(packet_info, rx_intf, from_info)
    offset = decode_envelope_header(packet_info, message)
    if offset < 0:
        return packet_info
    offset = decode_outer_security_header(packet_info, message, offset)
    if offset < 0:
        return packet_info
    if packet_info.remaining_tie_lifetime != 0xffffffff:
        # A real (not all-ones) remaining lifetime means the message also
        # carries a TIE origin security envelope.
        offset = decode_origin_security_header(packet_info, message, offset)
        if offset < 0:
            return packet_info
    if decode_protocol_packet(packet_info, message, offset) < 0:
        return packet_info
    # Only bother checking the origin fingerprint if the outer one is valid.
    if check_outer_fingerprint(packet_info, active_outer_key, accept_outer_keys):
        check_origin_fingerprint(packet_info, active_origin_key, accept_origin_keys)
    return packet_info
def set_lifetime(packet_info, lifetime):
    """Overwrite the remaining TIE lifetime recorded in packet_info."""
    packet_info.remaining_tie_lifetime = lifetime
def record_source_info(packet_info, rx_intf, from_info):
    """Record the receiving interface and sender address in packet_info.

    from_info is a socket address tuple: 2 elements for IPv4, 4 for IPv6.
    """
    packet_info.rx_intf = rx_intf
    if not from_info:
        return
    addr = from_info[0]
    port = from_info[1]
    if len(from_info) == 2:
        packet_info.address_family = constants.ADDRESS_FAMILY_IPV4
        packet_info.from_addr_port_str = "from {}:{}".format(addr, port)
    else:
        assert len(from_info) == 4
        packet_info.address_family = constants.ADDRESS_FAMILY_IPV6
        packet_info.from_addr_port_str = "from [{}]:{}".format(addr, port)
def decode_envelope_header(packet_info, message):
    """Parse the 4-byte envelope header (magic value + packet number).

    Returns the offset just past the header, or -1 on error (with
    packet_info.error / error_details filled in).
    """
    header = message[0:4]
    if len(header) < 4:
        packet_info.error = packet_info.ERR_MSG_TOO_SHORT
        packet_info.error_details = "Missing magic and packet number"
        return -1
    (magic, packet_nr) = struct.unpack("!HH", header)
    if magic != RIFT_MAGIC:
        packet_info.error = packet_info.ERR_WRONG_MAGIC
        packet_info.error_details = "Expected 0x{:x}, got 0x{:x}".format(RIFT_MAGIC, magic)
        return -1
    packet_info.env_header = header
    packet_info.packet_nr = packet_nr
    return 4
def decode_outer_security_header(packet_info, message, offset):
start_header_offset = offset
message_len = len(message)
if offset + 4 > message_len:
packet_info.error = packet_info.ERR_MSG_TOO_SHORT
packet_info.error_details = \
"Missing major version, outer key id and outer fingerprint length"
return -1
(_reserved, major_version, outer_key_id, outer_fingerprint_len) = \
struct.unpack("!BBBB", message[offset:offset+4])
offset += 4
expected_major_version = encoding.constants.protocol_major_version
if major_version != expected_major_version:
packet_info.error = packet_info.ERR_WRONG_MAJOR_VERSION
packet_info.error_details = ("Expected {}, got {}"
.format(expected_major_version, major_version))
return -1
outer_fingerprint_len *= 4
if offset + outer_fingerprint_len > message_len:
packet_info.error | |
<filename>melina.py
#!/usr/bin/env python
import os
import sys
import shlex
import argparse
import enum
import re
import io
import lxml.etree as ET
import decimal
__version__ = '0.1'
'''
Mo
name
children
fields (Struct, Enum, Scalar)
Field
name
cardinality
type
Struct
name
fields (...)
Enum
name
enumerators (...)
Int
Float
String
'''
def _indent(text, value):
return '\n'.join(x and (' ' * value + x) or '' for x in text.split('\n'))
def _sanitize(obj, classes, maybe = False):
if maybe:
if obj == None:
return
assert isinstance(obj, classes), 'Object %s is not an instance of expected classes: %s' % (repr(obj), classes)
return obj
def _sanitize_list(lst, classes, maybe = False):
if maybe:
if lst == None:
return
for obj in lst:
assert isinstance(obj, classes), 'List element is an instance of unexpected class'
return lst
_re_identifier = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*$')
def _sanitize_identifier(name):
assert _re_identifier.match(name), 'String does not represent a valid identifier'
return name
_re_enumerator_identifier = re.compile('^[a-zA-Z0-9_]+$')
def _sanitize_enumerator_identifier(name):
assert _re_enumerator_identifier.match(name), 'String does not represent a valid identifier'
return name
def _add_to_1st_line(text, doc):
lines = text.splitlines()
lines[0] += ' // ' + doc
out = ''.join((line + '\n' for line in lines))
return out
class TranslationUnit(object):
    """A parsed meta file: a Header followed by a list of Mo definitions."""
    def __init__(self, header, mos):
        self.header = _sanitize(header, Header)
        self.mos = _sanitize_list(mos, Mo)
    def __repr__(self):
        return '<TranslationUnit with %s mos>' % len(self.mos)
    def __str__(self):
        mos_text = ''.join(str(mo) for mo in self.mos)
        return '%s\n%s' % (self.header, mos_text)
class Header(object):
    """Meta file header carrying document identification attributes."""
    # Attribute names, in the order __str__ renders them.
    attributes = ('pdmeta', 'domain', 'product', 'release', 'version', 'revision')
    def __init__(self, pdmeta = '', domain = '', product = '', release = '', version = '', revision = ''):
        self.pdmeta = _sanitize(pdmeta, str)
        self.domain = _sanitize(domain, str)
        self.product = _sanitize(product, str)
        self.release = _sanitize(release, str)
        self.version = _sanitize(version, str)
        self.revision = _sanitize(revision, str)
    def __repr__(self):
        return '<Header>'
    def __str__(self):
        # NOTE(review): _sanitize above rejects None, so pdmeta can never be
        # None here and the else branch looks unreachable -- confirm intent.
        if self.pdmeta is not None:
            return ', '.join(('%s: "%s"' % (name, getattr(self, name)) for name in self.attributes))
        else:
            return ''
class Mo(object):
    """A managed-object (mo) declaration: fields, child MOs, doc and flags."""
    # NOTE(review): class-level mutable list; not referenced inside this class,
    # presumably consumed by the parser -- confirm before changing.
    default_flags = [False, True, True, True]
    def __init__(self, name, fields, children, doc, flags):
        self.name = _sanitize_identifier(name)
        self.fields = _sanitize_list(fields, Field)
        self.children = _sanitize_list(children, MoChild)
        self.doc = _sanitize(doc, str, maybe=True)
        self.flags = _sanitize_list(flags, bool, maybe=True)
        if flags is not None:
            # The four flags correspond to the 'h', 'c', 'u', 'd' characters
            # rendered by __str__.
            assert len(flags) == 4
    def __repr__(self):
        return '<Mo %s>' % self.name
    def __str__(self):
        text = 'mo'
        if self.flags:
            # Render only the flags that are set, as an (hcud) suffix.
            text += '(%s)' % ''.join((char for flag, char in zip(self.flags, 'hcud') if flag))
        text += (
            ' %s' % self.name + ':' + ''.join((' ' + str(x) for x in self.children)) + '\n' +
            _indent(''.join((str(field) for field in self.fields)), 4)
        )
        if self.doc:
            return _add_to_1st_line(text, self.doc)
        return text
class MoChild(object):
    """A child-MO reference inside an mo declaration, with an optional max count."""
    def __init__(self, name, max_count):
        self.name = _sanitize_identifier(name)
        self.max_count = _sanitize(max_count, (int, long), maybe=True)
        if max_count is not None:
            assert max_count >= 1
    def __repr__(self):
        return '<MoChild %s>' % self.name
    def __str__(self):
        if self.max_count is None:
            return self.name
        return '%s(%s)' % (self.name, self.max_count)
class Field(object):
    """A field of an mo or struct: a cardinality, a type and a name."""
    def __init__(self, name, type_, cardinality, doc):
        self.name = _sanitize_identifier(name)
        self.type = _sanitize(type_, (Struct, Enum, Scalar))
        self.cardinality = _sanitize(cardinality, Cardinality)
        self.doc = _sanitize(doc, str, maybe=True)
    def __repr__(self):
        return '<Field %s>' % self.name
    def __str__(self):
        if isinstance(self.type, Scalar):
            opts = self.type.options
            if opts:
                opts = ' ' + opts
            text = '%s %s %s%s\n' % (self.cardinality, self.type, self.name, opts)
        else:
            # NOTE(review): unlike the scalar branch, this omits self.name and
            # the trailing newline -- presumably because the nested struct/enum
            # renders its own name and newline; confirm intent.
            text = '%s %s' % (self.cardinality, self.type)
        if self.doc:
            return _add_to_1st_line(text, self.doc)
        return text
class Cardinality(object):
    """Field cardinality: required, optional, or repeated (with optional max)."""
    def __init__(self, kind, max_count = None):
        self.kind = _sanitize(kind, CardinalityKind)
        self.max_count = _sanitize(max_count, (int, long), maybe=True)
        if max_count is not None:
            # A max count only makes sense for repeated fields, and a repeated
            # field limited to 1 would not really be repeated.
            assert kind == CardinalityKind.REPEATED
            assert max_count > 1
    def __repr__(self):
        return '<Cardinality %s>' % self.kind.name.lower()
    def __str__(self):
        if self.kind == CardinalityKind.REPEATED and self.max_count:
            return 'repeated(%s)' % self.max_count
        else:
            return self.kind.name.lower()
class CardinalityKind(enum.Enum):
    """How many values a field may carry."""
    REQUIRED = 0
    OPTIONAL = 1
    REPEATED = 2
class Struct(object):
    """A named structure type holding a list of fields."""
    def __init__(self, name, fields):
        self.name = _sanitize_identifier(name)
        self.fields = _sanitize_list(fields, Field)
    def __repr__(self):
        return '<Struct %s>' % self.name
    def __str__(self):
        return (
            'struct %s\n' % self.name +
            _indent(''.join((str(field) for field in self.fields)), 4)
        )
class Enum(object):
    """A named enumeration type with enumerators and an optional default value."""
    def __init__(self, name, enumerators, default = None):
        self.name = _sanitize_identifier(name)
        self.enumerators = _sanitize_list(enumerators, Enumerator)
        self.default = _sanitize(default, (int, long), maybe=True)
        if default is not None:
            # The default must be the value of one of the enumerators.
            assert default in ((x.value for x in enumerators))
    def __repr__(self):
        return '<Enum %s>' % self.name
    def __str__(self):
        return (
            'enum %s%s\n' % (self.name, ' [default = %s]' % self.default if self.default is not None else '') +
            _indent(''.join((str(enumer) for enumer in self.enumerators)), 4)
        )
class Enumerator(object):
    """A single name/value pair inside an enum."""
    def __init__(self, name, value):
        self.name = _sanitize_enumerator_identifier(name)
        self.value = _sanitize(value, (int, long))
    def __repr__(self):
        return '<Enumerator %s>' % self.name
    def __str__(self):
        return '%s = %s\n' % (self.name, self.value)
class Scalar(object):
    """Base class for scalar field types; str() is the lowercased class name."""
    def __repr__(self):
        return '<%s>' % type(self).__name__
    def __str__(self):
        return type(self).__name__.lower()
class Bool(Scalar):
    """Boolean scalar type with an optional default value."""
    def __init__(self, default = None):
        self.default = _sanitize(default, bool, maybe=True)
    @property
    def defaultstr(self):
        """The default rendered as 'true'/'false', or '' when unset."""
        if self.default is None:
            return ''
        return 'true' if self.default else 'false'
    @property
    def options(self):
        """The '[default = ...]' options suffix, or '' when unset."""
        if self.default is None:
            return ''
        return '[default = %s]' % self.defaultstr
class Int(Scalar):
    """Integer scalar type with optional range, step, default and units.

    When step is given, minval/maxval/step/default may be decimal.Decimal
    (fixed-point) values; otherwise the bounds and default must be plain
    integers.
    """
    def __init__(self, minval, maxval, step, default = None, units = None):
        self.minval = _sanitize(minval, (int, long, decimal.Decimal), maybe=True)
        self.maxval = _sanitize(maxval, (int, long, decimal.Decimal), maybe=True)
        self.step = _sanitize(step, (int, long, decimal.Decimal), maybe=True)
        self.default = _sanitize(default, (int, long, decimal.Decimal), maybe=True)
        self.units = _sanitize(units, str, maybe=True)
        if minval is not None or maxval is not None:
            if step is None:
                # Plain range: both bounds must be integers.
                assert isinstance(minval, (int, long))
                assert isinstance(maxval, (int, long))
            else:
                # Stepped range: both bounds are mandatory.
                assert minval is not None
                assert maxval is not None
                assert step != 0
            assert minval <= maxval
        if default is not None:
            if step is None:
                assert isinstance(default, (int, long))
    @staticmethod
    def dectostr(dec):
        """Render an int/Decimal without scientific notation."""
        decstr = str(dec)
        if 'E' in decstr:
            exp = int(str(dec).split('E')[1])
            if exp < 0:
                # Render with exactly abs(exp) fractional digits.
                return '%%.%sf' % abs(exp) % dec
            else:
                # NOTE(review): positive exponents fall back to '%f' (always
                # six fractional digits) -- confirm this is intended.
                return '%f' % dec
        else:
            return decstr
    @property
    def minvalstr(self):
        # Only meaningful when minval is not None (guarded in __str__).
        return self.dectostr(self.minval)
    @property
    def maxvalstr(self):
        # Only meaningful when maxval is not None (guarded in __str__).
        return self.dectostr(self.maxval)
    @property
    def stepstr(self):
        # Only meaningful when step is not None (guarded in __str__).
        return self.dectostr(self.step)
    @property
    def defaultstr(self):
        """The default rendered via dectostr, or '' when unset."""
        if self.default is not None:
            return self.dectostr(self.default)
        else:
            return ''
    @property
    def options(self):
        """The '[default = ..., units = "..."]' options suffix, or ''."""
        opts = []
        if self.default is not None:
            opts.append('default = %s' % self.defaultstr)
        if self.units is not None:
            opts.append('units = "%s"' % self.units)
        if opts:
            return '[%s]' % ', '.join(opts)
        else:
            return ''
    def __str__(self):
        if self.step is not None:
            return 'int(%s, %s, %s)' % (self.minvalstr, self.maxvalstr, self.stepstr)
        elif self.minval is not None:
            return 'int(%s..%s)' % (self.minvalstr, self.maxvalstr)
        else:
            return 'int'
class String(Scalar):
    """String scalar type with optional length bounds and default value."""
    def __init__(self, minlen, maxlen, default = None):
        self.minlen = _sanitize(minlen, (int, long), maybe=True)
        self.maxlen = _sanitize(maxlen, (int, long), maybe=True)
        self.default = _sanitize(default, str, maybe=True)
        if minlen is not None or maxlen is not None:
            # Length bounds must come as a consistent pair, and any default
            # must fit between them.
            assert 0 <= minlen
            assert 0 <= maxlen
            assert minlen <= maxlen
            if default is not None:
                assert minlen <= len(default) <= maxlen
        else:
            assert minlen is None
            assert maxlen is None
    @property
    def options(self):
        """The '[default = "..."]' options suffix, or '' when unset."""
        if self.default is not None:
            return '[default = "%s"]' % self.default
        else:
            return ''
    def __str__(self):
        out = 'string'
        if self.minlen is not None:
            out += '(%s..%s)' % (self.minlen, self.maxlen)
        return out
def _line(text, pos):
return text.count('\n', 0, pos) + 1
def _col(text, pos):
return pos - text.rfind('\n', 0, pos)
class MetaSpan(object):
    """A [start, end) character span within an input string, with
    line/column helpers (all 1-based)."""
    # NOTE: 'input' shadows the builtin, but it is part of the public
    # attribute interface so the name is kept.
    __slots__ = ('input', 'start', 'end')
    def __init__(self, input, start, end=None):
        self.input = input
        self.start = start
        # end may be filled in later, once the span is complete.
        self.end = end
    @property
    def start_linecol(self):
        """(line, column) of the span start."""
        return (_line(self.input, self.start), _col(self.input, self.start))
    @property
    def start_repr(self):
        """The span start rendered as 'line:col'."""
        return '%s:%s' % self.start_linecol
    @property
    def start_line(self):
        """The full text of the line containing the span start."""
        return self.input.splitlines()[self.start_linecol[0] - 1]
    @property
    def end_linecol(self):
        """(line, column) of the span end; requires end to be set."""
        return (_line(self.input, self.end), _col(self.input, self.end))
    @property
    def end_repr(self):
        """The span end rendered as 'line:col'."""
        return '%s:%s' % self.end_linecol
    def __repr__(self):
        return '%s:%s-%s:%s' % (self.start_linecol + self.end_linecol)
class MetaTokenKind(enum.Enum):
    """Kinds of tokens produced by the meta-language lexer."""
    KEYW = 0      # mo, struct, enum, repeated, optional, int, float, string
    NAME = 1      # [_a-zA-Z][_a-zA-Z0-9]*
    STRING = 2    # ".*"
    NUMBER = 3    # [0-9]*
    FLOAT = 4     # [0-9]*\.[0-9]*
    NUMNAME = 5   # [_a-zA-Z0-9]+
    LCB = 6       # {
    RCB = 7       # }
    LSB = 8       # [
    RSB = 9       # ]
    LP = 10       # (
    RP = 11       # )
    SEMI = 12     # ;
    COMMA = 13    # ,
    ASSIGN = 14   # =
    ARROW = 15    # ->
    TWODOT = 16   # ..
    COMMENT = 17  # '//\n', '/**/' <ignored, stored>
    END = 18      # end of input
class MetaToken(object):
__slots__ = ('kind', 'value', 'span', 'string')
kind2oper = {
MetaTokenKind.LCB: '{',
MetaTokenKind.RCB: '}',
MetaTokenKind.LSB: '[',
MetaTokenKind.RSB: ']',
MetaTokenKind.LP: '(',
MetaTokenKind.RP: ')',
MetaTokenKind.SEMI: ';',
| |
<reponame>brianrodri/google_appengine
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Bulkloader Config Parser and runner.
A library to read bulkloader yaml configs.
The code to interface between the bulkloader tool and the various connectors
and conversions.
"""
import copy
import os
import sys
from google.appengine.api import datastore
from google.appengine.ext.bulkload import bulkloader_errors
from google.appengine.ext.bulkload import bulkloader_parser
from google.appengine.ext.bulkload import csv_connector
from google.appengine.ext.bulkload import simpletext_connector
from google.appengine.ext.bulkload import simplexml_connector
# Maps the connector names allowed in bulkloader.yaml to factory callables
# that build the corresponding connector from its options.
CONNECTOR_FACTORIES = {
    'csv': csv_connector.CsvConnector.create_from_options,
    'simplexml': simplexml_connector.SimpleXmlConnector.create_from_options,
    'simpletext': simpletext_connector.SimpleTextConnector.create_from_options,
}
class BulkloadState(object):
  """Encapsulates state which is passed to other methods used in bulk loading.

  It is optionally passed to import/export transform functions.
  It is passed to connector objects.

  Properties:
    filename: The filename flag passed on the command line.
    loader_opts: The loader_opts flag passed on the command line.
    exporter_opts: The exporter_opts flag passed on the command line.
    current_instance: The current entity or model instance.
    current_entity: On export, the current entity instance.
    current_dictionary: The current input or output dictionary.
  """

  def __init__(self):
    # Command-line flags; filled in by the bulkloader driver.
    self.filename = ''
    self.loader_opts = None
    self.exporter_opts = None
    # Per-record state; updated by DictConvertor during import/export.
    self.current_instance = None
    self.current_entity = None
    self.current_dictionary = None
def default_export_transform(value):
  """A default export transform if nothing else is specified.

  We assume most export connectors are string based, so a string cast is used.
  However, casting None to a string leads to 'None', so that's special cased.

  Args:
    value: A value of some type.

  Returns:
    unicode(value), or u'' if value is None
  """
  if value is None:
    return u''
  else:
    # NOTE: Python 2 'unicode'; under Python 3 this would be 'str'.
    return unicode(value)
class DictConvertor(object):
  """Convert a dict to an App Engine model instance or entity. And back.

  The constructor takes a transformer spec representing a single transformer
  in a bulkloader.yaml.

  The DictConvertor object has two public methods, dict_to_entity and
  entity_to_dict, which do the conversion between a neutral dictionary (the
  input/output of a connector) and an entity based on the spec.

  Note that the model class may be used instead of an entity during the
  transform--this adds extra validation, etc, but also has a performance hit.

  NOTE(review): this is Python 2 era code (`long` is referenced below);
  confirm the target runtime before modernizing.
  """

  def __init__(self, transformer_spec):
    """Constructor. See class docstring for more info.

    Args:
      transformer_spec: A single transformer from a parsed bulkloader.yaml.
        This assumes that the transformer_spec is valid. It does not
        double check things like use_model_on_export requiring model.
    """
    self._transformer_spec = transformer_spec

    # Remember the property transform (if any) that produces '__key__'.
    # It is applied separately in __create_instance and skipped by
    # __run_import_transforms.
    self._create_key = None
    for prop in self._transformer_spec.property_map:
      if prop.property == '__key__':
        self._create_key = prop

  def dict_to_entity(self, input_dict, bulkload_state):
    """Transform the dict to a model or entity instance(s).

    Args:
      input_dict: Neutral input dictionary describing a single input record.
      bulkload_state: bulkload_state object describing the state.

    Returns:
      Entity or model instance, or collection of entity or model instances,
      to be uploaded.
    """
    # Work on a shallow copy so per-record state does not leak into the
    # caller's bulkload_state.
    bulkload_state_copy = copy.copy(bulkload_state)
    bulkload_state_copy.current_dictionary = input_dict
    instance = self.__create_instance(input_dict, bulkload_state_copy)
    bulkload_state_copy.current_instance = instance
    self.__run_import_transforms(input_dict, instance, bulkload_state_copy)
    # An optional user hook may replace the instance entirely (e.g. to
    # produce multiple entities or filter a record out).
    if self._transformer_spec.post_import_function:
      post_map_instance = self._transformer_spec.post_import_function(
          input_dict, instance, bulkload_state_copy)
      return post_map_instance
    return instance

  def entity_to_dict(self, entity, bulkload_state):
    """Transform the entity to a dict, possibly via a model.

    Args:
      entity: An entity.
      bulkload_state: bulkload_state object describing the global state.

    Returns:
      A neutral output dictionary describing the record to write to the
      output.
      In the future this may return zero or multiple output dictionaries.
    """
    # Optionally promote the raw entity to a model instance for extra
    # validation (slower; see class docstring).
    if self._transformer_spec.use_model_on_export:
      instance = self._transformer_spec.model.from_entity(entity)
    else:
      instance = entity
    export_dict = {}
    bulkload_state.current_entity = entity
    bulkload_state.current_instance = instance
    bulkload_state.current_dictionary = export_dict
    self.__run_export_transforms(instance, export_dict, bulkload_state)
    # An optional user hook may replace the output dict entirely.
    if self._transformer_spec.post_export_function:
      post_export_result = self._transformer_spec.post_export_function(
          instance, export_dict, bulkload_state)
      return post_export_result
    return export_dict

  def __dict_to_prop(self, transform, input_dict, bulkload_state):
    """Handle a single property on import.

    Args:
      transform: The transform spec for this property.
      input_dict: Neutral input dictionary describing a single input record.
      bulkload_state: bulkload_state object describing the global state.

    Returns:
      The value for this particular property.
    """
    # An import_template ("%(col)s"-style) takes precedence over a plain
    # external_name lookup; a missing external_name yields None.
    if transform.import_template:
      value = transform.import_template % input_dict
    else:
      value = input_dict.get(transform.external_name)
    if transform.import_transform:
      # Some transforms want access to the global state as a kwarg.
      if transform.import_transform.supports_bulkload_state:
        value = transform.import_transform(value, bulkload_state=bulkload_state)
      else:
        value = transform.import_transform(value)
    return value

  def __create_instance(self, input_dict, bulkload_state):
    """Return a model instance or entity from an input_dict.

    Args:
      input_dict: Neutral input dictionary describing a single input record.
      bulkload_state: bulkload_state object describing the global state.

    Returns:
      Entity or model instance, or collection of entity or model instances,
      to be uploaded.
    """
    key = None
    if self._create_key:
      key = self.__dict_to_prop(self._create_key, input_dict, bulkload_state)
      # A numeric key is interpreted as a datastore id for this kind.
      if isinstance(key, (int, long)):
        key = datastore.Key.from_path(self._transformer_spec.kind, key)
      if self._transformer_spec.model:
        # Model path: full Key object vs. plain key_name string.
        if isinstance(key, datastore.Key):
          return self._transformer_spec.model(key=key)
        else:
          return self._transformer_spec.model(key_name=key)
      else:
        if isinstance(key, datastore.Key):
          # Preserve the parent and either the numeric id or the name.
          parent = key.parent()
          if key.name() is None:
            return datastore.Entity(self._transformer_spec.kind,
                                    parent=parent, id=key.id())
          else:
            return datastore.Entity(self._transformer_spec.kind,
                                    parent=parent, name=key.name())
    elif self._transformer_spec.model:
      # No key transform: let the datastore assign an id.
      return self._transformer_spec.model()
    # Fallback: entity keyed by whatever string (or None) we ended up with.
    return datastore.Entity(self._transformer_spec.kind, name=key)

  def __run_import_transforms(self, input_dict, instance, bulkload_state):
    """Fill in a single entity or model instance from an input_dict.

    Args:
      input_dict: Input dict from the connector object.
      instance: Entity or model instance to fill in.
      bulkload_state: Passed bulkload state.
    """
    for transform in self._transformer_spec.property_map:
      # The key was already consumed by __create_instance.
      if transform.property == '__key__':
        continue
      value = self.__dict_to_prop(transform, input_dict, bulkload_state)
      if self._transformer_spec.model:
        setattr(instance, transform.property, value)
      else:
        instance[transform.property] = value

  def __prop_to_dict(self, value, property_name, transform, export_dict,
                     bulkload_state):
    """Transform a single export-side field value to dict property.

    Args:
      value: Value from the entity or model instance.
      property_name: Name of the value in the entity or model instance.
      transform: Transform property, either an ExportEntry or PropertyEntry
      export_dict: output dictionary.
      bulkload_state: Passed bulkload state.

    Raises:
      ErrorOnTransform, encapsulating an error encountered during the transform.
    """
    if transform.export_transform:
      try:
        if transform.export_transform.supports_bulkload_state:
          transformed_value = transform.export_transform(
              value, bulkload_state=bulkload_state)
        else:
          transformed_value = transform.export_transform(value)
      except Exception as err:
        # Wrap the failure with enough context to locate the bad transform.
        raise bulkloader_errors.ErrorOnTransform(
            'Error on transform. '
            'Property: %s External Name: %s. Code: %s Details: %s' %
            (property_name, transform.external_name, transform.export_transform,
             err))
    else:
      transformed_value = default_export_transform(value)
    export_dict[transform.external_name] = transformed_value

  def __run_export_transforms(self, instance, export_dict, bulkload_state):
    """Fill in export_dict for an entity or model instance.

    Args:
      instance: Entity or model instance
      export_dict: output dictionary.
      bulkload_state: Passed bulkload state.
    """
    for transform in self._transformer_spec.property_map:
      if transform.property == '__key__':
        value = instance.key()
      elif self._transformer_spec.use_model_on_export:
        value = getattr(instance, transform.property, transform.default_value)
      else:
        value = instance.get(transform.property, transform.default_value)
      # A property may fan out to several output columns (transform.export)
      # or map to a single external_name.
      if transform.export:
        for prop in transform.export:
          self.__prop_to_dict(value, transform.property, prop, export_dict,
                              bulkload_state)
      elif transform.external_name:
        self.__prop_to_dict(value, transform.property, transform, export_dict,
                            bulkload_state)
class GenericImporter(object):
"""Generic Bulkloader import class for input->dict->model transformation.
The bulkloader will call generate_records and create_entity, and
we'll delegate those to the passed in methods.
"""
def __init__(self, import_record_iterator, dict_to_entity, name,
             reserve_keys):
    """Constructor.

    Args:
      import_record_iterator: Method which yields neutral dictionaries.
      dict_to_entity: Method dict_to_entity(input_dict) returning model or
        entity instance(s).
      name: Name to register with the bulkloader importers (as 'kind').
      reserve_keys: Method ReserveKeys(keys) advancing the datastore id
        sequence beyond each key.id(); may be None to disable reservation.
    """
    # Delegates supplied by the caller.
    self.import_record_iterator = import_record_iterator
    self.dict_to_entity = dict_to_entity
    self.kind = name
    self.reserve_keys = reserve_keys
    # Keys collected during the upload, reserved in finalize().
    self.keys_to_reserve = []
    # Shared mutable state handed to the record iterator.
    self.bulkload_state = BulkloadState()
def get_keys_to_reserve(self):
    """Part of the bulkloader Loader interface.

    The bulkloader does not currently consult this during import; any
    necessary key reservation happens in finalize() instead.

    Returns:
      An empty list (no keys are reserved up front).
    """
    return []
def initialize(self, filename, loader_opts):
"""Performs initialization. Merely records the values for later use.
Args:
filename: The string given as the --filename flag argument.
loader_opts: The string given as the --loader_opts flag argument.
"""
self.bulkload_state.loader_opts = loader_opts
self.bulkload_state.filename = filename
def finalize(self):
"""Performs finalization actions after the upload completes.
If keys with numeric ids were used on import, this will call AllocateIds
to ensure that autogenerated IDs will not raise exceptions on conflict
with uploaded entities.
"""
if self.reserve_keys:
self.reserve_keys(self.keys_to_reserve)
def generate_records(self, filename):
"""Iterator yielding neutral dictionaries from the connector object.
Args:
filename: Filename argument passed in on the command line.
Returns:
Iterator yielding neutral dictionaries, later passed to create_entity.
"""
return self.import_record_iterator(filename, self.bulkload_state)
def generate_key(self, line_number, unused_values):
"""Bulkloader method to generate keys, mostly unused here.
This is called by the bulkloader just before it calls create_entity. The
line_number is returned to be passed to the record dict, but | |
If no label information
is available, this will only be the style part of the code.
"""
length = len(data)
code = np.ndarray((length, self.code_dim))
offset = 0
batches = self._np_batcher(data, batch_size=batch_size)
code_batches = \
self._tf_encode_batches(batches, tf_encoded=self._tf_encoded_style)
for batch in code_batches:
end = offset + len(batch)
code[offset: end] = batch
offset = end
if labels is None:
return code
labels_one_hot = self.one_hot(labels, length=length)
return np.concatenate((code, labels_one_hot), axis=1)
def decode(self, code: np.ndarray, batch_size: int = 128,
           labels: Optional[np.ndarray] = None) -> np.ndarray:
    """Map code vectors back to data space with the decoder network.

    Arguments
    ---------
    code:
        The codes to be decoded. May already carry the one-hot label
        part appended, or consist of the style part only, in which case
        explicit labels are required.
    labels:
        Labels to append as one-hot vectors when *code* is style-only.

    Result
    ------
    data:
        The reconstructed data.
    """
    style_only = code.shape[1] == self.code_dim
    if style_only:
        if labels is None:
            raise ValueError("The supervised autoencoder needs explicit "
                             "label information for decoding.")
        one_hot = self.one_hot(labels, length=len(code))
        code = np.concatenate((code, one_hot), axis=1)
    return super().decode(code, batch_size=batch_size)
def recode(self, data: np.ndarray, batch_size: int = 128,
           labels: Optional[np.ndarray] = None) -> np.ndarray:
    """Reconstruct data by encoding and then decoding it again.

    Arguments
    ---------
    data:
        The data to be recoded.

    Result
    ------
    recoded:
        The reconstructed data.
    """
    # FIXME[hack]: just encode + decode. A full tensorflow
    # recoding would probably be more efficient.
    encoded = self.encode(data, batch_size=batch_size, labels=labels)
    return self.decode(encoded, batch_size=batch_size)
def _prepare_tensors(self) -> None:
    """Build the TensorFlow graph for the supervised adversarial
    autoencoder.

    The superclass contributes the plain autoencoder part (placeholders
    'inputs'/'outputs' and tensors 'encoded', 'decoded',
    'loss_reconstruction').  This method adds the adversarial part: a
    discriminator fed either the encoder output ("fake") or a sample
    drawn from the prior over (style, label) ("real"), plus the usual
    GAN losses:

    * loss_discriminator: classify prior samples as real (label 1) and
      encoder outputs as fake (label 0).
    * loss_generator: train the encoder so the discriminator classifies
      its outputs as real.

    Tensorflow properties are prefixed with '_tf_'.
    """
    # Standard autoencoder graph (encoder/decoder + reconstruction loss).
    super()._prepare_tensors()

    #
    # The discriminators
    #
    # "Real" input: style and one-hot label sampled from the prior.
    prior_sample = tf.concat([self._tf_prior_style, self._tf_prior_label],
                             axis=1)
    real_logits = self._tf_discriminator(prior_sample, self._tf_keep_prob)
    fake_logits = self._tf_discriminator(self._tf_encoded,
                                         self._tf_keep_prob)

    # Discriminator: push fake logits towards 0, real logits towards 1.
    loss_fake = tf.nn.sigmoid_cross_entropy_with_logits(
        logits=fake_logits, labels=tf.zeros_like(fake_logits))
    loss_real = tf.nn.sigmoid_cross_entropy_with_logits(
        logits=real_logits, labels=tf.ones_like(real_logits))
    self._tf_loss_discriminator = \
        tf.reduce_mean(loss_fake) + tf.reduce_mean(loss_real)

    # Generator: fool the discriminator (fake logits towards 1).
    loss_gen = tf.nn.sigmoid_cross_entropy_with_logits(
        logits=fake_logits, labels=tf.ones_like(fake_logits))
    self._tf_loss_generator = tf.reduce_mean(loss_gen)
def _tf_encoder(self, data, keep_prob):
    """Encoder: the superclass provides the style code; the one-hot
    label placeholder is appended to form the full latent code.
    """
    style = super()._tf_encoder(data, keep_prob)
    self._tf_encoded_style = style
    return tf.concat([style, self._tf_labels], axis=1)
def train(self,
          labeled_train_data: Datasource,
          labeled_display_data: Datasource,
          sess: tf.Session, saver) -> None:
    """Train the network.

    Runs the full training loop: iterates epochs over labeled batches,
    feeds each batch together with samples from the prior distribution,
    periodically plots reconstructions, prints per-epoch loss summaries
    and (if *saver* is given) writes checkpoints.
    """
    # display_data: (images, images_noised, labels)
    # A batch of data used for plotting intermediate results
    # during training (taken from the validation dataset)
    # Each is a numpy array of length 100 and appropriate shape.
    display_data = labeled_display_data[:100, ('array', 'array', 'label')]

    #
    # prepare the optimizers
    #

    # obtain the variables used in the model
    total_vars = tf.trainable_variables()
    # var_ae = [var for var in total_vars
    #           if "encoder" in var.name or "decoder" in var.name]

    # Optimizers
    self._tf_define_optimizers(total_vars)

    #
    # Start the training
    #
    # start_epoch may be > 0 when resuming from a checkpoint.
    start_epoch = self._tf_initialize_variables(sess, saver)
    start_time = time.time()
    total_batch = len(labeled_train_data) // self._conf.batch_size
    # Endless (loop=True) batch iterator over (clean, noisy, label).
    labeled_batch_iterator = \
        labeled_train_data(batch_size=self._conf.batch_size, loop=True,
                           attributes=('array', 'noisy', 'label'))
    for epoch in tqdm(range(start_epoch, self._conf.n_epoch),
                      initial=start_epoch, total=self._conf.n_epoch):
        # Per-epoch running means of the three losses.
        likelihood = 0
        discriminator_value = 0
        generator_value = 0

        # Adapt the learning rate depending on the epoch
        lr_value = self._learning_rate_schedule(epoch)

        for _batch_idx in tqdm(range(total_batch)):
            batch_xs, batch_noised_xs, batch_ys = \
                next(labeled_batch_iterator)

            # Sample from the prior distribution
            prior_style, prior_label_onehot = \
                self._sample_prior(self._conf.prior, zdim=self.code_dim,
                                   nclasses=self.nclasses,
                                   batch_size=self._conf.batch_size,
                                   use_label_info=True)

            # Denoising setup: noisy input, clean reconstruction target.
            feed_dict = {
                self._tf_inputs: batch_noised_xs,
                self._tf_outputs: batch_xs,
                self._tf_labels: batch_ys,
                self._tf_prior_style: prior_style,
                self._tf_prior_label: prior_label_onehot,
                self._tf_learning_rate: lr_value,
                self._tf_keep_prob: self._conf.keep_prob
            }

            # AutoEncoder phase
            self._tf_train_step(sess, feed_dict)

            # Summary: accumulate batch losses into per-epoch means.
            likelihood += \
                self._loss_reconstruction_value/total_batch
            discriminator_value += \
                self._loss_discriminator_value/total_batch
            generator_value += \
                self._loss_generator_value/total_batch

        # every 5th epoch, and on the final epoch, plot the manifold canvas
        if epoch % 5 == 0 or epoch == (self._conf.n_epoch - 1):
            name = f"Manifold_canvas_{epoch}"
            self.plot_recoded_images(display_data[1],
                                     targets=display_data[0],
                                     labels=display_data[2],
                                     filename=name)

        # output end of epoch information
        runtime = time.time() - start_time
        print(f"Epoch: {epoch:3d}, "
              f"global step: {sess.run(self._tf_global_step)}, "
              f"Time: {datetime.timedelta(seconds=runtime)}")
        print(f" lr_AE: {lr_value:.5f}"
              f" loss_AE: {likelihood:.4f} ")
        print(f" lr_D: {lr_value/5:.5f}"
              f" loss_D: {discriminator_value:.4f}")
        print(f" lr_G: {lr_value:.5f}"
              f" loss_G: {generator_value:.4f}\n")
        if saver is not None:
            saver.save(sess, 'checkpoints/my_test_model',
                       global_step=self._tf_global_step,
                       write_meta_graph=False)
            print(f"Saver: {saver.last_checkpoints}")
class SemisupervisedAdversarialAutoencoder(LabeledAdversarialAutoencoder):
"""Specialized sublasse for a semi-supervised
Adversarial Autoencoder (AAE) implementation.
The main differences to the fully supervised AAE are
the following:
* the encoder (generator) also outputs class labels. That is the latent
representation is split into two parts: the continuous z and the one-hot
encoded label information y.
* there now are two discriminators, one for each part of the latent
representation, and two loss functions training them:
`_loss_discriminator_style` and `_loss_discriminator_label`.
Training the z part requires z value output from the encoder/generator
()
as well as real z values sampled from the target distribution.
Training the y part requires the label output from the encoder/generator
* the produced class labels can be used as additional training objective
to train the encoder (generator) to minimize crossentropy loss, if
ground truth label are available (supervised case). This loss function
is stored under the name `_crossentropy_labels`. The training process
requires input data with real class labels.
"""
def __init__(self, **kwargs) -> None:
    """Initialize the semi-supervised AAE and declare all attributes
    that are filled in later by _prepare_tensors / the optimizer setup.
    """
    super().__init__(**kwargs)
    self._conf.model = 'semi_supervised'

    # Declare every lazily-initialized attribute up front:
    # model tensors, the two discriminator losses, the optimizers and
    # their last observed loss values.
    for attribute in ('_style', '_crossentropy_labels',
                      '_tf_loss_discriminator_label',
                      '_tf_loss_discriminator_style',
                      '_op_z_discriminator', '_op_y_discriminator',
                      '_op_generator', '_op_crossentropy_labels',
                      '_l_z_discriminator', '_l_y_discriminator',
                      '_l_generator', '_crossentropy'):
        setattr(self, attribute, None)
def _prepare_tensors(self) -> None:
    """Build the TensorFlow graph for the semi-supervised adversarial
    autoencoder: encoder with (style, softmax-label) output, decoder,
    one discriminator per latent part, the GAN losses, and the
    supervised crossentropy loss on the label logits.
    """
    super()._prepare_tensors()
    # placeholders
    # FIXME[semi]: self._z_cat
    # Y_cat = tf.placeholder(dtype=tf.float32,
    #                        shape=[None, n_cls], name="labels_cat")
    # labels_cat = self._tf_prior_label

    # FIXME[coding]: code duplication
    flat_len = \
        self._data_shape[0] * self._data_shape[1] * self._data_shape[2]
    inputs_flat = tf.reshape(self._tf_inputs, [-1, flat_len])
    outputs_flat = tf.reshape(self._tf_outputs, [-1, flat_len])

    # Encoder: style code plus softmax label estimate; a second call
    # (variables are reused) yields the raw label logits used by the
    # supervised crossentropy loss.
    self._style, labels_softmax = self._tf_semi_encoder(
        inputs_flat, self._tf_keep_prob, semi_supervised=False)
    _, labels_generated = self._tf_semi_encoder(
        inputs_flat, self._tf_keep_prob, semi_supervised=True)

    # Decoder reconstructs from the concatenated (style, label) code.
    self._tf_decoded = self._tf_semi_decoder(
        tf.concat([self._style, labels_softmax], axis=1),
        self._tf_keep_prob)

    self._tf_loss_reconstruction = tf.reduce_mean(
        tf.squared_difference(self._tf_decoded, outputs_flat))

    def gan_losses(real_logits, fake_logits):
        # Standard GAN losses: the discriminator pushes real -> 1 and
        # fake -> 0; the generator pushes fake -> 1.
        d_loss = \
            tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                logits=real_logits, labels=tf.ones_like(real_logits))) + \
            tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                logits=fake_logits, labels=tf.zeros_like(fake_logits)))
        g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=fake_logits, labels=tf.ones_like(fake_logits)))
        return d_loss, g_loss

    #
    # the discriminators
    #
    # Label discriminator: encoder softmax vs. one-hot prior labels.
    label_fake = self._tf_semi_y_discriminator(labels_softmax,
                                               self._tf_keep_prob)
    label_real = self._tf_semi_y_discriminator(self._tf_prior_label,
                                               self._tf_keep_prob)
    self._tf_loss_discriminator_label, generator_loss_label = \
        gan_losses(label_real, label_fake)

    # Style discriminator: encoder style vs. samples from the prior.
    style_fake = self._tf_semi_z_discriminator(self._style,
                                               self._tf_keep_prob)
    style_real = self._tf_semi_z_discriminator(self._tf_prior_style,
                                               self._tf_keep_prob)
    self._tf_loss_discriminator_style, generator_loss_style = \
        gan_losses(style_real, style_fake)

    self._tf_loss_generator = generator_loss_style + generator_loss_label

    # Supervised objective: crossentropy between the label logits and
    # the ground-truth labels (only meaningful for labeled batches).
    self._crossentropy_labels = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(
            logits=labels_generated, labels=self._tf_labels))
def _tf_semi_encoder(self, data, keep_prob, semi_supervised=False):
    """Encoder for semi-supervised AAE.

    Arguments
    ---------
    data:
        Flat input batch.
    keep_prob:
        Dropout keep probability.
    semi_supervised:
        If True, return the raw label logits (for the supervised
        crossentropy loss); otherwise return the softmax label estimate
        used by the decoder and the label discriminator.

    Result
    ------
    style, labels:
        The style code and the label output (logits or softmax).
    """
    with tf.variable_scope("semi_encoder", reuse=tf.AUTO_REUSE):
        net = tf_helper.dense_layer(data, self._conf.semi_n_hidden,
                                    name="dense_1", keep_prob=keep_prob)
        net = tf_helper.dense_layer(net, self._conf.semi_n_hidden,
                                    name="dense_2", keep_prob=keep_prob)
        style = tf_helper.dense(net, self.code_dim, name="style")
        # BUG FIX: both branches must share the same classifier head.
        # Previously the semi_supervised branch created a separate
        # "label_logits" dense layer, so the supervised crossentropy
        # trained variables that the decoder/discriminator path (which
        # uses "labels") never saw.  Sharing one head ("labels") makes
        # the softmax output the softmax of the supervised logits.
        label_logits = tf_helper.dense(net, self.nclasses, name="labels")
        if semi_supervised:
            labels_generated = label_logits
        else:
            labels_generated = tf.nn.softmax(label_logits)
    return style, labels_generated
def _tf_semi_decoder(self, code, keep_prob):
    """Decoder for semi-supervised AAE.

    Result
    ------
    decoder:
        A flat tensor holding the decoded data (sigmoid activation,
        so values lie in [0, 1]).
    """
    shape = self._data_shape
    flat_len = shape[0] * shape[1] * shape[2]
    with tf.variable_scope("semi_decoder", reuse=tf.AUTO_REUSE):
        hidden = tf_helper.dense_layer(code, self._conf.semi_n_hidden,
                                       name="dense_1", keep_prob=keep_prob)
        hidden = tf_helper.dense_layer(hidden, self._conf.semi_n_hidden,
                                       name="dense_2", keep_prob=keep_prob)
        decoded = tf.nn.sigmoid(tf_helper.dense(hidden, flat_len,
                                                name="dense_3"))
    return decoded
def _tf_semi_z_discriminator(self, style, keep_prob):
    """Discriminator for style codes: two dense layers producing a
    single real/fake logit.
    """
    with tf.variable_scope("semi_z_discriminator", reuse=tf.AUTO_REUSE):
        hidden = tf_helper.dense_layer(style, self._conf.semi_n_hidden,
                                       name="dense_1", keep_prob=keep_prob)
        hidden = tf_helper.dense_layer(hidden, self._conf.semi_n_hidden,
                                       name="dense_2", keep_prob=keep_prob)
        logits = tf_helper.dense(hidden, 1, name="dense_3")
    return logits
def _tf_semi_y_discriminator(self, label, keep_prob):
"""Discriminator for class labels.
"""
with tf.variable_scope("semi_y_discriminator", reuse=tf.AUTO_REUSE):
net = | |
# -*- coding: utf-8 -*-
"""
This file contains MLTools class and all developed methods.
"""
# Python2 support
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pickle
class MLTools(object):
    """
    A Python implementation of several methods needed for machine learning
    classification/regression.

    Attributes:
        last_training_pattern (numpy.ndarray): last row of the most recent
            training matrix, kept for posterior predictions.
        has_trained (boolean): whether training was performed at least once.
        cv_best_rmse: best RMSE found by cross-validation, or the string
            "Not cross-validated".
    """

    def __init__(self):
        self.last_training_pattern = []
        self.has_trained = False
        self.cv_best_rmse = "Not cross-validated"

    #################################################
    ########### Methods to be overridden ############
    #################################################

    def _local_train(self, training_patterns, training_expected_targets,
                     params):
        """
        Should be overridden.
        """
        return None

    def _local_test(self, testing_patterns, testing_expected_targets,
                    predicting):
        """
        Should be overridden.
        """
        return None

    # ########################
    # Public Methods
    # ########################

    def _ml_search_param(self, database, dataprocess, path_filename, save,
                         cv, min_f):
        """
        Should be overridden.
        """
        return None

    def _ml_print_parameters(self):
        """
        Should be overridden.
        """
        return None

    def _ml_predict(self, horizon=1):
        """
        Predict next targets based on previous training.

        Arguments:
            horizon (int): number of predictions.

        Returns:
            numpy.ndarray: a column vector containing all predicted targets,
            or None if the model has not been trained yet.
        """
        if not self.has_trained:
            print("Error: Train before predict.")
            return

        # Create first new pattern: shift the last training pattern one
        # step and append its (old) target as the newest regressor value.
        new_pattern = np.hstack([self.last_training_pattern[2:],
                                 self.last_training_pattern[0]])

        # Create a fake target (1)
        new_pattern = np.insert(new_pattern, 0, 1).reshape(1, -1)

        predicted_targets = np.zeros((horizon, 1))
        for t_counter in range(horizon):
            te_errors = self.test(new_pattern, predicting=True)
            predicted_value = te_errors.predicted_targets
            predicted_targets[t_counter] = predicted_value

            # Create a new pattern including the actual predicted value
            new_pattern = np.hstack([new_pattern[0, 2:],
                                     np.squeeze(predicted_value)])

            # Create a fake target
            new_pattern = np.insert(new_pattern, 0, 1).reshape(1, -1)

        return predicted_targets

    def _ml_train(self, training_matrix, params):
        """
        Train on a matrix whose first column holds the targets and the
        remaining columns the input patterns; returns the training Error.
        """
        training_patterns = training_matrix[:, 1:]
        training_expected_targets = training_matrix[:, 0]

        training_predicted_targets = \
            self._local_train(training_patterns,
                              training_expected_targets,
                              params)

        training_errors = Error(training_expected_targets,
                                training_predicted_targets,
                                regressor_name=self.regressor_name)

        # Save last pattern for posterior predictions
        self.last_training_pattern = training_matrix[-1, :]
        self.has_trained = True

        return training_errors

    def _ml_test(self, testing_matrix, predicting=False):
        """
        Test on a matrix in the same layout as _ml_train; returns the
        testing Error.
        """
        testing_patterns = testing_matrix[:, 1:]
        testing_expected_targets = testing_matrix[:, 0].reshape(-1, 1)

        testing_predicted_targets = self._local_test(testing_patterns,
                                                     testing_expected_targets,
                                                     predicting)

        testing_errors = Error(testing_expected_targets,
                               testing_predicted_targets,
                               regressor_name=self.regressor_name)

        return testing_errors

    def _ml_train_iterative(self, database_matrix, params=None,
                            sliding_window=168, k=1):
        """
        Training method used by Fred 09 paper.

        Note: database_matrix is de-trended in place (a z offset is
        subtracted from every input pattern).
        """
        # Avoid the shared-mutable-default pitfall (was params=[]);
        # behavior is unchanged for callers that omit the argument.
        if params is None:
            params = []

        # Number of dimension/lags/order
        p = database_matrix.shape[1] - 1

        # Amount of training/testing procedures
        number_iterations = database_matrix.shape[0] + p - k - \
            sliding_window + 1
        print("Number of iterations: ", number_iterations)

        # Training set size
        tr_size = sliding_window - p - 1

        # Sum -z_i value to every input pattern, Z = r_t-(p-1)-k
        z = database_matrix[0:-k, 1].reshape(-1, 1) * np.ones((1, p))
        database_matrix[k:, 1:] = database_matrix[k:, 1:] - z

        pr_target = []
        ex_target = []
        for i in range(number_iterations):
            # Train with sliding window training dataset
            self._ml_train(database_matrix[k+i:k+i+tr_size-1, :], params)

            # Predicted target with training_data - z_i ( r_t+1 )
            pr_t = self._ml_predict(horizon=1)

            # Sum z_i value to get r'_t+1 = r_t+1 + z_i
            pr_t = pr_t[0][0] + z[i, 0]
            pr_target.append(pr_t)

            # Expected target
            ex_target.append(database_matrix[k+i+tr_size, 0])

        pr_result = Error(expected=ex_target, predicted=pr_target)
        return pr_result

    def save_regressor(self, file_name):
        """
        Save current classifier/regressor to file_name file.

        Failures are reported on stdout (best effort), not raised.
        """
        try:
            # First save all class attributes.  Narrowed from a bare
            # 'except:' so KeyboardInterrupt/SystemExit still propagate.
            with open(file_name, 'wb') as f:
                pickle.dump(self, f, protocol=pickle.HIGHEST_PROTOCOL)
        except Exception:
            print("Error while saving ", file_name)
            return
        else:
            print("Saved model as: ", file_name)

    def load_regressor(self, file_name):
        """
        Load classifier/regressor to memory.

        Returns the loaded object, or None on failure (reported on stdout).
        """
        try:
            # First load all class attributes.  Narrowed from a bare
            # 'except:' (see save_regressor).
            with open(file_name, 'rb') as f:
                loaded = pickle.load(f)
        except Exception:
            print("Error while loading ", file_name)
            return

        return loaded
class Error(object):
"""
Error is a class that saves expected and predicted values to calculate
error metrics.
Attributes:
regressor_name (str): Deprecated.
expected_targets (numpy.ndarray): array of expected values.
predicted_targets (numpy.ndarray): array of predicted values.
dict_errors (dict): a dictionary containing all calculated errors
and their values.
"""
available_error_metrics = ["rmse", "mse", "mae", "me", "mpe", "mape",
"std", "hr", "hr+", "hr-", "accuracy"]
def __init__(self, expected, predicted, regressor_name=""):
if type(expected) is list:
expected = np.array(expected)
if type(predicted) is list:
predicted = np.array(predicted)
expected = expected.flatten()
predicted = predicted.flatten()
self.regressor_name = regressor_name
self.expected_targets = expected
self.predicted_targets = predicted
self.dict_errors = {}
for error in self.available_error_metrics:
self.dict_errors[error] = "Not calculated"
def _calc(self, name, expected, predicted):
"""
a
"""
if self.dict_errors[name] == "Not calculated":
if name == "mae":
error = expected - predicted
self.dict_errors[name] = np.mean(np.fabs(error))
elif name == "me":
error = expected - predicted
self.dict_errors[name] = error.mean()
elif name == "mse":
error = expected - predicted
self.dict_errors[name] = (error ** 2).mean()
elif name == "rmse":
error = expected - predicted
self.dict_errors[name] = np.sqrt((error ** 2).mean())
elif name == "mpe":
if np.count_nonzero(expected != 0) == 0:
self.dict_errors[name] = np.nan
else:
# Remove all indexes that have 0, so I can calculate
# relative error
find_zero = expected != 0
_et = np.extract(find_zero, expected)
_pt = np.extract(find_zero, predicted)
relative_error = (_et - _pt) / _et
self.dict_errors[name] = 100 * relative_error.mean()
elif name == "mape":
if np.count_nonzero(expected != 0) == 0:
self.dict_errors[name] = np.nan
else:
# Remove all indexes that have 0, so I can calculate
# relative error
find_zero = expected != 0
_et = np.extract(find_zero, expected)
_pt = np.extract(find_zero, predicted)
relative_error = (_et - _pt) / _et
self.dict_errors[name] = \
100 * np.fabs(relative_error).mean()
elif name == "std":
error = expected - predicted
self.dict_errors[name] = np.std(error)
elif name == "hr":
_c = expected * predicted
if np.count_nonzero(_c != 0) == 0:
self.dict_errors[name] = np.nan
else:
self.dict_errors[name] = np.count_nonzero(_c > 0) / \
np.count_nonzero(_c != 0)
elif name == "hr+":
_a = expected
_b = predicted
if np.count_nonzero(_b > 0) == 0:
self.dict_errors[name] = np.nan
else:
self.dict_errors[name] = \
np.count_nonzero((_a > 0) * (_b > 0)) / \
np.count_nonzero(_b > 0)
elif name == "hr-":
_a = expected
_b = predicted
if np.count_nonzero(_b < 0) == 0:
self.dict_errors[name] = np.nan
else:
self.dict_errors[name] = \
np.count_nonzero((_a < 0) * (_b < 0)) / \
np.count_nonzero(_b < 0)
elif name == "accuracy":
_a = expected.astype(int)
_b = np.round(predicted).astype(int)
self.dict_errors[name] = np.count_nonzero(_a == _b) / _b.size
else:
print("Error:", name,
"- Invalid error or not available to calculate.")
return
def calc_metrics(self):
"""
Calculate all error metrics.
Available error metrics are "rmse", "mse", "mae", "me", "mpe",
"mape", "std", "hr", "hr+", "hr-" and "accuracy".
"""
for error in sorted(self.dict_errors.keys()):
self._calc(error, self.expected_targets, self.predicted_targets)
def print_errors(self):
"""
Print all errors metrics.
Note:
For better printing format, install :mod:`prettytable`.
"""
self.calc_metrics()
try:
from prettytable import PrettyTable
table = PrettyTable(["Error", "Value"])
table.align["Error"] = "l"
table.align["Value"] = "l"
for error in sorted(self.dict_errors.keys()):
table.add_row([error, np.around(self.dict_errors[error], decimals=8)])
print()
print(table.get_string(sortby="Error"))
print()
except ImportError:
print("For better table format install 'prettytable' module.")
print()
for error in sorted(self.dict_errors.keys()):
print(error, np.around(self.dict_errors[error], decimals=8))
print()
def print_values(self):
"""
Print expected and predicted values.
"""
print("Expected: ", self.expected_targets.reshape(1, -1), "\n",
"Predicted: ", self.predicted_targets.reshape(1, -1), "\n")
def get(self, error):
"""
Calculate and return value of an error.
Arguments:
error (str): Error to be calculated.
Returns:
float: value of desired error.
"""
self._calc(error, self.expected_targets, self.predicted_targets)
return self.dict_errors[error]
def get_std(self):
self._calc("std", self.expected_targets, self.predicted_targets)
return self.dict_errors["std"]
def get_mae(self):
self._calc("mae", self.expected_targets, self.predicted_targets)
return self.dict_errors["mae"]
def get_mse(self):
self._calc("mse", self.expected_targets, self.predicted_targets)
return self.dict_errors["mse"]
def get_rmse(self):
self._calc("rmse", self.expected_targets, self.predicted_targets)
return self.dict_errors["rmse"]
def get_mpe(self):
self._calc("mpe", self.expected_targets, self.predicted_targets)
return self.dict_errors["mpe"]
def get_mape(self):
self._calc("mape", self.expected_targets, self.predicted_targets)
return self.dict_errors["mape"]
def get_me(self):
self._calc("me", self.expected_targets, self.predicted_targets)
return self.dict_errors["me"]
def get_hr(self):
self._calc("hr", self.expected_targets, self.predicted_targets)
return self.dict_errors["hr"]
def get_hrm(self):
self._calc("hr-", self.expected_targets, self.predicted_targets)
return self.dict_errors["hr-"]
def get_hrp(self):
self._calc("hr+", self.expected_targets, self.predicted_targets)
return self.dict_errors["hr+"]
def get_accuracy(self):
self._calc("accuracy", self.expected_targets, self.predicted_targets)
return self.dict_errors["accuracy"]
def get_error(self):
return (self.expected_targets - self.predicted_targets).flatten()
def get_anderson(self):
"""
Anderson-Darling test for data coming from a particular
distribution.
Returns:
tuple: statistic value, critical values and significance values.
Note:
Need scipy.stats module to perform Anderson-Darling test.
"""
try:
from scipy import stats
except ImportError:
raise ImportError("Need 'scipy.stats' module to calculate "
"anderson-darling test.")
error = (self.expected_targets - self.predicted_targets).flatten()
# from matplotlib import pyplot as plt
# import matplotlib.mlab as mlab
#
# plt.figure(figsize=(24.0, 12.0))
# _, bins, _ = plt.hist(error, 50, normed=1)
# _mu = np.mean(error)
# _sigma = np.std(error)
# plt.plot(bins, mlab.normpdf(bins, _mu, _sigma))
# plt.show()
# plt.close()
# Calculate Anderson-Darling normality test index
ad_statistic, ad_c, ad_s = stats.anderson(error, "norm")
return ad_statistic, ad_c, ad_s
def get_shapiro(self):
"""
| |
("bn", "Bengali"),
("br", "Breton"),
("bs", "Bosnian"),
("ca", "Catalan"),
("cs", "Czech"),
("cy", "Welsh"),
("da", "Danish"),
("de", "German"),
("dsb", "Lower Sorbian"),
("el", "Greek"),
("en", "English"),
("en-au", "Australian English"),
("en-gb", "British English"),
("eo", "Esperanto"),
("es", "Spanish"),
("es-ar", "Argentinian Spanish"),
("es-co", "Colombian Spanish"),
("es-mx", "Mexican Spanish"),
("es-ni", "Nicaraguan Spanish"),
("es-ve", "Venezuelan Spanish"),
("et", "Estonian"),
("eu", "Basque"),
("fa", "Persian"),
("fi", "Finnish"),
("fr", "French"),
("fy", "Frisian"),
("ga", "Irish"),
("gd", "Scottish Gaelic"),
("gl", "Galician"),
("he", "Hebrew"),
("hi", "Hindi"),
("hr", "Croatian"),
("hsb", "Upper Sorbian"),
("hu", "Hungarian"),
("hy", "Armenian"),
("ia", "Interlingua"),
("id", "Indonesian"),
("io", "Ido"),
("is", "Icelandic"),
("it", "Italian"),
("ja", "Japanese"),
("ka", "Georgian"),
("kab", "Kabyle"),
("kk", "Kazakh"),
("km", "Khmer"),
("kn", "Kannada"),
("ko", "Korean"),
("lb", "Luxembourgish"),
("lt", "Lithuanian"),
("lv", "Latvian"),
("mk", "Macedonian"),
("ml", "Malayalam"),
("mn", "Mongolian"),
("mr", "Marathi"),
("my", "Burmese"),
("nb", "Norwegian Bokmål"),
("ne", "Nepali"),
("nl", "Dutch"),
("nn", "Norwegian Nynorsk"),
("os", "Ossetic"),
("pa", "Punjabi"),
("pl", "Polish"),
("pt", "Portuguese"),
("pt-br", "Brazilian Portuguese"),
("ro", "Romanian"),
("ru", "Russian"),
("sk", "Slovak"),
("sl", "Slovenian"),
("sq", "Albanian"),
("sr", "Serbian"),
("sr-latn", "Serbian Latin"),
("sv", "Swedish"),
("sw", "Swahili"),
("ta", "Tamil"),
("te", "Telugu"),
("th", "Thai"),
("tr", "Turkish"),
("tt", "Tatar"),
("udm", "Udmurt"),
("uk", "Ukrainian"),
("ur", "Urdu"),
("vi", "Vietnamese"),
("zh-hans", "Simplified Chinese"),
("zh-hant", "Traditional Chinese"),
],
max_length=255,
null=True,
verbose_name="Language",
),
),
(
"national_identification_number",
models.CharField(
blank=True,
max_length=255,
null=True,
verbose_name="National identification number",
),
),
(
"address_protection",
models.BooleanField(
default=False, verbose_name="Address protection"
),
),
(
"sap_customer_number",
models.CharField(
blank=True,
max_length=255,
null=True,
verbose_name="SAP customer number",
),
),
(
"electronic_billing_address",
models.CharField(
blank=True,
max_length=255,
null=True,
verbose_name="Electronic billing address",
),
),
(
"partner_code",
models.CharField(
blank=True,
max_length=255,
null=True,
verbose_name="Partner code",
),
),
(
"is_lessor",
models.BooleanField(default=False, verbose_name="Is a lessor"),
),
(
"sap_sales_office",
models.CharField(
blank=True,
max_length=255,
null=True,
verbose_name="SAP sales office",
),
),
("note", models.TextField(blank=True, null=True, verbose_name="Note")),
],
options={
"verbose_name": "Contact",
"verbose_name_plural": "Contacts",
"ordering": ["type", "name", "last_name", "first_name"],
},
),
migrations.CreateModel(
name="Contract",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("deleted", models.DateTimeField(editable=False, null=True)),
(
"created_at",
models.DateTimeField(
auto_now_add=True, verbose_name="Time created"
),
),
(
"modified_at",
models.DateTimeField(auto_now=True, verbose_name="Time modified"),
),
(
"contract_number",
models.CharField(
blank=True,
max_length=255,
null=True,
verbose_name="Contract number",
),
),
(
"signing_date",
models.DateField(
blank=True, null=True, verbose_name="Signing date"
),
),
(
"sign_by_date",
models.DateField(
blank=True, null=True, verbose_name="Sign by date"
),
),
(
"signing_note",
models.TextField(
blank=True, null=True, verbose_name="Signing note"
),
),
(
"first_call_sent",
models.DateField(
blank=True, null=True, verbose_name="First call sent"
),
),
(
"second_call_sent",
models.DateField(
blank=True, null=True, verbose_name="Second call sent"
),
),
(
"third_call_sent",
models.DateField(
blank=True, null=True, verbose_name="Third call sent"
),
),
(
"is_readjustment_decision",
models.BooleanField(
blank=True, null=True, verbose_name="Is readjustment decision"
),
),
(
"ktj_link",
models.CharField(
blank=True, max_length=1024, null=True, verbose_name="KTJ link"
),
),
(
"institution_identifier",
models.CharField(
blank=True,
max_length=255,
null=True,
verbose_name="Institution identifier",
),
),
],
options={"verbose_name": "Contract", "verbose_name_plural": "Contracts"},
),
migrations.CreateModel(
name="ContractChange",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"signing_date",
models.DateField(
blank=True, null=True, verbose_name="Signing date"
),
),
(
"sign_by_date",
models.DateField(
blank=True, null=True, verbose_name="Sign by date"
),
),
(
"first_call_sent",
models.DateField(
blank=True, null=True, verbose_name="First call sent"
),
),
(
"second_call_sent",
models.DateField(
blank=True, null=True, verbose_name="Second call sent"
),
),
(
"third_call_sent",
models.DateField(
blank=True, null=True, verbose_name="Third call sent"
),
),
(
"description",
models.TextField(blank=True, null=True, verbose_name="Description"),
),
],
options={
"verbose_name": "Contract change",
"verbose_name_plural": "Contract changes",
},
),
migrations.CreateModel(
name="ContractRent",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("deleted", models.DateTimeField(editable=False, null=True)),
(
"created_at",
models.DateTimeField(
auto_now_add=True, verbose_name="Time created"
),
),
(
"modified_at",
models.DateTimeField(auto_now=True, verbose_name="Time modified"),
),
(
"amount",
models.DecimalField(
decimal_places=2, max_digits=10, verbose_name="Amount"
),
),
(
"period",
enumfields.fields.EnumField(
enum=leasing.enums.PeriodType,
max_length=30,
verbose_name="Period",
),
),
(
"base_amount",
models.DecimalField(
blank=True,
decimal_places=2,
max_digits=10,
null=True,
verbose_name="Base amount",
),
),
(
"base_amount_period",
enumfields.fields.EnumField(
blank=True,
enum=leasing.enums.PeriodType,
max_length=30,
null=True,
verbose_name="Base amount period",
),
),
(
"base_year_rent",
models.DecimalField(
blank=True,
decimal_places=2,
max_digits=10,
null=True,
verbose_name="Base year rent",
),
),
(
"start_date",
models.DateField(blank=True, null=True, verbose_name="Start date"),
),
(
"end_date",
models.DateField(blank=True, null=True, verbose_name="End date"),
),
],
options={
"verbose_name": "Contract rent",
"verbose_name_plural": "Contract rents",
},
),
migrations.CreateModel(
name="ContractType",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
],
options={
"verbose_name": "Contract type",
"verbose_name_plural": "Contract types",
"ordering": ["name"],
"abstract": False,
},
),
migrations.CreateModel(
name="Decision",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("deleted", models.DateTimeField(editable=False, null=True)),
(
"created_at",
models.DateTimeField(
auto_now_add=True, verbose_name="Time created"
),
),
(
"modified_at",
models.DateTimeField(auto_now=True, verbose_name="Time modified"),
),
(
"reference_number",
models.CharField(
blank=True,
max_length=255,
null=True,
verbose_name="Reference number",
),
),
(
"decision_date",
models.DateField(
blank=True, null=True, verbose_name="Decision date"
),
),
(
"section",
models.CharField(
blank=True, max_length=255, null=True, verbose_name="Section"
),
),
(
"description",
models.TextField(blank=True, null=True, verbose_name="Description"),
),
],
options={"verbose_name": "Decision", "verbose_name_plural": "Decisions"},
),
migrations.CreateModel(
name="DecisionMaker",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
],
options={
"verbose_name": "Decision maker",
"verbose_name_plural": "Decision makers",
"ordering": ["name"],
"abstract": False,
},
),
migrations.CreateModel(
name="DecisionType",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
(
"kind",
enumfields.fields.EnumField(
blank=True,
enum=leasing.enums.DecisionTypeKind,
max_length=30,
null=True,
verbose_name="Decision type kind",
),
),
],
options={
"verbose_name": "Decision type",
"verbose_name_plural": "Decision types",
"ordering": ["name"],
"abstract": False,
},
),
migrations.CreateModel(
name="District",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
(
"identifier",
models.CharField(max_length=255, verbose_name="Identifier"),
),
],
options={
"verbose_name": "District",
"verbose_name_plural": "Districts",
"ordering": ("municipality__name", "name"),
},
),
migrations.CreateModel(
name="EmailLog",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("deleted", models.DateTimeField(editable=False, null=True)),
(
"created_at",
models.DateTimeField(
auto_now_add=True, verbose_name="Time created"
),
),
(
"modified_at",
models.DateTimeField(auto_now=True, verbose_name="Time modified"),
),
(
"type",
enumfields.fields.EnumField(
enum=leasing.enums.EmailLogType,
max_length=30,
verbose_name="Email log type",
),
),
("text", models.TextField(blank=True, null=True, verbose_name="Text")),
(
"sent_at",
models.DateTimeField(
blank=True, null=True, verbose_name="Time created"
),
),
("object_id", models.PositiveIntegerField()),
],
options={"verbose_name": "Email log", "verbose_name_plural": "Email logs"},
),
migrations.CreateModel(
name="EqualizedRent",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"start_date",
models.DateField(blank=True, null=True, verbose_name="Start date"),
),
(
"end_date",
models.DateField(blank=True, null=True, verbose_name="End date"),
),
(
"payable_amount",
models.DecimalField(
decimal_places=2, max_digits=10, verbose_name="Payable amount"
),
),
(
"equalized_payable_amount",
models.DecimalField(
decimal_places=2,
max_digits=10,
verbose_name="Equalized payable amount",
),
),
(
"equalization_factor",
models.DecimalField(
decimal_places=6,
max_digits=8,
verbose_name="Equalization factor",
),
),
],
options={
"verbose_name": "Equalized rent",
"verbose_name_plural": "Equalized rents",
},
),
migrations.CreateModel(
name="Financing",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
],
options={
"verbose_name": "Form of financing",
"verbose_name_plural": "Forms of financing",
"ordering": ["name"],
"abstract": False,
},
),
migrations.CreateModel(
name="FixedInitialYearRent",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("deleted", models.DateTimeField(editable=False, null=True)),
(
"created_at",
models.DateTimeField(
auto_now_add=True, verbose_name="Time created"
),
),
(
"modified_at",
models.DateTimeField(auto_now=True, verbose_name="Time modified"),
),
(
"amount",
models.DecimalField(
decimal_places=2, max_digits=10, verbose_name="Amount"
),
),
(
"start_date",
models.DateField(blank=True, null=True, verbose_name="Start date"),
),
(
"end_date",
models.DateField(blank=True, null=True, verbose_name="End date"),
),
],
options={
"verbose_name": "Fixed initial year rent",
"verbose_name_plural": "Fixed initial year rents",
},
),
migrations.CreateModel(
name="Hitas",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
],
options={
"verbose_name": "Hitas",
"verbose_name_plural": "Hitas",
"ordering": ["name"],
"abstract": False,
},
),
migrations.CreateModel(
name="Index",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("number", models.PositiveIntegerField(verbose_name="Number")),
("year", models.PositiveSmallIntegerField(verbose_name="Year")),
(
"month",
models.PositiveSmallIntegerField(
blank=True,
null=True,
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(12),
],
verbose_name="Month",
),
),
],
options={
"verbose_name": "Index",
"verbose_name_plural": "Indexes",
"ordering": ("-year", "-month"),
},
),
migrations.CreateModel(
name="InfillDevelopmentCompensation",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("deleted", models.DateTimeField(editable=False, null=True)),
(
"created_at",
models.DateTimeField(
auto_now_add=True, verbose_name="Time created"
),
),
(
"modified_at",
models.DateTimeField(auto_now=True, verbose_name="Time modified"),
),
(
"name",
models.CharField(
blank=True, max_length=255, null=True, verbose_name="Name"
),
),
(
"reference_number",
models.CharField(
blank=True,
max_length=255,
null=True,
verbose_name="Reference number",
),
),
(
"detailed_plan_identifier",
models.CharField(
blank=True,
max_length=255,
null=True,
verbose_name="Detailed plan identifier",
),
),
(
"state",
enumfields.fields.EnumField(
blank=True,
enum=leasing.enums.InfillDevelopmentCompensationState,
max_length=30,
null=True,
verbose_name="State",
),
),
(
"lease_contract_change_date",
models.DateField(
blank=True, null=True, verbose_name="Lease contract change date"
),
),
("note", models.TextField(blank=True, null=True, verbose_name="Note")),
(
"geometry",
django.contrib.gis.db.models.fields.MultiPolygonField(
blank=True, null=True, srid=4326, verbose_name="Geometry"
),
),
],
options={
"verbose_name": "Infill development compensation",
"verbose_name_plural": "Infill development compensations",
},
),
migrations.CreateModel(
name="Inspection",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"inspector",
models.CharField(
blank=True, max_length=255, null=True, verbose_name="Inspector"
),
),
(
"supervision_date",
models.DateField(
blank=True, null=True, verbose_name="Supervision date"
),
),
(
"supervised_date",
models.DateField(
blank=True, null=True, verbose_name="Supervised date"
),
),
(
"description",
models.TextField(blank=True, null=True, verbose_name="Description"),
),
],
options={
"verbose_name": "Inspection",
"verbose_name_plural": "Inspections",
},
),
migrations.CreateModel(
name="IntendedUse",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
],
options={
"verbose_name": "Intended use",
"verbose_name_plural": "Intended uses",
"ordering": ["name"],
"abstract": False,
},
),
migrations.CreateModel(
name="InterestRate",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("start_date", models.DateField(verbose_name="Start date")),
("end_date", | |
# Repository: Roboy/LSM_SpiNNaker_MyoArm
from pacman import exceptions
from pacman.model.resources.resource_container import ResourceContainer
from pacman.model.resources.dtcm_resource import DTCMResource
from pacman.model.resources.sdram_resource import SDRAMResource
from pacman.utilities import utility_calls
from pacman.model.resources.cpu_cycles_per_tick_resource \
import CPUCyclesPerTickResource
from spinn_machine.utilities.ordered_set import OrderedSet
class ResourceTracker(object):
""" Tracks the usage of resources of a machine
"""
def __init__(self, machine, chips=None):
"""
:param machine: The machine to track the usage of
:type machine: :py:class:`spinn_machine.machine.Machine`
:param chips: If specified, this list of chips will be used\
instead of the list from the machine. Note that the order\
will be maintained, so this can be used either to reduce\
the set of chips used, or to re-order the chips. Note\
also that on deallocation, the order is no longer\
guaranteed.
:type chips: iterable of (x, y) tuples of coordinates of chips
"""
# The amount of SDRAM used by each chip,
# indexed by the (x, y) tuple of coordinates of the chip
# Note that entries are only added when the SDRAM is first used
self._sdram_tracker = dict()
# The set of processor ids available on each chip,
# indexed by the (x, y) tuple of coordinates of the chip
# Note that entries are only added when a core is first used
self._core_tracker = dict()
# The machine object
self._machine = machine
# Set of tags available indexed by board address
# Note that entries are only added when a board is first used
self._tags_by_board = dict()
# Set of boards with available ip tags
self._boards_with_ip_tags = OrderedSet()
# Set of (board_address, tag) assigned to an ip tag indexed by
# (ip address, port, strip_sdp) - Note not reverse ip tags
self._ip_tags_address_and_port = dict()
# The (ip address, port) assigned to an ip tag indexed by
# (board address, tag)
self._address_and_port_ip_tag = dict()
# The (board address, port) combinations already assigned to a
# reverse ip tag - Note not ip tags
self._reverse_ip_tag_listen_port = set()
# The port assigned to a reverse ip tag, indexed by
# (board address, tag) - Note not ip tags
self._listen_port_reverse_ip_tag = dict()
# A count of how many allocations are sharing the same ip tag -
# Note not reverse ip tags
self._n_ip_tag_allocations = dict()
# Board address indexed by (x, y) tuple of coordinates of the chip
self._ethernet_area_codes = dict()
# (x, y) tuple of coordinates of Ethernet connected chip indexed by
# board address
self._ethernet_chips = dict()
# Set of (x, y) tuples of coordinates of chips which have available
# processors
self._chips_available = OrderedSet(chips)
if chips is None:
for chip in machine.chips:
key = (chip.x, chip.y)
self._chips_available.add(key)
# Initialise the Ethernet area codes
for (chip_x, chip_y) in self._chips_available:
chip = self._machine.get_chip_at(chip_x, chip_y)
key = (chip_x, chip_y)
# add area codes for Ethernets
if (chip.nearest_ethernet_x is not None and
chip.nearest_ethernet_y is not None):
ethernet_connected_chip = machine.get_chip_at(
chip.nearest_ethernet_x, chip.nearest_ethernet_y)
if ethernet_connected_chip is not None:
ethernet_area_code = ethernet_connected_chip.ip_address
if ethernet_area_code not in self._ethernet_area_codes:
self._ethernet_area_codes[
ethernet_area_code] = OrderedSet()
self._boards_with_ip_tags.add(ethernet_area_code)
self._ethernet_chips[ethernet_area_code] = (
chip.nearest_ethernet_x, chip.nearest_ethernet_y)
self._ethernet_area_codes[ethernet_area_code].add(key)
def _get_usable_ip_tag_chips(self):
""" Get the coordinates of any chips that have available ip tags
:return: Generator of tuples of (x, y) coordinates of chips
:rtype: generator of (int, int)
"""
for board_address in self._boards_with_ip_tags:
for key in self._ethernet_area_codes[board_address]:
if (key not in self._core_tracker or
len(self._core_tracker[key]) > 0):
yield key
def _get_usable_chips(self, chips, board_address,
ip_tags, reverse_ip_tags):
""" Get all chips that are available on a board given the constraints
:param chips: iterable of tuples of (x, y) coordinates of chips to \
look though for usable chips, or None to use all available\
chips
:type chips: iterable of (int, int)
:param board_address: the board address to check for usable chips on
:type board_address: str or None
:param ip_tags: list of ip tag constraints
:type ip_tags: list of\
:py:class:`pacman.model.constraints.tag_allocator_constraints.tag_allocator_require_iptag_constraint.TagAllocatorRequireIptagConstraint`
:param reverse_ip_tags: list of reverse ip tag constraints
:type reverse_ip_tags: list of\
:py:class:`pacman.model.constraints.tag_allocator_constraints.tag_allocator_require_reverse_iptag_constraint.TagAllocatorRequireReverseIptagConstraint`
:return: iterable of tuples of (x, y) coordinates of usable chips
:rtype: iterable of tuple of (x, y)
:raise PacmanInvalidParameterException:
* If the board address is unknown
* When either or both chip coordinates of any chip are none
* When a non-existent chip is specified
* When all the chips in the specified board have been used
"""
if chips is not None:
chips_to_use = list()
area_code = None
if board_address is not None:
if board_address not in self._ethernet_area_codes:
raise exceptions.PacmanInvalidParameterException(
"board_address", str(board_address),
"Unrecognised board address")
area_code = self._ethernet_area_codes[board_address]
for (chip_x, chip_y) in chips:
if ((chip_x is None and chip_y is not None) or
(chip_x is not None and chip_y is None)):
raise exceptions.PacmanInvalidParameterException(
"chip_x and chip_y", "{} and {}".format(
chip_x, chip_y),
"Either both or neither must be None")
elif self._machine.get_chip_at(chip_x, chip_y) is None:
raise exceptions.PacmanInvalidParameterException(
"chip_x and chip_y", "{} and {}".format(
chip_x, chip_y),
"No such chip was found in the machine")
elif ((chip_x, chip_y) in self._chips_available and
(area_code is None or (chip_x, chip_y) in area_code)):
chips_to_use.append((chip_x, chip_y))
if len(chips_to_use) == 0:
raise exceptions.PacmanInvalidParameterException(
"chips and board_address",
"{} and {}".format(chips, board_address),
"No valid chips found on the specified board")
return chips_to_use
elif board_address is not None:
return self._ethernet_area_codes[board_address]
elif ((ip_tags is not None and len(ip_tags) > 0) or
(reverse_ip_tags is not None and len(reverse_ip_tags) > 0)):
return self._get_usable_ip_tag_chips()
return self._chips_available
def max_available_cores_on_chips_that_satisfy(
self, placement_constraint, ip_tag_constraints,
reverse_ip_tag_constraints):
""" Get the max number of cores on a chip that satisfy these\
constraints
:param placement_constraint: placement constraint
:param ip_tag_constraints: ip_tag constraint
:param reverse_ip_tag_constraints: reverse ip_tag constraints
:return: the max number of cores
:rtype: int
"""
if placement_constraint is None:
chips = self._get_usable_chips(
ip_tags=ip_tag_constraints, chips=None, board_address=None,
reverse_ip_tags=reverse_ip_tag_constraints)
max_cores = 0
for chip in chips:
if chip not in self._core_tracker:
cores = len(list(self._machine.get_chip_at(
chip[0], chip[1]).processors))
else:
cores = len(self._core_tracker[chip])
if cores > max_cores:
max_cores = cores
return max_cores
else:
cores = self._core_tracker[(placement_constraint.x,
placement_constraint.y)]
return len(cores)
def _is_sdram_available(self, chip, key, resources):
""" Check if the SDRAM available on a given chip is enough for the\
given resources.
:param chip: The chip to check the resources of
:type chip: :py:class:`spinn_machine.chip.Chip`
:param key: The (x, y) coordinates of the chip
:type key: tuple of (int, int)
:param resources: the resources containing the SDRAM required
:type resources:\
:py:class:`pacman.model.resources.resource_container.ResourceContainer`
:return: True if there is enough SDRAM available, or False otherwise
:rtype: bool
"""
if key in self._sdram_tracker:
return ((chip.sdram.size - self._sdram_tracker[key]) >=
resources.sdram.get_value())
else:
return chip.sdram >= resources.sdram.get_value()
def _sdram_available(self, chip, key):
""" Return the amount of SDRAM available on a chip
:param chip: The chip to check the resources of
:type chip: :py:class:`spinn_machine.chip.Chip`
:param key: The (x, y) coordinates of the chip
:type key: tuple of (int, int)
:return: the SDRAM available
:rtype: int
"""
if key not in self._sdram_tracker:
return chip.sdram.size
return chip.sdram.size - self._sdram_tracker[key]
def sdram_avilable_on_chip(self, chip_x, chip_y):
""" Get the available SDRAM on the chip at coordinates chip_x, chip_y
:param chip_x: x coord of the chip in question
:param chip_y: y coord of the chip in question
:return: the SDRAM remaining
"""
key = (chip_x, chip_y)
chip = self._machine.get_chip_at(chip_x, chip_y)
return self._sdram_available(chip, key)
def _best_core_available(self, chip, key, processor_id):
""" Locate the best core available on a chip
:param chip: The chip to check the resources of
:type chip: :py:class:`spinn_machine.chip.Chip`
:param key: The (x, y) coordinates of the chip
:type key: tuple of (int, int)
:param processor_id: A fixed processor id
:type processor_id: int
:return: The processor id selected as the best on this chip
"""
if processor_id is not None:
return processor_id
# TODO: Check for the best core; currently assumes all are the same
if key not in self._core_tracker:
for processor in chip.processors:
if not processor.is_monitor:
return processor.processor_id
return iter(self._core_tracker[key]).next()
def _is_core_available(self, chip, key, processor_id, resources):
""" Check if there is a core available on a given chip given the\
constraints
:param chip: The chip to check the resources of
:type chip: :py:class:`spinn_machine.chip.Chip`
:param key: The (x, y) coordinates of the chip
:type key: tuple of (int, int)
:param processor_id: A constraining fixed processor id
:type processor_id: | |
limit(optional): maximum number of entries to return
# component(optional): only report promotions for this component
commit_hash = request.args.get('commit_hash', None)
distro_hash = request.args.get('distro_hash', None)
extended_hash = request.args.get('extended_hash', None)
agg_hash = request.args.get('aggregate_hash', None)
promote_name = request.args.get('promote_name', None)
offset = int(request.args.get('offset', 0))
limit = int(request.args.get('limit', 100))
component = request.args.get('component', None)
if request.headers.get('Content-Type') == 'application/json':
# This is the old, deprecated method of in-body parameters
# We will keep it for backwards compatibility
if commit_hash is None:
commit_hash = request.json.get('commit_hash', None)
if distro_hash is None:
distro_hash = request.json.get('distro_hash', None)
if extended_hash is None:
extended_hash = request.json.get('extended_hash', None)
if agg_hash is None:
agg_hash = request.json.get('aggregate_hash', None)
if promote_name is None:
promote_name = request.json.get('promote_name', None)
if offset == 0:
offset = int(request.json.get('offset', 0))
if limit == 100:
limit = int(request.json.get('limit', 100))
if component is None:
component = request.json.get('component', None)
config_options = _get_config_options(app.config['CONFIG_FILE'])
# Make sure we do not exceed
if limit > max_limit:
limit = max_limit
if ((commit_hash and not distro_hash) or
(distro_hash and not commit_hash)):
raise InvalidUsage('Both commit_hash and distro_hash must be '
'specified if any of them is.',
status_code=400)
# Find the commit id for commit_hash/distro_hash
session = _get_db()
if commit_hash and distro_hash:
commit = _get_commit(session, commit_hash, distro_hash, extended_hash)
if commit is None:
raise InvalidUsage('commit_hash+distro_hash+extended_hash '
'combination not found', status_code=400)
commit_id = commit.id
else:
commit_id = None
# Now find the promotions, and filter if necessary
promotions = session.query(Promotion)
if commit_id is not None:
promotions = promotions.filter(Promotion.commit_id == commit_id)
if promote_name is not None:
promotions = promotions.filter(
Promotion.promotion_name == promote_name)
if agg_hash is not None:
promotions = promotions.filter(Promotion.aggregate_hash == agg_hash)
if component is not None:
promotions = promotions.filter(Promotion.component == component)
promotions = promotions.order_by(desc(Promotion.id)).limit(limit).\
offset(offset)
# And format the output
data = []
for promotion in promotions:
commit = getCommits(session, limit=0).filter(
Commit.id == promotion.commit_id).first()
repo_hash = _repo_hash(commit)
repo_url = "%s/%s" % (config_options.baseurl,
commit.getshardedcommitdir())
d = {'timestamp': promotion.timestamp,
'commit_hash': commit.commit_hash,
'distro_hash': commit.distro_hash,
'extended_hash': commit.extended_hash,
'aggregate_hash': promotion.aggregate_hash,
'repo_hash': repo_hash,
'repo_url': repo_url,
'promote_name': promotion.promotion_name,
'component': promotion.component,
'user': promotion.user}
data.append(d)
return jsonify(data)
@app.route('/api/metrics/builds', methods=['GET'])
def get_metrics():
    """Return build metrics: succeeded, failed and total commit counts.

    Parameters (query string, or legacy JSON body for backwards
    compatibility):
        start_date: start of period, YYYY-mm-dd (UTC), inclusive
        end_date: end of period, YYYY-mm-dd (UTC), exclusive
        package_name (optional): restrict the counts to this package

    Raises InvalidUsage (HTTP 400) on missing or malformed dates.
    """
    start_date = request.args.get('start_date', None)
    end_date = request.args.get('end_date', None)
    package_name = request.args.get('package_name', None)
    if request.headers.get('Content-Type') == 'application/json':
        # This is the old, deprecated method of in-body parameters.
        # We keep it for backwards compatibility; query-string values
        # take precedence.
        if start_date is None:
            start_date = request.json.get('start_date', None)
        if end_date is None:
            end_date = request.json.get('end_date', None)
        if package_name is None:
            package_name = request.json.get('package_name', None)
    if start_date is None or end_date is None:
        raise InvalidUsage('Missing parameters', status_code=400)
    # Convert dates to timestamps
    fmt = '%Y-%m-%d'
    try:
        start_timestamp = int(calendar.timegm(time.strptime(start_date, fmt)))
        end_timestamp = int(calendar.timegm(time.strptime(end_date, fmt)))
    except ValueError:
        raise InvalidUsage('Invalid date format, it must be YYYY-mm-dd',
                           status_code=400)
    session = _get_db()

    def _count_commits(status):
        # Count commits with the given status inside [start, end).
        commits = session.query(Commit).filter(
            Commit.status == status,
            Commit.dt_build >= start_timestamp,
            Commit.dt_build < end_timestamp)
        if package_name:
            commits = commits.filter(
                Commit.project_name == package_name)
        return commits.count()

    successful_commits = _count_commits('SUCCESS')
    # Bug fix: the failed count previously used "<= end_timestamp" while
    # the success count used "<"; both now use the same half-open interval
    # so the totals are consistent at the boundary.
    failed_commits = _count_commits('FAILED')
    total_commits = successful_commits + failed_commits
    result = {'succeeded': successful_commits,
              'failed': failed_commits,
              'total': total_commits}
    return jsonify(result), 200
@app.route('/api/last_tested_repo', methods=['POST'])
@auth.login_required
@_json_media_type
def last_tested_repo_POST():
    """Find the last tested repo and mark it as in-progress for this CI.

    JSON body parameters:
        max_age: maximum age in hours used as base for the search; 0 means
            no age limit.
        reporting_job_id: name of the CI that will test this repo.
        success (optional): find repos with a successful/unsuccessful vote.
        job_id (optional): name of the CI that sent the vote.
        sequential_mode (optional): if true, only search for votes cast by
            previous_job_id. Defaults to false.
        previous_job_id (optional): CI name to search for when
            sequential_mode is true.
        component (optional): only get votes for this component.

    Returns:
        JSON description of the newly created in-progress vote, HTTP 201.

    Raises:
        InvalidUsage: on missing parameters.
    """
    max_age = request.json.get('max_age', None)
    my_job_id = request.json.get('reporting_job_id', None)
    job_id = request.json.get('job_id', None)
    success = request.json.get('success', None)
    sequential_mode = request.json.get('sequential_mode', None)
    previous_job_id = request.json.get('previous_job_id', None)
    component = request.json.get('component', None)
    # NOTE(review): strtobool expects a string; a raw JSON boolean here
    # would raise -- confirm clients always send these flags as strings.
    if success is not None:
        success = bool(strtobool(success))
    if sequential_mode is not None:
        sequential_mode = bool(strtobool(sequential_mode))
    if sequential_mode and previous_job_id is None:
        raise InvalidUsage('Missing parameter previous_job_id',
                           status_code=400)
    if max_age is None or my_job_id is None:
        raise InvalidUsage('Missing parameters', status_code=400)
    # Calculate the oldest acceptable build time as now - max_age.
    if int(max_age) == 0:
        timestamp = 0
    else:
        oldest_time = datetime.now() - timedelta(hours=int(max_age))
        timestamp = time.mktime(oldest_time.timetuple())
    session = _get_db()
    # The previous code wrapped this call in a try/except that only
    # re-raised the exception; let errors propagate directly instead.
    if sequential_mode:
        # CI pipeline case: restrict the search to previous_job_id votes.
        vote = getVote(session, timestamp, success, previous_job_id,
                       component=component, fallback=False)
    else:
        # Normal case.
        vote = getVote(session, timestamp, success, job_id,
                       component=component)
    # Record an "in progress" vote for the reporting CI on the found commit.
    newvote = CIVote(commit_id=vote.commit_id, ci_name=my_job_id,
                     ci_url='', ci_vote=False, ci_in_progress=True,
                     timestamp=int(time.time()), notes='',
                     user=auth.username(), component=vote.component)
    session.add(newvote)
    session.commit()
    commit = session.query(Commit).filter(
        Commit.status == 'SUCCESS',
        Commit.id == vote.commit_id).first()
    result = {'commit_hash': commit.commit_hash,
              'distro_hash': commit.distro_hash,
              'extended_hash': commit.extended_hash,
              'timestamp': newvote.timestamp,
              'job_id': newvote.ci_name,
              'success': newvote.ci_vote,
              'in_progress': newvote.ci_in_progress,
              'user': newvote.user,
              'component': newvote.component}
    return jsonify(result), 201
@app.route('/api/report_result', methods=['POST'])
@auth.login_required
@_json_media_type
def report_result():
    """Record a CI vote for either a commit or a repo aggregate.

    JSON body parameters:
        job_id: name of the CI sending the vote.
        commit_hash: commit hash (must be paired with distro_hash).
        distro_hash: distro hash (must be paired with commit_hash).
        extended_hash (optional): extended hash.
        aggregate_hash: hash of the aggregate; mutually exclusive with
            commit_hash/distro_hash.
        url: URL where more information can be found.
        timestamp: CI execution timestamp.
        success: vote result; parsed with strtobool, so presumably sent as
            a string -- TODO confirm clients never send a JSON boolean.
        notes (optional): free-form notes.

    Either commit_hash+distro_hash or aggregate_hash must be provided.
    Returns the recorded vote as JSON with HTTP 201.
    """
    # Required parameters: a missing key raises InvalidUsage (HTTP 400).
    try:
        timestamp = request.json['timestamp']
        job_id = request.json['job_id']
        success = request.json['success']
        url = request.json['url']
    except KeyError:
        raise InvalidUsage('Missing parameters', status_code=400)
    commit_hash = request.json.get('commit_hash', None)
    distro_hash = request.json.get('distro_hash', None)
    extended_hash = request.json.get('extended_hash', None)
    aggregate_hash = request.json.get('aggregate_hash', None)
    # Validate the hash combination: commit_hash and distro_hash go
    # together, and neither may be combined with aggregate_hash.
    if not commit_hash and not distro_hash and not aggregate_hash:
        raise InvalidUsage('Missing parameters', status_code=400)
    if commit_hash and not distro_hash:
        raise InvalidUsage('If commit_hash is provided, distro_hash '
                           'must be provided too', status_code=400)
    if distro_hash and not commit_hash:
        raise InvalidUsage('If distro_hash is provided, commit_hash '
                           'must be provided too', status_code=400)
    if (aggregate_hash and distro_hash) or (aggregate_hash and commit_hash):
        raise InvalidUsage('aggregate_hash and commit/distro_hash cannot be '
                           'combined', status_code=400)
    notes = request.json.get('notes', '')
    session = _get_db()
    # We have two paths here: one for votes on commit/distro/extended hash,
    # another for votes on aggregate_hash
    component = None
    if commit_hash:
        # Commit vote path: the commit must already exist.
        commit = _get_commit(session, commit_hash, distro_hash, extended_hash)
        if commit is None:
            raise InvalidUsage('commit_hash+distro_hash+extended_hash '
                               'combination not found', status_code=400)
        commit_id = commit.id
        out_ext_hash = commit.extended_hash
        component = commit.component
        vote = CIVote(commit_id=commit_id, ci_name=job_id, ci_url=url,
                      ci_vote=bool(strtobool(success)), ci_in_progress=False,
                      timestamp=int(timestamp), notes=notes,
                      user=auth.username(), component=component)
    else:
        # Aggregate vote path: the promotion for the hash must exist.
        out_ext_hash = None
        prom = session.query(Promotion).filter(
            Promotion.aggregate_hash == aggregate_hash).first()
        if prom is None:
            raise InvalidUsage('aggregate_hash not found',
                               status_code=400)
        vote = CIVote_Aggregate(ref_hash=aggregate_hash, ci_name=job_id,
                                ci_url=url, ci_vote=bool(strtobool(success)),
                                ci_in_progress=False, timestamp=int(timestamp),
                                notes=notes, user=auth.username())
    session.add(vote)
    session.commit()
    # Echo the stored vote back to the caller.
    result = {'commit_hash': commit_hash,
              'distro_hash': distro_hash,
              'extended_hash': out_ext_hash,
              'aggregate_hash': aggregate_hash,
              'timestamp': timestamp,
              'job_id': job_id,
              'success': bool(strtobool(success)),
              'in_progress': False,
              'url': url,
              'notes': notes,
              'user': auth.username(),
              'component': component}
    return jsonify(result), 201
@app.route('/api/promote', methods=['POST'])
@auth.login_required
@_json_media_type
def promote():
# commit_hash: commit hash
# distro_hash: distro hash
# extended_hash (optional): extended hash
# promote_name: symlink name
try:
commit_hash = request.json['commit_hash']
distro_hash = request.json['distro_hash']
promote_name = request.json['promote_name']
except KeyError:
raise InvalidUsage('Missing parameters', status_code=400)
extended_hash = request.json.get('extended_hash', None)
# Check for invalid promote names
if (promote_name == 'consistent' or promote_name == 'current'):
raise InvalidUsage('Invalid promote_name %s' % promote_name,
status_code=403)
config_options = _get_config_options(app.config['CONFIG_FILE'])
session = _get_db()
commit = _get_commit(session, commit_hash, distro_hash, extended_hash)
if commit is None:
raise InvalidUsage('commit_hash+distro_hash+extended_hash combination'
' not found', status_code=400)
# If the commit has been purged, do not move on
if commit.flags & FLAG_PURGED:
raise InvalidUsage('commit_hash+distro_hash+extended_hash has been '
'purged, cannot promote it', status_code=410)
if config_options.use_components:
base_directory = os.path.join(app.config['REPO_PATH'], "component/%s" %
commit.component)
else:
base_directory = app.config['REPO_PATH']
target_link = os.path.join(base_directory, promote_name)
# Check for invalid target links, like ../promotename
target_dir = os.path.dirname(os.path.abspath(target_link))
if not os.path.samefile(target_dir, base_directory):
raise InvalidUsage('Invalid promote_name %s' % promote_name,
status_code=403)
# We should create a relative symlink
yumrepodir = commit.getshardedcommitdir()
if config_options.use_components:
# In this case, the relative path should not include
# the component part
yumrepodir = yumrepodir.replace("component/%s/" % commit.component, '')
# Remove symlink if it exists, so we can create it again
if os.path.lexists(os.path.abspath(target_link)):
os.remove(target_link)
try:
os.symlink(yumrepodir, target_link)
except Exception as e:
raise InvalidUsage("Symlink creation failed with error: %s" %
e, status_code=500)
# Once the updated symlink is created, if we are using | |
<gh_stars>1-10
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
from downloadCommon import DownloadCommon, getSeqName
from DdlCommonInterface import DdlCommonInterface
import re
class OracleDownloader(DownloadCommon):
    """Reads schema metadata (tables, columns, indexes, relations, views
    and stored procedures) from an Oracle database via cx_Oracle."""

    def __init__(self):
        self.strDbms = 'oracle'

    def connect(self, info):
        """Open a connection from info's 'user', 'pass', 'dbname' and
        'version' keys."""
        try:
            import cx_Oracle
        except ImportError:
            # Previously a bare except: only a missing driver is reported
            # here now; any other error propagates to the caller.
            print("Missing Oracle support through cx_Oracle, see http://www.computronix.com/utilities.shtml#Oracle")
            return
        self.version = info['version']
        self.conn = cx_Oracle.connect(info['user'], info['pass'], info['dbname'])
        self.cursor = self.conn.cursor()

    def _tableInfo(self, strTable):
        """Debug helper: print the column names of strTable."""
        self.cursor.execute("select * from %s" % (strTable,))
        for col in self.cursor.description:
            print(col[0])

    def useConnection(self, con, version):
        """Adopt an already-open connection instead of creating a new one."""
        self.conn = con
        self.version = version
        self.cursor = self.conn.cursor()

    def getTables(self, tableList):
        """ Returns the list of tables as a array of strings """
        if tableList and len(tableList) > 0:
            # Note we are always case insensitive here.
            # NOTE(review): table names are interpolated into the SQL text;
            # this is safe only while they come from trusted configuration.
            inTables = "AND upper(TABLE_NAME) IN ('%s')" % ("' , '".join([name.upper() for name in tableList]))
        else:
            inTables = ""
        strQuery = """SELECT TABLE_NAME FROM ALL_TABLES WHERE
            TABLE_NAME NOT IN ('DUAL')
            AND OWNER NOT IN ('SYS', 'SYSTEM', 'OLAPSYS', 'WKSYS', 'WMSYS', 'CTXSYS', 'DMSYS', 'MDSYS', 'EXFSYS', 'ORDSYS')
            AND TABLE_NAME NOT LIKE 'BIN$%%'
            %s
            ORDER BY TABLE_NAME
            """ % (inTables)
        self.cursor.execute(strQuery)
        rows = self.cursor.fetchall()
        if rows:
            return self._confirmReturns([x[0] for x in rows], tableList)
        return []

    def getTableColumns(self, strTable):
        """ Returns column in this format
            (nColIndex, strColumnName, strColType, CHARACTER_MAXIMUM_LENGTH, NUMERIC_PRECISION, bNotNull, strDefault, auto_increment)
        """
        strSql = """
            SELECT COLUMN_ID, COLUMN_NAME, DATA_TYPE, DATA_LENGTH, DATA_PRECISION, DATA_SCALE, NULLABLE, DATA_DEFAULT
            FROM ALL_TAB_COLUMNS
            WHERE TABLE_NAME = :tablename
            ORDER BY COLUMN_ID"""
        self.cursor.execute(strSql, {'tablename': strTable})
        rows = self.cursor.fetchall()
        ret = []
        # Map non-Oracle type spellings to their canonical names.
        fixNames = {
            'character varying': 'varchar',
        }
        for row in rows:
            # 'strType' instead of 'type' to avoid shadowing the builtin.
            (attnum, name, strType, size, numsize, numprec,
             nullable, default) = row
            if strType in fixNames:
                strType = fixNames[strType]
            bNotNull = (nullable != "Y")
            # TODO: detect auto-increment (sequence/trigger based in Oracle).
            bAutoIncrement = False
            # For numeric columns the size is the declared precision.
            if numsize is not None or numprec is not None:
                size = numsize
            if strType == 'DATE' or strType == 'TIMESTAMP':
                size = None
            elif strType == 'FLOAT' and size == 126:
                # 126 is Oracle's default binary precision for FLOAT;
                # treat it as "no explicit size".
                size = None
            if default:
                default = default.rstrip()
            ret.append((name, strType, size, numprec, bNotNull, default, bAutoIncrement))
        return ret

    def getTableComment(self, strTableName):
        """ Returns the comment as a string """
        strSql = "SELECT COMMENTS from ALL_TAB_COMMENTS WHERE TABLE_NAME = :TABLENAME"
        self.cursor.execute(strSql, {'TABLENAME': strTableName})
        rows = self.cursor.fetchall()
        if rows:
            return rows[0][0]
        return None

    def getColumnComment(self, strTableName, strColumnName):
        """ Returns the comment as a string """
        strSql = "SELECT COMMENTS from ALL_COL_COMMENTS WHERE TABLE_NAME = :TABLENAME AND COLUMN_NAME = :COLUMNAME"
        self.cursor.execute(strSql, {'TABLENAME': strTableName, 'COLUMNAME': strColumnName})
        rows = self.cursor.fetchall()
        if rows:
            return rows[0][0]
        # NOTE(review): an unreachable 'return None' after this line was
        # removed.  Returning [] here is inconsistent with getTableComment
        # (which returns None) but is kept for backwards compatibility.
        return []

    def getTableIndexes(self, strTableName):
        """ Returns
            (strIndexName, [strColumns,], bIsUnique, bIsPrimary, bIsClustered)
            or []
        """
        strSql = """SELECT index_name, uniqueness, clustering_factor
            FROM ALL_INDEXES
            WHERE table_name = :tablename
            """
        self.cursor.execute(strSql, {'tablename': strTableName})
        rows = self.cursor.fetchall()
        ret = []
        if not rows:
            return ret
        for row in rows:
            # NOTE(review): clustering_factor is a number, used as a truthy
            # flag for bIsClustered -- confirm that is the intended meaning.
            (strIndexName, bIsUnique, bIsClustered) = row
            # Fetch the indexed columns in positional order.
            strSql = """SELECT column_name FROM ALL_IND_COLUMNS
                WHERE table_name = :tablename AND index_name = :indexname
                ORDER BY COLUMN_POSITION """
            self.cursor.execute(strSql, {'tablename': strTableName, 'indexname': strIndexName})
            colrows = self.cursor.fetchall()
            colList = [col[0] for col in colrows]
            bIsPrimary = False
            if bIsUnique == 'UNIQUE':
                # A unique index backing an enabled 'P' constraint is the
                # primary key.
                # NOTE(review): the comma-joined column list is bound as one
                # value, so IN (:colnames) can only match single-column keys;
                # composite primary keys are likely missed -- verify.
                strSql = """select c.*
                    from all_constraints c, all_cons_columns cc
                    where c.table_name = :tablename
                    and cc.constraint_name = c.constraint_name
                    and c.constraint_type = 'P'
                    and cc.column_name in (:colnames)
                    and c.status = 'ENABLED'"""
                self.cursor.execute(strSql, {'tablename': strTableName, 'colnames': ','.join(colList)})
                indexRows = self.cursor.fetchall()
                if indexRows and len(indexRows) > 0:
                    bIsPrimary = True
            ret.append((strIndexName, colList, bIsUnique, bIsPrimary, bIsClustered))
        return ret

    def _getTableViaConstraintName(self, strConstraint):
        """ Returns strTablename """
        strSql = """SELECT TABLE_NAME FROM ALL_CONSTRAINTS WHERE CONSTRAINT_NAME = :strConstraint"""
        self.cursor.execute(strSql, {'strConstraint': strConstraint})
        rows = self.cursor.fetchall()
        if rows:
            return rows[0][0]
        return None

    def _getColumnsViaConstraintName(self, strConstraint):
        """ Returns the constraint's column names in positional order. """
        strSql = """SELECT COLUMN_NAME FROM all_cons_columns WHERE CONSTRAINT_NAME = :strConstraint ORDER BY POSITION"""
        self.cursor.execute(strSql, {'strConstraint': strConstraint})
        rows = self.cursor.fetchall()
        if rows:
            return [col[0] for col in rows]
        return []

    def getTableRelations(self, strTableName):
        """ Returns
            (strConstraintName, colName, fk_table, fk_columns, confupdtype, confdeltype)
            or []
        """
        # CONSTRAINT_TYPE == "P" primary key
        # CONSTRAINT_TYPE == "R" 'Ref. Integrity'
        # CONSTRAINT_TYPE == "U" 'Unique Constr.'
        # CONSTRAINT_TYPE == "C" 'Check Constr.'
        strSql = """SELECT CONSTRAINT_NAME, TABLE_NAME, R_CONSTRAINT_NAME, DELETE_RULE
            FROM ALL_CONSTRAINTS
            WHERE TABLE_NAME = :tablename
            AND CONSTRAINT_TYPE = 'R'
            AND STATUS='ENABLED'
            """
        self.cursor.execute(strSql, {'tablename': strTableName})
        rows = self.cursor.fetchall()
        ret = []
        if not rows:
            return ret
        # Map Oracle delete rules to the single-letter codes used by the
        # rest of the tool; unknown rules pass through unchanged.
        delTypeMap = {
            'NO ACTION': 'a',
            'CASCADE': 'c',
            'SET NULL': 'n',
            'DEFAULT': 'd',  # Check TODO
        }
        for row in rows:
            (strConstraintName, strTable, fk_constraint, chDelType) = row
            # Resolve the referenced (foreign) table and column lists via
            # the referenced constraint.
            if fk_constraint:
                fk_table = self._getTableViaConstraintName(fk_constraint)
            else:
                fk_table = None
            colList = self._getColumnsViaConstraintName(strConstraintName)
            if fk_constraint:
                fkColList = self._getColumnsViaConstraintName(fk_constraint)
            else:
                fkColList = []
            chDelType = delTypeMap.get(chDelType, chDelType)
            # Oracle has no ON UPDATE rule to report.
            chUpdateType = ''
            ret.append((strConstraintName, colList, fk_table, fkColList, chUpdateType, chDelType))
        return ret

    def getViews(self, viewList):
        """ Returns the list of views as a array of strings """
        if viewList and len(viewList) > 0:
            inViews = "AND VIEW_NAME IN ('%s')" % ("','".join([name.upper() for name in viewList]))
        else:
            inViews = ""
        strQuery = """SELECT VIEW_NAME
            FROM ALL_VIEWS
            WHERE OWNER NOT IN ('SYS', 'SYSTEM', 'OLAPSYS', 'WKSYS', 'WMSYS', 'CTXSYS', 'DMSYS', 'MDSYS', 'EXFSYS', 'ORDSYS', 'WK_TEST', 'XDB')
            %s
            ORDER BY VIEW_NAME""" % (inViews)
        self.cursor.execute(strQuery)
        rows = self.cursor.fetchall()
        if rows:
            return self._confirmReturns([x[0] for x in rows], viewList)
        return []

    def getViewDefinition(self, strViewName):
        """ Returns the view's SQL text, or None if not found. """
        strQuery = "SELECT TEXT FROM ALL_VIEWS WHERE VIEW_NAME = :viewName"
        self.cursor.execute(strQuery, {'viewName': strViewName})
        rows = self.cursor.fetchall()
        if rows:
            return rows[0][0].rstrip()
        return None

    def getFunctions(self, functionList):
        """ Returns functions """
        if functionList and len(functionList) > 0:
            inFunctions = "AND OBJECT_NAME IN ('%s')" % ("','".join([name.upper() for name in functionList]))
        else:
            inFunctions = ""
        strQuery = """SELECT OBJECT_NAME
            FROM ALL_OBJECTS
            WHERE OBJECT_TYPE in ('PROCEDURE', 'FUNCTION')
            AND OWNER NOT IN ('SYS', 'SYSTEM', 'OLAPSYS', 'WKSYS', 'WMSYS', 'CTXSYS', 'DMSYS', 'MDSYS', 'EXFSYS', 'ORDSYS', 'WK_TEST', 'XDB')
            %s
            ORDER BY OBJECT_NAME""" % (inFunctions)
        self.cursor.execute(strQuery)
        rows = self.cursor.fetchall()
        if rows:
            return self._confirmReturns([x[0] for x in rows], functionList)
        return []

    def getFunctionDefinition(self, strSpecifiName):
        """ Returns (routineName, parameters, return, language, definition) """
        strSpecifiName = strSpecifiName.upper()
        strQuery = "select TEXT from all_source where name=:strSpecifiName ORDER BY LINE"
        self.cursor.execute(strQuery, {'strSpecifiName': strSpecifiName})
        rows = self.cursor.fetchall()
        if not rows:
            # NOTE(review): this returns a 4-tuple while the docstring and
            # success path use 5 elements -- confirm callers handle it.
            return (None, None, None, None)
        lines = []
        for row in rows:
            lines.append(row[0])
        strDefinition = ''.join(lines)
        strDefinition = strDefinition.rstrip('; ')  # Remove trailing ; on last line
        # Strip everything up to and including the AS/IS keyword so only the
        # routine body remains.
        re_def = re.compile(r".+\s(AS|IS)\s", re.IGNORECASE | re.MULTILINE | re.DOTALL)
        strDefinition = re_def.sub('', strDefinition)
        # Collect the IN / IN-OUT arguments in declaration order.
        strQuery = """select lower(ARGUMENT_NAME), lower(DATA_TYPE), SEQUENCE, IN_OUT
            FROM ALL_ARGUMENTS
            WHERE object_name = :strSpecifiName AND ARGUMENT_NAME is not null
            AND IN_OUT IN ('IN', 'IN/OUT')
            ORDER BY POSITION"""
        self.cursor.execute(strQuery, {'strSpecifiName': strSpecifiName})
        rows = self.cursor.fetchall()
        parameters = []
        if rows:
            for row in rows:
                (ARGUMENT_NAME, DATA_TYPE, SEQUENCE, IN_OUT) = row
                if ARGUMENT_NAME:
                    parameters.append(ARGUMENT_NAME + " " + DATA_TYPE)
                else:
                    parameters.append(DATA_TYPE)
        # A function's return value is reported as a single OUT argument.
        strQuery = """select lower(DATA_TYPE)
            FROM ALL_ARGUMENTS
            WHERE object_name = :strSpecifiName
            AND IN_OUT = 'OUT'"""
        self.cursor.execute(strQuery, {'strSpecifiName': strSpecifiName})
        rows = self.cursor.fetchall()
        strReturn = None
        if rows:
            if len(rows) > 1:
                print("More than one return statement?, please check code")
            else:
                DATA_TYPE = rows[0][0]
                strReturn = DATA_TYPE
        return (strSpecifiName.lower(), parameters, strReturn, None, strDefinition)
class DdlOracle(DdlCommonInterface):
def __init__(self, strDbms):
DdlCommonInterface.__init__(self, strDbms)
self.params['max_id_len'] = { 'default' : 63 }
self.params['alter_default'] = ['ALTER TABLE %(table_name)s | |
APK's label according to a labeling scheme
targetLabel = -1
targetKey = targetAPK[targetAPK.rfind("/")+1:].replace("_data", "")
if os.path.exists("%s/%s.report" % (VT_REPORTS_DIR, targetKey)):
report = eval(open("%s/%s.report" % (VT_REPORTS_DIR, targetKey)).read())
prettyPrint("VirusTotal report \"%s.report\" found" % targetKey, "debug")
if "positives" in report.keys():
if labeling == "old":
if "additional_info" in report.keys():
if "positives_delta" in report["additional_info"].keys():
targetLabel = 1 if report["positives"] - report["additional_info"]["positives_delta"] >= 1 else 0
else:
continue
if labeling == "vt1-vt1":
targetLabel = 1 if report["positives"] >= 1 else 0
elif labeling == "vt50p-vt50p":
targetLabel = 1 if report["positives"]/float(report["total"]) >= 0.5 else 0
elif labeling == "vt50p-vt1":
if report["positives"]/float(report["total"]) >= 0.5:
targetLabel = 1
elif report["positives"] == 0:
targetLabel = 0
else:
targetLabel = random.randint(0, 1)
# Start the comparison
similarities = []
if matchingDepth >= 1:
if "name" in sourceInfo.keys() and "name" in targetInfo.keys():
similarities.append(stringRatio(sourceInfo["name"], targetInfo["name"]))
if "package" in sourceInfo.keys() and "package" in targetInfo.keys():
similarities.append(stringRatio(sourceInfo["package"], targetInfo["package"]))
if "icon" in sourceInfo.keys() and "icon" in targetInfo.keys():
if sourceInfo["icon"] != None and targetInfo["icon"] != None:
sourceIcon = "%s/tmp_%s/%s" % (sourceAPK[:sourceAPK.rfind("/")], sourceInfo["package"], sourceInfo["icon"])
targetIcon = "%s/%s" % (targetAPK, targetInfo["icon"][targetInfo["icon"].rfind('/')+1:])
if os.path.exists(sourceIcon) and os.path.exists(targetIcon):
similarities.append(simImages(sourceIcon, targetIcon))
if matchingDepth >= 2:
if "activities" in sourceInfo.keys() and "activities" in targetInfo.keys():
similarities.append(listsRatio(sourceInfo["activities"], targetInfo["activities"]))
if "permissions" in sourceInfo.keys() and "permissions" in targetInfo.keys():
similarities.append(listsRatio(sourceInfo["permissions"], targetInfo["permissions"]))
if "providers" in sourceInfo.keys() and "providers" in targetInfo.keys():
similarities.append(listsRatio(sourceInfo["providers"], targetInfo["providers"]))
if "receivers" in sourceInfo.keys() and "receivers" in targetInfo.keys():
similarities.append(listsRatio(sourceInfo["receivers"], targetInfo["receivers"]))
if "services" in sourceInfo.keys() and "services" in targetInfo.keys():
similarities.append(listsRatio(sourceInfo["services"], targetInfo["services"]))
if "files" in sourceInfo.keys() and "files" in targetInfo.keys():
similarities.append(listsRatio(sourceInfo["files"], targetInfo["files"]))
if matchingDepth >= 3:
if "libraries" in sourceInfo.keys() and "libraries" in targetInfo.keys():
similarities.append(listsRatio(sourceInfo["libraries"], targetInfo["libraries"]))
if "classes" in sourceInfo.keys() and "classes" in targetInfo.keys():
similarities.append(listsRatio(sourceInfo["classes"], targetInfo["classes"]))
if "methods" in sourceInfo.keys() and "methods" in targetInfo.keys():
similarities.append(listsRatio(sourceInfo["methods"], targetInfo["methods"]))
if matchingDepth >= 4:
if os.path.exists("%s/%s_data/call_graph.gpickle" % (infoDir, sourceKey)) and os.path.exists("%s/call_graph.gpickle" % targetAPK):
try:
prettyPrint("Loading source graph from \"%s/%s_data/call_graph.gpickle\"" % (infoDir, sourceKey), "debug")
sourceGraph = nx.read_gpickle("%s/%s_data/call_graph.gpickle" % (infoDir, sourceKey))
prettyPrint("Loading target graph from \"%s/call_graph.gpickle\"" % targetAPK, "debug")
targetGraph = nx.read_gpickle("%s/call_graph.gpickle" % targetAPK)
except exceptions.EOFError as e:
prettyPrint("Could not read call source or target graphs. Skipping", "warning")
continue
if fastSearch:
isomorphic = nx.algorithms.could_be_isomorphic(sourceGraph, targetGraph)
else:
isomorphic = nx.algorithms.is_isomorphic(sourceGraph, targetGraph)
if isomorphic:
similarities.append(1.0)
else:
similarities.append(0.0)
else:
# Use SimiDroid to perform comparison
curDir = os.path.abspath(".")
os.chdir(SIMIDROID_DIR)
cmd = "java -jar SimiDroid.jar %s %s" % (sourceAPK, targetAPK)
outFile = "%s-%s.json" % (sourceAPK[sourceAPK.rfind('/')+1:].replace(".apk", ""), targetAPK[targetAPK.rfind("/")+1:].replace(".apk", ""))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
p.communicate()
if not os.path.exists(outFile):
prettyPrint("Could not find SimiDroid output file. Skipping", "warning")
continue
outContent = json.loads(open(outFile).read())
os.chdir(curDir)
if len(similarities) >= 1:
similarity = float(sum(similarities))/float(len(similarities)) if useSimiDroid == False else float(outContent["conclusion"]["simiScore"])
else:
similarity = 0.0
prettyPrint("Similarity score: %s" % similarity)
# Delete targetInfo to free memory?
prettyPrint("Releasing object and invoking Garbage Collector", "debug")
targetGraph = None
gc.collect()
if similarity >= matchingThreshold:
prettyPrint("Got a match between source \"%s\" and app \"%s\", with score %s" % (sourceAPK[sourceAPK.rfind("/")+1:].replace(".apk", ""), targetAPK[targetAPK.rfind("/")+1:].replace(".apk", ""), similarity), "output")
if useSimiDroid == False:
matchings[targetInfo["package"]] = (similarity, targetLabel)
else:
matchings[targetAPK] = (similarity, targetLabel)
currentTime = time.time()
if (fastSearch and len(matchings) >= matchWith) or (currentTime - startTime >= matchingTimeout):
# Return what we've got so far
if len(matchings) >= matchWith:
return sortDictByValue(matchings, True)
except Exception as e:
prettyPrintError(e)
return []
return sortDictByValue(matchings, True)
def matchAppsDynamic(sourceAPK, dataSource="droidmon", fastSearch=True, includeArguments=True, matchingThreshold=0.67, matchWith=10, labeling="vt1-vt1"):
    """
    Matches apps according to similarities between their traces or runtime behaviors
    :param sourceAPK: The path to the source APK (the original app you wish to match)
    :type sourceAPK: str
    :param dataSource: The source of runtime behavior to compare (options: "droidmon", "virustotal")
    :type dataSource: str
    :param fastSearch: Whether to return matchings once the maximum number of matches [matchWith] is reached
    :type fastSearch: boolean
    :param includeArguments: Whether to include method arguments in droidmon traces
    :type includeArguments: boolean
    :param matchingThreshold: A similarity percentage above which apps are considered similar
    :type matchingThreshold: float
    :param matchWith: The number of matchings to return (default: 10)
    :type matchWith: int
    :param labeling: The labeling scheme adopted to label APK's as malicious and benign
    :type labeling: str
    :return: A list of tuples (str, (float, float)) depicting the matched app, the similarity measure and the matched app's label
    """
    try:
        # Get the log/behavior of the source APK
        sourceKey = sourceAPK[sourceAPK.rfind("/")+1:].replace(".apk", "")
        if dataSource == "droidmon":
            sourceLogs = glob.glob("%s/%s*.filtered" % (LOGS_DIR, sourceKey))
            if len(sourceLogs) < 1:
                prettyPrint("Could not find \"Droidmon\" logs for app \"%s\"" % sourceKey, "warning")
                return []
            # Use the largest available log as the most complete trace.
            sourceLog = max(sourceLogs, key=os.path.getsize)
            sourceBehavior = parseDroidmonLog(sourceLog, includeArguments=includeArguments)
        else:
            if not os.path.exists("%s/%s.report" % (VT_REPORTS_DIR, sourceKey)):
                prettyPrint("Could not find a \"VirusTotal\" report for \"%s\"" % sourceKey, "warning")
                return []
            # SECURITY NOTE: reports are parsed with eval(); only run this
            # on trusted report files (ast.literal_eval would be safer).
            with open("%s/%s.report" % (VT_REPORTS_DIR, sourceKey)) as reportFile:
                report = eval(reportFile.read())
            if "additional_info" not in report.keys():
                prettyPrint("Could not find the key \"additional_info\" in the report", "warning")
                return []
            if "android-behaviour" not in report["additional_info"].keys():
                prettyPrint("Could not find the key \"android-behaviour\" in the report", "warning")
                return []
            sourceBehavior = report["additional_info"]["android-behaviour"]
        # Get the target apps
        if dataSource == "droidmon":
            targetApps = glob.glob("%s/*.filtered" % LOGS_DIR)
        elif dataSource == "virustotal":
            targetApps = glob.glob("%s/*.report" % VT_REPORTS_DIR)
        if len(targetApps) < 1:
            prettyPrint("Could not find \"Droidmon\" logs or \"VirusTotal\" reports to match apps", "warning")
            return []
        matchings = []
        similarity = 0.0
        for target in targetApps:
            # Load the target's runtime behavior.
            if dataSource == "droidmon":
                targetBehavior = parseDroidmonLog(target, includeArguments=includeArguments)
            else:
                with open(target) as targetFile:
                    report = eval(targetFile.read())
                try:
                    targetBehavior = report["additional_info"]["android-behaviour"]
                except Exception:
                    # Report lacks runtime behavior; skip it.
                    continue
            # Retrieve the APK's label according to a labeling scheme
            targetLabel = -1
            tmp = target[target.rfind("/")+1:].replace(".filtered", "")
            targetKey = tmp[:tmp.find("_")] if dataSource == "droidmon" else tmp.replace(".report", "")
            if os.path.exists("%s/%s.report" % (VT_REPORTS_DIR, targetKey)):
                with open("%s/%s.report" % (VT_REPORTS_DIR, targetKey)) as labelFile:
                    report = eval(labelFile.read())
                if "positives" in report.keys():
                    if labeling == "old":
                        if "additional_info" in report.keys():
                            if "positives_delta" in report["additional_info"].keys():
                                targetLabel = 1 if report["positives"] - report["additional_info"]["positives_delta"] >= 1 else 0
                            else:
                                continue
                    if labeling == "vt1-vt1":
                        targetLabel = 1 if report["positives"] >= 1 else 0
                    elif labeling == "vt50p-vt50p":
                        targetLabel = 1 if report["positives"]/float(report["total"]) >= 0.5 else 0
                    elif labeling == "vt50p-vt1":
                        if report["positives"]/float(report["total"]) >= 0.5:
                            targetLabel = 1
                        elif report["positives"] == 0:
                            targetLabel = 0
                    # NOTE(review): the previous trailing 'else: targetLabel
                    # = -1' reset the label computed by the "old" scheme,
                    # breaking labeling="old"; removed to match the sibling
                    # static-matching implementation.
            if targetLabel == -1:
                prettyPrint("Could not label \"%s\" under the \"%s\" scheme" % (targetKey, labeling), "warning")
                continue
            # Start the comparison
            if dataSource == "droidmon":
                # Compare traces directly.
                similarity = tracesRatio(sourceBehavior, targetBehavior)
            else:
                # Compare the different lists in "android-behaviour".
                similarity = compareVirusTotalBehavior(sourceBehavior, targetBehavior)
            if similarity >= matchingThreshold:
                prettyPrint("Got a match between source \"%s\" and app \"%s\", with score %s" % (sourceKey, targetKey, similarity), "output")
                matchings.append((targetKey, (similarity, targetLabel)))
                # Fast search: stop as soon as enough matches were found.
                # (The original nested a second, always-true length check
                # with an unreachable else branch here.)
                if fastSearch and len(matchings) >= matchWith:
                    return matchings[:matchWith]
    except Exception as e:
        prettyPrintError(e)
        return []
    return matchings
def matchTrace(sourceApp, alignTraces=False, compareTraces=False, includeArguments=False, matchingThreshold=0.50, maxChunkSize=0, labeling="vt1-vt1"):
"""
Matches a droidmon trace to other droidmon traces in Maat's repository organized as clusters according to their lengths
:param sourceApp: The path to the APK whose trace we wish to match
:type sourceApp: str
:param alignTraces: Whether to measure trace similarity according to alignment
:type alignTraces: boolean
:param compareTraces: Whether to compare traces or settle for labels of traces in a cluster
:type compareTraces: boolean
:param includeArguments: Whether to include method arguments in droidmon traces
:type includeArguments: boolean
:param matchingThreshold: A similarity percentage above which apps are considered similar
:type matchingThreshold: float
:maxChunkSize: The maximum size of chunks to shorten in traces (default: 0)
:type maxChunkSize: int
:param labeling: The labeling scheme adopted to label APK's as malicious and benign
:type labeling: str
:return: A list of tuples (str, | |
"""Provides algorithms with access to most of garage's features."""
import copy
import os
import time
import cloudpickle
from dowel import logger, tabular
# This is avoiding a circular import
from garage.experiment.deterministic import get_seed, set_seed
from garage.experiment.experiment import dump_json
from garage.experiment.snapshotter import Snapshotter
tf = None
class ExperimentStats:
    # pylint: disable=too-few-public-methods
    """Mutable record of how far an experiment has progressed.

    Args:
        total_epoch (int): Total epoches.
        total_itr (int): Total Iterations.
        total_env_steps (int): Total environment steps collected.
        last_episode (list[dict]): Last sampled episodes.
    """

    def __init__(self, total_epoch, total_itr, total_env_steps, last_episode):
        # Plain attributes: callers read and update these counters directly.
        self.last_episode = last_episode
        self.total_env_steps = total_env_steps
        self.total_itr = total_itr
        self.total_epoch = total_epoch
class TrainArgs:
    # pylint: disable=too-few-public-methods
    """Bundle of the arguments passed to train() or resume().

    Args:
        n_epochs (int): Number of epochs.
        batch_size (int): Number of environment steps in one batch.
        plot (bool): Visualize an episode of the policy after after each epoch.
        store_episodes (bool): Save episodes in snapshot.
        pause_for_plot (bool): Pause for plot.
        start_epoch (int): The starting epoch. Used for resume().
    """

    def __init__(self, n_epochs, batch_size, plot, store_episodes,
                 pause_for_plot, start_epoch):
        # Stored verbatim so a later resume() can replay the original call.
        self.start_epoch = start_epoch
        self.pause_for_plot = pause_for_plot
        self.store_episodes = store_episodes
        self.plot = plot
        self.batch_size = batch_size
        self.n_epochs = n_epochs
class Trainer:
"""Base class of trainer.
Use trainer.setup(algo, env) to setup algorithm and environment for trainer
and trainer.train() to start training.
Args:
snapshot_config (garage.experiment.SnapshotConfig): The snapshot
configuration used by Trainer to create the snapshotter.
If None, it will create one with default settings.
Note:
For the use of any TensorFlow environments, policies and algorithms,
please use TFTrainer().
Examples:
| # to train
| trainer = Trainer()
| env = Env(...)
| policy = Policy(...)
| algo = Algo(
| env=env,
| policy=policy,
| ...)
| trainer.setup(algo, env)
| trainer.train(n_epochs=100, batch_size=4000)
| # to resume immediately.
| trainer = Trainer()
| trainer.restore(resume_from_dir)
| trainer.resume()
| # to resume with modified training arguments.
| trainer = Trainer()
| trainer.restore(resume_from_dir)
| trainer.resume(n_epochs=20)
"""
def __init__(self, snapshot_config):
    # Snapshotter persists periodic training state under
    # snapshot_config.snapshot_dir, following the configured mode/gap.
    self._snapshotter = Snapshotter(snapshot_config.snapshot_dir,
                                    snapshot_config.snapshot_mode,
                                    snapshot_config.snapshot_gap)
    self._has_setup = False    # flipped to True by setup(); guards train()/save()
    self._plot = False
    self._seed = None
    self._train_args = None    # TrainArgs; populated by train()/restore()
    # Running totals, updated as episodes are collected.
    self._stats = ExperimentStats(total_itr=0,
                                  total_env_steps=0,
                                  total_epoch=0,
                                  last_episode=None)
    self._algo = None
    self._env = None
    self._sampler = None       # adopted from the algo in setup(), if present
    self._plotter = None
    self._start_time = None        # wall-clock start of training (set elsewhere)
    self._itr_start_time = None    # wall-clock start of the current iteration
    # These two are updated manually by the algorithm inside step_epochs().
    self.step_itr = None
    self.step_episode = None
    # only used for off-policy algorithms
    self.enable_logging = True
    self._n_workers = None
    self._worker_class = None
    self._worker_args = None
def setup(self, algo, env):
    """Register the algorithm and environment with this trainer.

    Saves ``algo`` and ``env`` on the trainer and, when the algorithm
    carries its own sampler, adopts that sampler as well.

    Note:
        After setup() is called all variables in session should have been
        initialized. setup() respects existing values in session so
        policy weights can be loaded before setup().

    Args:
        algo (RLAlgorithm): An algorithm instance. If this algo wants to
            use samplers, it should have a `_sampler` field.
        env (Environment): An environment instance.
    """
    self._algo = algo
    self._env = env
    self._seed = get_seed()
    # pylint: disable=protected-access
    if hasattr(algo, '_sampler'):
        self._sampler = algo._sampler
    self._has_setup = True
def _start_worker(self):
    """Start the Plotter worker (no-op unless plotting is enabled)."""
    if not self._plot:
        return
    # Imported lazily so non-plotting runs never pay for the plotter deps.
    # pylint: disable=import-outside-toplevel
    from garage.plotter import Plotter
    self._plotter = Plotter()
    self._plotter.init_plot(self.get_env_copy(), self._algo.policy)
def _shutdown_worker(self):
"""Shutdown Plotter and Sampler workers."""
if self._sampler is not None:
self._sampler.shutdown_worker()
if self._plot:
self._plotter.close()
def obtain_episodes(self,
itr,
batch_size=None,
agent_update=None,
env_update=None):
"""Obtain one batch of episodes.
Args:
itr (int): Index of iteration (epoch).
batch_size (int): Number of steps in batch. This is a hint that the
sampler may or may not respect.
agent_update (object): Value which will be passed into the
`agent_update_fn` before doing sampling episodes. If a list is
passed in, it must have length exactly `factory.n_workers`, and
will be spread across the workers.
env_update (object): Value which will be passed into the
`env_update_fn` before sampling episodes. If a list is passed
in, it must have length exactly `factory.n_workers`, and will
be spread across the workers.
Raises:
ValueError: If the trainer was initialized without a sampler, or
batch_size wasn't provided here or to train.
Returns:
EpisodeBatch: Batch of episodes.
"""
if self._sampler is None:
raise ValueError('trainer was not initialized with `sampler`. '
'the algo should have a `_sampler` field when'
'`setup()` is called')
if batch_size is None and self._train_args.batch_size is None:
raise ValueError(
'trainer was not initialized with `batch_size`. '
'Either provide `batch_size` to trainer.train, '
' or pass `batch_size` to trainer.obtain_samples.')
episodes = None
if agent_update is None:
policy = getattr(self._algo, 'exploration_policy', None)
if policy is None:
# This field should exist, since self.make_sampler would have
# failed otherwise.
policy = self._algo.policy
agent_update = policy.get_param_values()
episodes = self._sampler.obtain_samples(
itr, (batch_size or self._train_args.batch_size),
agent_update=agent_update,
env_update=env_update)
self._stats.total_env_steps += sum(episodes.lengths)
return episodes
def obtain_samples(self,
itr,
batch_size=None,
agent_update=None,
env_update=None):
"""Obtain one batch of samples.
Args:
itr (int): Index of iteration (epoch).
batch_size (int): Number of steps in batch.
This is a hint that the sampler may or may not respect.
agent_update (object): Value which will be passed into the
`agent_update_fn` before sampling episodes. If a list is passed
in, it must have length exactly `factory.n_workers`, and will
be spread across the workers.
env_update (object): Value which will be passed into the
`env_update_fn` before sampling episodes. If a list is passed
in, it must have length exactly `factory.n_workers`, and will
be spread across the workers.
Raises:
ValueError: Raised if the trainer was initialized without a
sampler, or batch_size wasn't provided here
or to train.
Returns:
list[dict]: One batch of samples.
"""
eps = self.obtain_episodes(itr, batch_size, agent_update, env_update)
return eps.to_list()
def save(self, epoch):
    """Snapshot the current training state for the given epoch.

    Args:
        epoch (int): Epoch.

    Raises:
        NotSetupError: if save() is called before the trainer is set up.
    """
    if not self._has_setup:
        raise NotSetupError('Use setup() to setup trainer before saving.')
    logger.log('Saving snapshot...')
    # Arguments first, then live state — everything restore() needs.
    params = {
        'seed': self._seed,
        'train_args': self._train_args,
        'stats': self._stats,
        'env': self._env,
        'algo': self._algo,
        'n_workers': self._n_workers,
        'worker_class': self._worker_class,
        'worker_args': self._worker_args,
    }
    self._snapshotter.save_itr_params(epoch, params)
    logger.log('Saved')
def restore(self, from_dir, from_epoch='last'):
    """Restore experiment from snapshot.

    Args:
        from_dir (str): Directory of the pickle file
            to resume experiment from.
        from_epoch (str or int): The epoch to restore from.
            Can be 'first', 'last' or a number.
            Not applicable when snapshot_mode='last'.

    Returns:
        TrainArgs: Arguments for train().
    """
    saved = self._snapshotter.load(from_dir, from_epoch)
    self._seed = saved['seed']
    self._train_args = saved['train_args']
    self._stats = saved['stats']
    set_seed(self._seed)
    self.setup(env=saved['env'], algo=saved['algo'])
    last_epoch = self._stats.total_epoch
    fmt = '{:<20} {:<15}'
    logger.log('Restore from snapshot saved in %s' %
               self._snapshotter.snapshot_dir)
    # Echo the restored arguments and statistics as a two-column table.
    for label, value in (
            ('-- Train Args --', '-- Value --'),
            ('n_epochs', self._train_args.n_epochs),
            ('last_epoch', last_epoch),
            ('batch_size', self._train_args.batch_size),
            ('store_episodes', self._train_args.store_episodes),
            ('pause_for_plot', self._train_args.pause_for_plot),
            ('-- Stats --', '-- Value --'),
            ('last_itr', self._stats.total_itr),
            ('total_env_steps', self._stats.total_env_steps)):
        logger.log(fmt.format(label, value))
    # Resume from the epoch after the one stored in the snapshot.
    self._train_args.start_epoch = last_epoch + 1
    return copy.copy(self._train_args)
def log_diagnostics(self, pause_for_plot=False):
    """Log iteration timing, total env steps, and optionally update the plot.

    Args:
        pause_for_plot (bool): Pause for plot.
    """
    logger.log('Time %.2f s' % (time.time() - self._start_time))
    logger.log('EpochTime %.2f s' % (time.time() - self._itr_start_time))
    tabular.record('TotalEnvSteps', self._stats.total_env_steps)
    logger.log(tabular)
    if self._plot:
        self._plotter.update_plot(self._algo.policy,
                                  self._algo.max_episode_length)
        if pause_for_plot:
            # The prompt previously contained a stray '" "' left over from a
            # bad line join ('Press Enter to " "continue...').
            input('Plotting evaluation run: Press Enter to continue...')
def train(self,
          n_epochs,
          batch_size=None,
          plot=False,
          store_episodes=False,
          pause_for_plot=False):
    """Start training.

    Args:
        n_epochs (int): Number of epochs.
        batch_size (int or None): Number of environment steps in one batch.
        plot (bool): Visualize an episode from the policy after each epoch.
        store_episodes (bool): Save episodes in snapshot.
        pause_for_plot (bool): Pause for plot.

    Raises:
        NotSetupError: If train() is called before setup().

    Returns:
        float: The average return in last epoch cycle.
    """
    if not self._has_setup:
        raise NotSetupError(
            'Use setup() to setup trainer before training.')
    # Save arguments for restore
    self._train_args = TrainArgs(n_epochs=n_epochs,
                                 batch_size=batch_size,
                                 plot=plot,
                                 store_episodes=store_episodes,
                                 pause_for_plot=pause_for_plot,
                                 start_epoch=0)
    self._plot = plot
    self._start_worker()
    # Record a JSON summary of this experiment next to the snapshots.
    log_dir = self._snapshotter.snapshot_dir
    summary_file = os.path.join(log_dir, 'experiment.json')
    dump_json(summary_file, self)
    # The algorithm drives the epoch loop (via step_epochs()).
    average_return = self._algo.train(self)
    self._shutdown_worker()
    return average_return
def step_epochs(self):
"""Step through each epoch.
This function returns a magic generator. When iterated through, this
generator automatically performs services such as snapshotting and log
management. It is used inside train() in each algorithm.
The generator initializes two variables: `self.step_itr` and
`self.step_episode`. To use the generator, these two have to be
updated manually in each epoch, as the example shows below.
Yields:
int: The next training epoch.
Examples:
for epoch in trainer.step_epochs():
trainer.step_episode = trainer.obtain_samples(...)
self.train_once(...)
trainer.step_itr += | |
# source repository: def670/lfd
import math
import numpy
import random
import operator
import types
import numpy as np
import h5py
import IPython as ipy
import os
import time
import argparse
from lfd.mmqe import colorize
def redprint(msg):
    # Print msg to stdout in bold red (Python 2 print statement).
    print colorize.colorize(msg, "red", bold=True)
def yellowprint(msg):
    # Print msg to stdout in bold yellow (Python 2 print statement).
    print colorize.colorize(msg, "yellow", bold=True)
class ArgumentParser(argparse.ArgumentParser):
    """argparse.ArgumentParser that regroups each subparser's options into a
    nested Namespace attribute named after the selected subcommand.

    NOTE(review): relies on private argparse internals (_subparsers,
    _optionals, _actions) and expects the subparser dest to be named
    'subparser_name' — confirm both when upgrading Python/argparse.
    """
    def parse_args(self, *args, **kw):
        # Parse normally first, then redistribute the flat result.
        res = argparse.ArgumentParser.parse_args(self, *args, **kw)
        from argparse import _HelpAction, _SubParsersAction
        for x in self._subparsers._actions:
            if not isinstance(x, _SubParsersAction):
                continue
            v = x.choices[res.subparser_name] # select the subparser name
            subparseargs = {}
            for x1 in v._optionals._actions: # loop over the actions
                if isinstance(x1, _HelpAction): # skip help
                    continue
                n = x1.dest
                if hasattr(res, n): # pop the argument
                    subparseargs[n] = getattr(res, n)
                    delattr(res, n)
            # Re-attach the collected options as res.<subcommand>.
            res.__setattr__(res.subparser_name, argparse.Namespace(**subparseargs))
        return res
class Bunch(object):
    """Wrap a dict so its keys become object attributes."""

    def __init__(self, adict):
        # Copy every key/value pair straight into the instance namespace.
        self.__dict__.update(adict)
# Define a context manager to suppress stdout
class suppress_stdout(object):
    '''
    A context manager for doing a "deep suppression" of stdout in
    Python, i.e. will suppress all print, even if the print originates in a
    compiled C/Fortran sub-function.

    Works at the file-descriptor level: fd 1 is redirected to os.devnull on
    entry and restored on exit.

    NOTE(review): the null fd and the saved stdout duplicate are created at
    construction time rather than in __enter__, and both are closed on
    exit, so each instance is effectively single-use — confirm callers
    never re-enter the same instance.
    '''
    def __init__(self):
        # Open a null file
        while (True):
            try:
                self.null_fds = os.open(os.devnull,os.O_RDWR)
                break
            except OSError:
                # Retry until a descriptor becomes available.
                time.sleep(1)
        # Save the actual stdout file descriptor
        self.save_fds = os.dup(1)
    def __enter__(self):
        # Assign the null pointers to stdout
        os.dup2(self.null_fds,1)
        os.close(self.null_fds)
    def __exit__(self, *_):
        # Re-assign the real stdout back
        os.dup2(self.save_fds,1)
        # Close the null file
        os.close(self.save_fds)
class Transform(object):
    """
    Rotation and translation represented as 4 x 4 homogeneous matrix.
    """
    def __init__(self, matrix):
        self.matrix = numpy.array(matrix)   # 4x4 homogeneous transform
        self.matrix_inv = None              # lazily computed by inverse()
        self.zRot = False                   # True only for Pose subclass

    def inverse(self):
        """
        Returns transformation matrix that is the inverse of this one
        """
        # BUGFIX: was `== None`, which performs an elementwise NumPy
        # comparison once matrix_inv is an array, making every second call
        # raise "truth value of an array is ambiguous".
        if self.matrix_inv is None:
            self.matrix_inv = numpy.linalg.inv(self.matrix)
        return Transform(self.matrix_inv)

    def __neg__(self):
        return self.inverse()

    def compose(self, trans):
        """
        Returns composition of self and trans
        """
        tr = Transform(numpy.dot(self.matrix, trans.matrix))
        if self.zRot and trans.zRot:
            # Composing two 2.5D poses yields another 2.5D pose.
            return tr.pose()
        else:
            return tr

    def __mul__(self, other):
        return self.compose(other)

    def pose(self, zthr = 0.01, fail = True):
        """
        Convert to Pose; requires the z axis to be (nearly) preserved.
        """
        if abs(1 - self.matrix[2][2]) < zthr:
            theta = math.atan2(self.matrix[1][0], self.matrix[0][0])
            return Pose(self.matrix[0][3], self.matrix[1][3], self.matrix[2][3], theta)
        elif fail:
            # Converted from Python-2-only statements to forms valid in
            # both Python 2 and 3.
            print(self.matrix)
            raise Exception("Not a valid 2.5D Pose")
        else:
            return None

    def point(self):
        return self.pose().point()

    def applyToPoint(self, point):
        """
        Transform a point into a new point.
        """
        p = numpy.dot(self.matrix, point.matrix())
        return Point(p[0], p[1], p[2], p[3])

    def __call__(self, point):
        return self.applyToPoint(point)

    def __repr__(self):
        return str(self.matrix)

    def shortStr(self, trim = False):
        return self.__repr__()

    __str__ = __repr__
class Pose(Transform): # 2.5D transform
    """
    Represent the x, y, z, theta pose of an object in 2.5D space
    """
    def __init__(self, x, y, z, theta):
        self.x = x
        """x coordinate"""
        self.y = y
        """y coordinate"""
        self.z = z
        """z coordinate"""
        self.theta = fixAngle02Pi(theta)
        """rotation in radians"""
        self.initTrans()
        # Mark as pure z-rotation so Transform.compose collapses
        # Pose * Pose back into a Pose.
        self.zRot = True
    def initTrans(self):
        # (Re)build the homogeneous 4x4 matrix from (x, y, z, theta) and
        # invalidate the cached repr string.
        cosTh = math.cos(self.theta)
        sinTh = math.sin(self.theta)
        self.reprString = None
        Transform.__init__(self, [[cosTh, -sinTh, 0.0, self.x],
                                  [sinTh, cosTh, 0.0, self.y],
                                  [0.0, 0.0, 1.0, self.z],
                                  [0, 0, 0, 1]])
    # Each setter mutates one field and rebuilds the matrix so the
    # underlying Transform stays consistent.
    def setX(self, x):
        self.x = x
        self.initTrans()
    def setY(self, y):
        self.y = y
        self.initTrans()
    def setZ(self, z):
        self.z = z
        self.initTrans()
    def setTheta(self, theta):
        # NOTE(review): unlike __init__, theta is NOT normalized through
        # fixAngle02Pi here — confirm whether callers depend on that.
        self.theta = theta
        self.initTrans()
    def average(self, other, alpha):
        """
        Weighted average of this pose and other (alpha weights self)
        """
        return Pose(alpha * self.x + (1 - alpha) * other.x,
                    alpha * self.y + (1 - alpha) * other.y,
                    alpha * self.z + (1 - alpha) * other.z,
                    angleAverage(self.theta, other.theta, alpha))
    def point(self):
        """
        Return just the x, y, z parts represented as a C{Point}
        """
        return Point(self.x, self.y, self.z)
    def pose(self, fail = False):
        return self
    def near(self, pose, distEps, angleEps):
        """
        Return True if pose is within distEps and angleEps of self
        """
        return self.point().isNear(pose.point(), distEps) and \
               nearAngle(self.theta, pose.pose().theta, angleEps)
    def diff(self, pose):
        """
        Return a pose that is the difference between self and pose (in
        x, y, z, and theta)
        """
        return Pose(self.x-pose.x,
                    self.y-pose.y,
                    self.z-pose.z,
                    fixAnglePlusMinusPi(self.theta-pose.theta))
    def distance(self, pose):
        """
        Return the distance between the x,y,z part of self and the x,y,z
        part of pose.
        """
        return self.point().distance(pose.point())
    def totalDist(self, pose, angleScale = 1):
        # Combined metric: translation distance plus scaled angular offset.
        return self.distance(pose) + \
               abs(fixAnglePlusMinusPi(self.theta-pose.theta)) * angleScale
    def inverse(self):
        """
        Return a transformation matrix that is the inverse of the
        transform associated with this pose.
        """
        return super(Pose, self).inverse().pose()
    def xyztTuple(self):
        """
        Representation of pose as a tuple of values
        """
        return (self.x, self.y, self.z, self.theta)
    def corrupt(self, e, eAng = None):
        # Add uniform noise in [-e, e] to x, y, z (eAng bound for theta).
        def corrupt(x, e):
            return x + random.uniform(-e, e)
        eAng = eAng or e
        return Pose(corrupt(self.x, e), corrupt(self.y, e), corrupt(self.z, e),
                    fixAnglePlusMinusPi(corrupt(self.theta, eAng)))
    def corruptGauss(self, mu, sigma, noZ = False):
        # Add Gaussian noise to each coordinate; optionally leave z exact.
        def corrupt(x):
            return x + random.gauss(mu, sigma)
        return Pose(corrupt(self.x), corrupt(self.y),
                    self.z if noZ else corrupt(self.z),
                    fixAnglePlusMinusPi(corrupt(self.theta)))
    def __repr__(self):
        # Built lazily and cached; invalidated by initTrans().
        if not self.reprString:
            # An attempt to make string equality useful
            self.reprString = 'Pose[' + prettyString(self.x) + ', ' +\
                              prettyString(self.y) + ', ' +\
                              prettyString(self.z) + ', ' +\
                              (prettyString(self.theta) \
                               if self.theta <= 6.283 else prettyString(0.0))\
                              + ']'
            #self.reprString = 'Pose'+ prettyString(self.xyztTuple())
        return self.reprString
    def shortStr(self, trim = False):
        return self.__repr__()
    def __eq__(self, other):
        # Equality (and hashing below) go through the repr string, so two
        # poses compare equal when their printed forms match.
        return str(self) == str(other)
    def __hash__(self):
        return str(self).__hash__()
    __str__ = __repr__
class Point:
    """
    A point in 3-space, stored as x, y, z plus a homogeneous w component
    (w defaults to 1.0; w == 0 denotes a delta/direction).
    """
    def __init__(self, x, y, z, w=1.0):
        self.x = x  # x coordinate
        self.y = y  # y coordinate
        self.z = z  # z coordinate
        self.w = w  # homogeneous coordinate
    def matrix(self):
        # Rebuilt on every call so later coordinate changes are picked up.
        return numpy.array([self.x, self.y, self.z, self.w])
    def isNear(self, point, distEps):
        """
        Return true if the distance between self and point is less
        than distEps
        """
        return self.distance(point) < distEps
    def distance(self, point):
        """
        Euclidean distance between two points
        """
        return math.sqrt(self.distanceSq(point))
    def distanceXY(self, point):
        """
        Euclidean distance between the XY projections of two points
        """
        return math.sqrt(self.distanceSqXY(point))
    def distanceSq(self, point):
        """
        Squared Euclidean distance between two points
        """
        dx, dy, dz = self.x - point.x, self.y - point.y, self.z - point.z
        return dx * dx + dy * dy + dz * dz
    def distanceSqXY(self, point):
        """
        Squared Euclidean distance between the XY projections
        """
        dx, dy = self.x - point.x, self.y - point.y
        return dx * dx + dy * dy
    def magnitude(self):
        """
        Magnitude of this point, interpreted as a vector in 3-space
        """
        return math.sqrt(self.x ** 2 + self.y ** 2 + self.z ** 2)
    def xyzTuple(self):
        """
        Return tuple of x, y, z values
        """
        return (self.x, self.y, self.z)
    def pose(self, angle=0.0):
        """
        Return a pose with the position of the point.
        """
        return Pose(self.x, self.y, self.z, angle)
    def point(self):
        """
        Return a point, that is, self.
        """
        return self
    def __repr__(self):
        if self.w == 1:
            return 'Point'+ prettyString(self.xyzTuple())
        if self.w == 0:
            return 'Delta'+ prettyString(self.xyzTuple())
        else:
            return 'PointW'+ prettyString(self.xyzTuple()+(self.w,))
    def shortStr(self, trim=False):
        return self.__repr__()
    def angleToXY(self, p):
        """
        Return angle in radians of vector from self to p (in the xy projection)
        """
        return math.atan2(p.y - self.y, p.x - self.x)
    def add(self, point):
        """
        Vector addition (w is ignored; result has w == 1)
        """
        return Point(self.x + point.x, self.y + point.y, self.z + point.z)
    def __add__(self, point):
        return self.add(point)
    def sub(self, point):
        """
        Vector subtraction (w is ignored; result has w == 1)
        """
        return Point(self.x - point.x, self.y - point.y, self.z - point.z)
    def __sub__(self, point):
        return self.sub(point)
    def scale(self, s):
        """
        Vector scaling by scalar s
        """
        return Point(self.x * s, self.y * s, self.z * s)
    def __rmul__(self, s):
        return self.scale(s)
    def dot(self, p):
        """
        Dot product (3D; w is ignored)
        """
        return self.x * p.x + self.y * p.y + self.z * p.z
class LineXY:
"""
Line in 2D space
"""
def __init__(self, p1, p2):
    """
    Initialize with two points that are on the line. Actually
    store a normal and an offset from the origin
    """
    # (nx, ny) is the unit normal (line direction rotated +90 degrees);
    # off = n . p holds for every point p on the line.
    self.theta = p1.angleToXY(p2)
    """normal angle"""
    self.nx = -math.sin(self.theta)
    """x component of normal vector"""
    self.ny = math.cos(self.theta)
    """y component of normal vector"""
    self.off = p1.x * self.nx + p1.y * self.ny
    """offset along normal"""
def pointOnLine(self, p, eps):
    """
    Return true if p is within eps of the line
    """
    # Perpendicular distance is |n . p - off| since (nx, ny) is unit length.
    dist = abs(p.x*self.nx + p.y*self.ny - self.off)
    return dist < eps
def __repr__(self):
    # Show the stored normal vector and offset, not the defining points.
    return 'LineXY'+ prettyString((self.nx, self.ny, self.off))
def shortStr(self, trim = False):
return | |
= (
TautomerScore('benzoquinone', '[#6]1([#6]=[#6][#6]([#6]=[#6]1)=,:[N,S,O])=,:[N,S,O]', 25),
TautomerScore('oxim', '[#6]=[N][OH]', 4),
TautomerScore('C=O', '[#6]=,:[#8]', 2),
TautomerScore('N=O', '[#7]=,:[#8]', 2),
TautomerScore('P=O', '[#15]=,:[#8]', 2),
TautomerScore('C=hetero', '[#6]=[!#1;!#6]', 1),
TautomerScore('methyl', '[CX4H3]', 1),
TautomerScore('guanidine terminal=N', '[#7][#6](=[NR0])[#7H0]', 1),
TautomerScore('guanidine endocyclic=N', '[#7;R][#6;R]([N])=[#7;R]', 2),
TautomerScore('aci-nitro', '[#6]=[N+]([O-])[OH]', -4),
)
#: The default value for the maximum number of tautomers to enumerate, a limit to prevent combinatorial explosion.
MAX_TAUTOMERS = 1000
class TautomerCanonicalizer(object):
    """Pick a canonical tautomer by enumerating all candidates and choosing
    the highest-scoring one (ties broken alphabetically by SMILES).
    """
    def __init__(self, transforms=TAUTOMER_TRANSFORMS, scores=TAUTOMER_SCORES, max_tautomers=MAX_TAUTOMERS):
        """
        :param transforms: A list of TautomerTransforms to use to enumerate tautomers.
        :param scores: A list of TautomerScores to use to choose the canonical tautomer.
        :param max_tautomers: The maximum number of tautomers to enumerate, a limit to prevent combinatorial explosion.
        """
        self.transforms = transforms
        self.scores = scores
        self.max_tautomers = max_tautomers
    def __call__(self, mol):
        """Calling a TautomerCanonicalizer instance like a function is the same as calling its canonicalize(mol) method."""
        return self.canonicalize(mol)
    def canonicalize(self, mol):
        """Return a canonical tautomer by enumerating and scoring all possible tautomers.

        :param mol: The input molecule.
        :type mol: rdkit.Chem.rdchem.Mol
        :return: The canonical tautomer.
        :rtype: rdkit.Chem.rdchem.Mol
        """
        # TODO: Overload the mol parameter to pass a list of pre-enumerated tautomers
        tautomers = self._enumerate_tautomers(mol)
        if len(tautomers) == 1:
            return tautomers[0]
        # Calculate score for each tautomer
        highest = None
        for t in tautomers:
            smiles = Chem.MolToSmiles(t, isomericSmiles=True)
            log.debug('Tautomer: %s', smiles)
            score = 0
            # Add aromatic ring scores: +100 per aromatic ring, +150 more
            # when the ring is all-carbon.
            ssr = Chem.GetSymmSSSR(t)
            for ring in ssr:
                btypes = {t.GetBondBetweenAtoms(*pair).GetBondType() for pair in pairwise(ring)}
                elements = {t.GetAtomWithIdx(idx).GetAtomicNum() for idx in ring}
                if btypes == {BondType.AROMATIC}:
                    log.debug('Score +100 (aromatic ring)')
                    score += 100
                    if elements == {6}:
                        log.debug('Score +150 (carbocyclic aromatic ring)')
                        score += 150
            # Add SMARTS scores
            for tscore in self.scores:
                for match in t.GetSubstructMatches(tscore.smarts):
                    log.debug('Score %+d (%s)', tscore.score, tscore.name)
                    score += tscore.score
            # Add (P,S,Se,Te)-H scores: -1 per hydrogen on these atoms.
            for atom in t.GetAtoms():
                if atom.GetAtomicNum() in {15, 16, 34, 52}:
                    hs = atom.GetTotalNumHs()
                    if hs:
                        log.debug('Score %+d (%s-H bonds)', -hs, atom.GetSymbol())
                        score -= hs
            # Set as highest if score higher or if score equal and smiles comes first alphabetically
            if not highest or highest['score'] < score or (highest['score'] == score and smiles < highest['smiles']):
                log.debug('New highest tautomer: %s (%s)', smiles, score)
                highest = {'smiles': smiles, 'tautomer': t, 'score': score}
        return highest['tautomer']
    @memoized_property
    def _enumerate_tautomers(self):
        # Cached TautomerEnumerator instance; being a property, it is used
        # as self._enumerate_tautomers(mol), i.e. the enumerator is called
        # directly with the molecule.
        return TautomerEnumerator(self.transforms, self.max_tautomers)
class TautomerEnumerator(object):
    """Enumerate tautomers by repeatedly applying transform rules until no
    new structures appear (or max_tautomers is reached).
    """
    def __init__(self, transforms=TAUTOMER_TRANSFORMS, max_tautomers=MAX_TAUTOMERS):
        """
        :param transforms: A list of TautomerTransforms to use to enumerate tautomers.
        :param max_tautomers: The maximum number of tautomers to enumerate (limit to prevent combinatorial explosion).
        """
        self.transforms = transforms
        self.max_tautomers = max_tautomers
    def __call__(self, mol):
        """Calling a TautomerEnumerator instance like a function is the same as calling its enumerate(mol) method."""
        return self.enumerate(mol)
    def enumerate(self, mol):
        """Enumerate all possible tautomers and return them as a list.

        :param mol: The input molecule.
        :type mol: rdkit.Chem.rdchem.Mol
        :return: A list of all possible tautomers of the molecule.
        :rtype: list of rdkit.Chem.rdchem.Mol
        """
        smiles = Chem.MolToSmiles(mol, isomericSmiles=True)
        # Canonical SMILES -> Mol for every tautomer discovered so far.
        tautomers = {smiles: copy.deepcopy(mol)}
        # Create a kekulized form of the molecule to match the SMARTS against
        kekulized = copy.deepcopy(mol)
        Chem.Kekulize(kekulized)
        kekulized = {smiles: kekulized}
        done = set()  # tautomers that have had every transform applied
        while len(tautomers) < self.max_tautomers:
            # sorted() gives a deterministic expansion order.
            for tsmiles in sorted(tautomers):
                if tsmiles in done:
                    continue
                for transform in self.transforms:
                    for match in kekulized[tsmiles].GetSubstructMatches(transform.tautomer):
                        # log.debug('Matched rule: %s to %s for %s', transform.name, tsmiles, match)
                        # Create a copy of in the input molecule so we can modify it
                        # Use kekule form so bonds are explicitly single/double instead of aromatic
                        product = copy.deepcopy(kekulized[tsmiles])
                        # Remove a hydrogen from the first matched atom and add one to the last
                        first = product.GetAtomWithIdx(match[0])
                        last = product.GetAtomWithIdx(match[-1])
                        # log.debug('%s: H%s -> H%s' % (first.GetSymbol(), first.GetTotalNumHs(), first.GetTotalNumHs() - 1))
                        # log.debug('%s: H%s -> H%s' % (last.GetSymbol(), last.GetTotalNumHs(), last.GetTotalNumHs() + 1))
                        first.SetNumExplicitHs(max(0, first.GetTotalNumHs() - 1))
                        last.SetNumExplicitHs(last.GetTotalNumHs() + 1)
                        # Remove any implicit hydrogens from the first and last atoms now we have set the count explicitly
                        first.SetNoImplicit(True)
                        last.SetNoImplicit(True)
                        # Adjust bond orders
                        for bi, pair in enumerate(pairwise(match)):
                            if transform.bonds:
                                # Set the resulting bond types as manually specified in the transform
                                # log.debug('%s-%s: %s -> %s' % (product.GetAtomWithIdx(pair[0]).GetSymbol(), product.GetAtomWithIdx(pair[1]).GetSymbol(), product.GetBondBetweenAtoms(*pair).GetBondType(), transform.bonds[bi]))
                                product.GetBondBetweenAtoms(*pair).SetBondType(transform.bonds[bi])
                            else:
                                # If no manually specified bond types, just swap single and double bonds
                                current_bond_type = product.GetBondBetweenAtoms(*pair).GetBondType()
                                product.GetBondBetweenAtoms(*pair).SetBondType(BondType.DOUBLE if current_bond_type == BondType.SINGLE else BondType.SINGLE)
                                # log.debug('%s-%s: %s -> %s' % (product.GetAtomWithIdx(pair[0]).GetSymbol(), product.GetAtomWithIdx(pair[1]).GetSymbol(), current_bond_type, product.GetBondBetweenAtoms(*pair).GetBondType()))
                        # Adjust charges
                        if transform.charges:
                            for ci, idx in enumerate(match):
                                atom = product.GetAtomWithIdx(idx)
                                # log.debug('%s: C%s -> C%s' % (atom.GetSymbol(), atom.GetFormalCharge(), atom.GetFormalCharge() + transform.charges[ci]))
                                atom.SetFormalCharge(atom.GetFormalCharge() + transform.charges[ci])
                        try:
                            # Sanitization rejects chemically invalid products.
                            Chem.SanitizeMol(product)
                            smiles = Chem.MolToSmiles(product, isomericSmiles=True)
                            log.debug('Applied rule: %s to %s', transform.name, tsmiles)
                            if smiles not in tautomers:
                                log.debug('New tautomer produced: %s' % smiles)
                                kekulized_product = copy.deepcopy(product)
                                Chem.Kekulize(kekulized_product)
                                tautomers[smiles] = product
                                kekulized[smiles] = kekulized_product
                            else:
                                log.debug('Previous tautomer produced again: %s' % smiles)
                        except ValueError:
                            log.debug('ValueError Applying rule: %s', transform.name)
                done.add(tsmiles)
            # Fixed point: every known tautomer has been fully expanded.
            if len(tautomers) == len(done):
                break
        else:
            # while-else: only reached when the size cap stopped the loop.
            log.warning('Tautomer enumeration stopped at maximum %s', self.max_tautomers)
        # Clean up stereochemistry
        for tautomer in tautomers.values():
            Chem.AssignStereochemistry(tautomer, force=True, cleanIt=True)
            for bond in tautomer.GetBonds():
                if bond.GetBondType() == BondType.DOUBLE and bond.GetStereo() > BondStereo.STEREOANY:
                    begin = bond.GetBeginAtomIdx()
                    end = bond.GetEndAtomIdx()
                    # If any tautomer lacks this double bond, its stereo is
                    # not a fixed feature — strip the direction flags.
                    for othertautomer in tautomers.values():
                        if not othertautomer.GetBondBetweenAtoms(begin, end).GetBondType() == BondType.DOUBLE:
                            neighbours = tautomer.GetAtomWithIdx(begin).GetBonds() + tautomer.GetAtomWithIdx(end).GetBonds()
                            for otherbond in neighbours:
                                if otherbond.GetBondDir() in {BondDir.ENDUPRIGHT, BondDir.ENDDOWNRIGHT}:
                                    otherbond.SetBondDir(BondDir.NONE)
                            Chem.AssignStereochemistry(tautomer, force=True, cleanIt=True)
                            log.debug('Removed stereochemistry from unfixed double bond')
                            break
        return list(tautomers.values())
#==============================================================================
# from .charge import ACID_BASE_PAIRS, Reionizer, Uncharger
#==============================================================================
class AcidBasePair(object):
    """An acid and its conjugate base, defined by SMARTS.

    A strength-ordered list of AcidBasePairs can be used to ensure the
    strongest acids in a molecule ionize first.
    """

    def __init__(self, name, acid, base):
        """Initialize an AcidBasePair with the following parameters:

        :param string name: A name for this AcidBasePair.
        :param string acid: SMARTS pattern for the protonated acid.
        :param string base: SMARTS pattern for the conjugate ionized base.
        """
        log.debug('Initializing AcidBasePair: %s', name)
        self.name, self.acid_str, self.base_str = name, acid, base

    @memoized_property
    def acid(self):
        # Compiled lazily on first access, then cached.
        log.debug('Loading AcidBasePair acid: %s', self.name)
        return Chem.MolFromSmarts(self.acid_str)

    @memoized_property
    def base(self):
        # Compiled lazily on first access, then cached.
        log.debug('Loading AcidBasePair base: %s', self.name)
        return Chem.MolFromSmarts(self.base_str)

    def __repr__(self):
        return 'AcidBasePair({!r}, {!r}, {!r})'.format(self.name, self.acid_str, self.base_str)

    def __str__(self):
        return self.name
#: The default list of AcidBasePairs, sorted from strongest to weakest. This list is derived from the Food and Drug
#: Administration Substance Registration System Standard Operating Procedure guide.
# NOTE(review): the '–SO3H' entry uses a Unicode en-dash while every other
# name uses an ASCII hyphen — looks unintentional; confirm before matching
# on the name string.
ACID_BASE_PAIRS = (
    AcidBasePair('-OSO3H', 'OS(=O)(=O)[OH]', 'OS(=O)(=O)[O-]'),
    AcidBasePair('–SO3H', '[!O]S(=O)(=O)[OH]', '[!O]S(=O)(=O)[O-]'),
    AcidBasePair('-OSO2H', 'O[SD3](=O)[OH]', 'O[SD3](=O)[O-]'),
    AcidBasePair('-SO2H', '[!O][SD3](=O)[OH]', '[!O][SD3](=O)[O-]'),
    AcidBasePair('-OPO3H2', 'OP(=O)([OH])[OH]', 'OP(=O)([OH])[O-]'),
    AcidBasePair('-PO3H2', '[!O]P(=O)([OH])[OH]', '[!O]P(=O)([OH])[O-]'),
    AcidBasePair('-CO2H', 'C(=O)[OH]', 'C(=O)[O-]'),
    AcidBasePair('thiophenol', 'c[SH]', 'c[S-]'),
    AcidBasePair('(-OPO3H)-', 'OP(=O)([O-])[OH]', 'OP(=O)([O-])[O-]'),
    AcidBasePair('(-PO3H)-', '[!O]P(=O)([O-])[OH]', '[!O]P(=O)([O-])[O-]'),
    AcidBasePair('phthalimide', 'O=C2c1ccccc1C(=O)[NH]2', 'O=C2c1ccccc1C(=O)[N-]2'),
    AcidBasePair('CO3H (peracetyl)', 'C(=O)O[OH]', 'C(=O)O[O-]'),
    AcidBasePair('alpha-carbon-hydrogen-nitro group', 'O=N(O)[CH]', 'O=N(O)[C-]'),
    AcidBasePair('-SO2NH2', 'S(=O)(=O)[NH2]', 'S(=O)(=O)[NH-]'),
    AcidBasePair('-OBO2H2', 'OB([OH])[OH]', 'OB([OH])[O-]'),
    AcidBasePair('-BO2H2', '[!O]B([OH])[OH]', '[!O]B([OH])[O-]'),
    AcidBasePair('phenol', 'c[OH]', 'c[O-]'),
    AcidBasePair('SH (aliphatic)', 'C[SH]', 'C[S-]'),
    AcidBasePair('(-OBO2H)-', 'OB([O-])[OH]', 'OB([O-])[O-]'),
    AcidBasePair('(-BO2H)-', '[!O]B([O-])[OH]', '[!O]B([O-])[O-]'),
    AcidBasePair('cyclopentadiene', 'C1=CC=C[CH2]1', 'c1ccc[cH-]1'),
    AcidBasePair('-CONH2', 'C(=O)[NH2]', 'C(=O)[NH-]'),
    AcidBasePair('imidazole', 'c1cnc[nH]1', 'c1cnc[n-]1'),
    AcidBasePair('-OH (aliphatic alcohol)', '[CX4][OH]', '[CX4][O-]'),
    AcidBasePair('alpha-carbon-hydrogen-keto group', 'O=C([!O])[C!H0+0]', 'O=C([!O])[C-]'),
    AcidBasePair('alpha-carbon-hydrogen-acetyl ester group', 'OC(=O)[C!H0+0]', 'OC(=O)[C-]'),
    AcidBasePair('sp carbon hydrogen', 'C#[CH]', 'C#[C-]'),
    AcidBasePair('alpha-carbon-hydrogen-sulfone group', 'CS(=O)(=O)[C!H0+0]', 'CS(=O)(=O)[C-]'),
    AcidBasePair('alpha-carbon-hydrogen-sulfoxide group', 'C[SD3](=O)[C!H0+0]', 'C[SD3](=O)[C-]'),
    AcidBasePair('-NH2', '[CX4][NH2]', '[CX4][NH-]'),
    AcidBasePair('benzyl hydrogen', 'c[CX4H2]', 'c[CX3H-]'),
    AcidBasePair('sp2-carbon hydrogen', '[CX3]=[CX3!H0+0]', '[CX3]=[CX2-]'),
    AcidBasePair('sp3-carbon hydrogen', '[CX4!H0+0]', '[CX3-]'),
)
class ChargeCorrection(object):
    """An atom that should have a certain charge applied, defined by a SMARTS pattern."""

    def __init__(self, name, smarts, charge):
        """Initialize a ChargeCorrection with the following parameters:

        :param string name: A name for this ForcedAtomCharge.
        :param string smarts: SMARTS pattern to match. Charge is applied to the first atom.
        :param int charge: The charge to apply.
        """
        log.debug('Initializing ChargeCorrection: %s', name)
        self.name, self.smarts_str, self.charge = name, smarts, charge

    @memoized_property
    def smarts(self):
        # Compiled lazily on first access, then cached by memoized_property.
        log.debug('Loading ChargeCorrection smarts: %s', self.name)
        return Chem.MolFromSmarts(self.smarts_str)

    def __repr__(self):
        return 'ChargeCorrection({!r}, {!r}, {!r})'.format(self.name, self.smarts_str, self.charge)

    def __str__(self):
        return self.name
#: The default list of ChargeCorrections.
# Each SMARTS matches an unbonded, uncharged atom (X0+0) that should in
# fact carry a formal charge; the charge is applied to the first atom.
CHARGE_CORRECTIONS = (
    ChargeCorrection('[Li,Na,K]', '[Li,Na,K;X0+0]', 1),
    ChargeCorrection('[Mg,Ca]', '[Mg,Ca;X0+0]', 2),
    ChargeCorrection('[Cl]', '[Cl;X0+0]', -1),
    # TODO: Extend to other incorrectly charged atoms
)
class Reionizer(object):
"""A class to fix charges and reionize a molecule such that the strongest acids ionize first."""
def __init__(self, acid_base_pairs=ACID_BASE_PAIRS, charge_corrections=CHARGE_CORRECTIONS):
    """Initialize a Reionizer with the following parameter:

    :param acid_base_pairs: A list of :class:`AcidBasePairs <molvs.charge.AcidBasePair>` to reionize, sorted from
                            strongest to weakest.
    :param charge_corrections: A list of :class:`ChargeCorrections <molvs.charge.ChargeCorrection>`.
    """
    log.debug('Initializing Reionizer')
    # Pair order matters: earlier (stronger) acids are ionized first.
    self.acid_base_pairs = acid_base_pairs
    self.charge_corrections = charge_corrections
def __call__(self, | |
from sdf_timing import sdfparse, sdfwrite
from sdf_timing import utils as sdfutils
from pathlib import Path
import argparse
import re
import json
from datetime import date
from collections import defaultdict
from .liberty_to_json import LibertyToJSONParser
from . import log_printer
from .log_printer import log
class JSONToSDFParser():
    """Converts timing data parsed from Liberty files (as JSON) into SDF."""
    # timing_type values handled as IOPATH (combinational) arcs rather than
    # sequential timing checks -- see getparsekey / parsesetuphold.
    ctypes = ['combinational', 'three_state_disable', 'three_state_enable', 'rising_edge', 'falling_edge', 'clear']
    # extracts cell name and design name, ignore kfactor value
    headerparser = re.compile(r'^\"?(?P<cell>[a-zA-Z_][a-zA-Z_0-9]*)\"?\s*cell\s*(?P<design>[a-zA-Z_][a-zA-Z_0-9]*)\s*(?:kfactor\s*(?P<kfactor>[0-9.]*))?\s*(?:instance\s*(?P<instance>[a-zA-Z_0-9]*))?.*') # noqa: E501
    # When True, array brackets are stripped from cell/port names
    # (see normalize_name).
    normalize_cell_names = True
    normalize_port_names = True
@classmethod
def extract_delval(cls, libentry: dict, kfactor: float):
"""Extracts SDF delval entry from Liberty structure format.
Parameters
----------
libentry: dict
The pin entry from Liberty file, containing fields
intrinsic_(rise|fall)(_min|_max)?
Returns
-------
(dict, dict):
pair of dicts containing extracted agv, max, min values from
intrinsic rise and fall entries, respectively
"""
fall = {'avg': None, 'max': None, 'min': None}
rise = {'avg': None, 'max': None, 'min': None}
if 'intrinsic_rise_min' in libentry:
rise['min'] = float(libentry['intrinsic_rise_min']) * kfactor
if 'intrinsic_rise' in libentry:
rise['avg'] = float(libentry['intrinsic_rise']) * kfactor
if 'intrinsic_rise_max' in libentry:
rise['max'] = float(libentry['intrinsic_rise_max']) * kfactor
if 'intrinsic_fall_min' in libentry:
fall['min'] = float(libentry['intrinsic_fall_min']) * kfactor
if 'intrinsic_fall' in libentry:
fall['avg'] = float(libentry['intrinsic_fall']) * kfactor
if 'intrinsic_fall_max' in libentry:
fall['max'] = float(libentry['intrinsic_fall_max']) * kfactor
return rise, fall
@classmethod
def getparsekey(cls, entrydata, direction):
"""Generates keys for entry to corresponding parsing hook.
Parameters
----------
entrydata: dict
Timing entry from Liberty-JSON structure to generate the key for
direction: str
The direction of pin, can be input, output, inout
Returns
-------
tuple: key for parser hook (direction, is_sequential)
"""
defentrydata = defaultdict(lambda: None, entrydata)
return (
direction,
(defentrydata["timing_type"] is not None and
defentrydata["timing_type"] not in cls.ctypes))
@classmethod
def is_delval_empty(cls, delval):
"""Checks if delval is empty.
Parameters
----------
delval: dict
A delval value
Returns
-------
bool: True if empty, else False
"""
for val in delval.values():
if not(val is None or float(val) == 0):
return False
return True
@classmethod
def parseiopath(cls, delval_rise, delval_fall, objectname, entrydata):
"""Parses combinational entries into IOPATH.
This hook takes combinational output entries from LIB file, and
generates the corresponding IOPATH entry in SDF.
Parameters
----------
delval_rise: dict
Delay values for IOPATH for 0->1 change
delval_fall: dict
Delay values for IOPATH for 1->0 change
objectname: str
The name of the cell containing given pin
entrydata: dict
Converted LIB struct fro given pin
Returns
-------
dict: SDF entry for a given pin
"""
if cls.normalize_port_names:
normalize = cls.normalize_name
else:
normalize = lambda x: x
paths = {}
paths['fast'] = delval_rise
paths['nominal'] = delval_fall
element = sdfutils.add_iopath(
pfrom={
"port": normalize(entrydata["related_pin"]),
"port_edge": None,
},
pto={
"port": normalize(objectname),
"port_edge": None,
},
paths=paths)
element["is_absolute"] = True
return element
    @classmethod
    def parsesetuphold(cls, delval_rise, delval_fall, objectname, entrydata):
        """Parses clock-depending entries into timingcheck entry.

        This hook takes timing information from LIB file for pins depending on
        clocks (like setup, hold, etc.), and generates the corresponding
        TIMINGCHECK entry in SDF.

        Parameters
        ----------
        delval_rise: dict
            Delay values for TIMINGCHECK
        delval_fall: dict
            Delay values for TIMINGCHECK (should not differ from delval_rise)
        objectname: str
            The name of the cell containing given pin
        entrydata: dict
            Converted LIB struct fro given pin

        Returns
        -------
        dict: SDF entry for a given pin, or None when timing_type is
        unsupported or deliberately ignored
        """
        # Maps Liberty timing_type values to (SDF timing-check type, edge).
        typestoedges = {
            'hold_falling': ('hold', 'negedge'),
            'hold_rising': ('hold', 'posedge'),
            'setup_falling': ('setup', 'negedge'),
            'setup_rising': ('setup', 'posedge'),
            'removal_falling': ('removal', 'negedge'),
            'removal_rising': ('removal', 'posedge'),
            'recovery_falling': ('recovery', 'negedge'),
            'recovery_rising': ('recovery', 'posedge'),
        }
        # Optionally strip array brackets from port names.
        if cls.normalize_port_names:
            normalize = cls.normalize_name
        else:
            normalize = lambda x: x
        # combinational types, should not be present in this function
        if ('timing_type' in entrydata and
            entrydata['timing_type'] not in cls.ctypes):
            timing_type = entrydata['timing_type']
            if timing_type not in typestoedges:
                log("WARNING", "not supported timing_type: {} in {}".format(
                    timing_type, objectname))
                return None
            if typestoedges[timing_type] is None:
                log("INFO", 'timing type is ignored: {}'.format(timing_type))
                return None
            else:
                # delval_rise and delval_fall are expected to match; fall is
                # only used when rise carries no values at all.
                delays = {
                    "nominal": (delval_fall if cls.is_delval_empty(delval_rise)
                                else delval_rise)}
                ptype, edgetype = typestoedges[timing_type]
        else:
            # Defensive branch: a combinational entry (or one with no
            # timing_type) was routed to the sequential parser; the assert
            # below fails (or raises KeyError) rather than continuing.
            log("ERROR", "combinational entry in sequential timing parser")
            assert entrydata['timing_type'] not in cls.ctypes
        element = sdfutils.add_tcheck(
            type=ptype,
            pto={
                "port": normalize(objectname),
                "port_edge": None,
            },
            pfrom={
                "port": normalize(entrydata["related_pin"]),
                "port_edge": edgetype,
                "cond": None,
                "cond_equation": None,
            },
            paths=delays)
        return element
@classmethod
def merge_delays(cls, oldelement, newelement):
"""Merges delays for "duplicated" entries.
LIB format contains field `timing_sense` that describes to what changes
between the input and output the given entry refers to
(`positive_unate`, `negative_unate`, `non_unate`).
SDF does not support such parameter diversity, so for now when there
are different delay values of parameters depending on `timing_sense`
field, then we take the worst possible timings.
Parameters
----------
oldelement: dict
Previous pin entry for cell
newelement: dict
New pin entry for cell
Returns
-------
dict: Merged entry
"""
olddelays = oldelement["delay_paths"]
newdelays = newelement["delay_paths"]
delays = {**olddelays, **newdelays}
for key in delays.keys():
if key in olddelays and key in newdelays:
old = olddelays[key]
new = newdelays[key]
for dkey in old:
if new[dkey] is None or (
old[dkey] is not None and old[dkey] > new[dkey]):
delays[key][dkey] = old[dkey]
else:
delays[key][dkey] = new[dkey]
element = oldelement
element["delay_paths"] = delays
return element
@classmethod
def normalize_name(cls, name):
# remove array markers
newname = name.replace('[','').replace(']','')
return newname
@classmethod
def export_sdf_from_lib_dict(
cls,
voltage: float,
parsed_data : list,
normalize_cell_names : bool,
normalize_port_names : bool,
sdf_timescale : str = "1ns"):
'''Converts the dictionary containing parsed timing information from
LIB file to the SDF format.
Parameters
----------
header: str
A header for given LIB file
voltage: float
A voltage for which timings apply
lib_dict: dict
A dictionary containing parsed LIB file
normalize_cell_names
When True enables normalization of cell and cell instance names
normalize_cell_names
When True enables normalization of port names
'''
# setup hooks that run different parsing functions based on current
# entry
parserhooks = {}
parserhooks[("input", True)] = [cls.parsesetuphold]
parserhooks[("input", False)] = [cls.parseiopath]
parserhooks[("inout", True)] = [cls.parsesetuphold]
parserhooks[("inout", False)] = [cls.parseiopath]
parserhooks[("output", False)] = [cls.parseiopath]
cls.normalize_cell_names = normalize_cell_names
cls.normalize_port_names = normalize_port_names
# extracts cell name and design name, ignore kfactor value
headerparser = cls.headerparser
# extracts pin name and value
whenparser = re.compile("(?P<name>[a-zA-Z_][a-zA-Z_0-9]*(\[[0-9]*\])?)\s*==\s*1'b(?P<value>[0-1])(\s*&&)?") # noqa: E501
# we generate a design name from the first header
header = parsed_data[0][0]
if header.startswith('library'):
design = "Unknown"
else:
parsedheader = headerparser.match(header)
design = parsedheader.group('design')
sdfparse.sdfyacc.header = {
'date': date.today().strftime("%B %d, %Y"),
'design': design,
'sdfversion': '3.0',
'voltage': {'avg': voltage, 'max': voltage, 'min': voltage}
}
cells = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
for ld in parsed_data:
header = ld[0]
lib_dict = ld[1]
keys = [key for key in lib_dict.keys()]
if len(keys) != 1 or not keys[0].startswith('library'):
log('ERROR', 'JSON does not represent Liberty library')
return None
if header.startswith('library'):
kfactor = 1.0
design = "Unknown"
instancenames = cellnames = [key.split()[1] for key in lib_dict[keys[0]].keys() if key.startswith("cell")]
librarycontents = [lib_dict[keys[0]][cell] for cell in lib_dict[keys[0]].keys() if cell.startswith("cell")]
else:
# parse header
parsedheader = headerparser.match(header)
kfactor = float(parsedheader.group('kfactor'))
design = parsedheader.group('design')
# name of the cell
cellnames = [parsedheader.group('cell')]
librarycontents = [lib_dict[keys[0]]]
instance = parsedheader.group('instance')
if instance is None:
instance = parsedheader.group('cell')
instancenames = [instance]
# initialize Yacc dictionaries holding data
sdfparse.init()
for instancename, cellname, librarycontent in zip(instancenames, cellnames, librarycontents):
# for all pins in the cell
for objectname, obj in librarycontent.items():
objectname = objectname.split(' ', 1)[1]
direction = obj['direction']
# for all timing configurations in the cell
if 'timing ' in obj:
elementnametotiming = defaultdict(lambda: [])
for timing in (obj['timing ']
if type(obj['timing ']) is list
else [obj['timing ']]):
cname = cellname
if 'when' in timing:
if timing["when"] != "":
# normally, the sdf_cond field should contain the name
# generated by the following code, but sometimes it is
# not present or represented by some specific constants
condlist = ['{}_EQ_{}'
.format(entry.group('name'),
entry.group('value'))
for entry in whenparser.finditer(
timing['when'])]
if not condlist:
log("ERROR", "when entry not parsable: {}"
.format(timing['when']))
return False
cname += "_" + '_'.join(condlist)
# when the timing is defined for falling edge, add this
# info to cell name
if 'timing_type' in timing:
if 'falling' in timing['timing_type']:
cname += "_{}_EQ_1".format(
timing['timing_type'].upper())
# Normalize cell and instance names
if cls.normalize_cell_names:
cname = cls.normalize_name(cname)
instancename = cls.normalize_name(instancename)
# extract intrinsic_rise and intrinsic_fall in SDF-friendly
# format
rise, fall = cls.extract_delval(timing, kfactor)
# if timings are completely empty, skip the entry
| |
#!/usr/bin/env python2
# -*- coding: utf-8 -*- #
import re
import random
import requests
import inflect
from twitterbot import TwitterBot
inflector = inflect.engine()
class DeepAnswerBot(TwitterBot):
    # Canned replies used when a question cannot be parsed or no related
    # concepts are found (see on_timeline).
    unrecognised_tweet = "@gelatindesign I could not understand this question :("
    no_concepts_tweet = "@gelatindesign I couldn't find any related concepts :("
    def bot_init(self):
        """
        Initialize and configure your bot!
        Use this function to set options and initialize your own custom bot
        state (if any).
        """
        ############################
        # REQUIRED: LOGIN DETAILS! #
        ############################
        # NOTE(review): credentials are intentionally blank here; they must
        # be filled in before the bot can authenticate.
        self.config['api_key'] = ''
        self.config['api_secret'] = ''
        self.config['access_key'] = ''
        self.config['access_secret'] = ''
        ######################################
        # SEMI-OPTIONAL: OTHER CONFIG STUFF! #
        ######################################
        # how often to tweet, in seconds
        self.config['tweet_interval'] = 4 * 60 * 60 # 4 hours
        # use this to define a (min, max) random range of how often to tweet
        # e.g., self.config['tweet_interval_range'] = (5*60, 10*60) # tweets every 5-10 minutes
        self.config['tweet_interval_range'] = None
        # only reply to tweets that specifically mention the bot
        self.config['reply_direct_mention_only'] = False
        # only include bot followers (and original tweeter) in @-replies
        self.config['reply_followers_only'] = True
        # fav any tweets that mention this bot?
        self.config['autofav_mentions'] = False
        # fav any tweets containing these keywords?
        self.config['autofav_keywords'] = []
        # follow back all followers?
        self.config['autofollow'] = False
        ###########################################
        # CUSTOM: your bot's own state variables! #
        ###########################################
        # If you'd like to save variables with the bot's state, use the
        # self.state dictionary. These will only be initialized if the bot is
        # not loading a previous saved state.
        # self.state['butt_counter'] = 0
        # You can also add custom functions that run at regular intervals
        # using self.register_custom_handler(function, interval).
        #
        # For instance, if your normal timeline tweet interval is every 30
        # minutes, but you'd also like to post something different every 24
        # hours, you would implement self.my_function and add the following
        # line here:
        # self.register_custom_handler(self.my_function, 60 * 60 * 24)
    def on_scheduled_tweet(self):
        """
        Make a public tweet to the bot's own timeline.
        It's up to you to ensure that it's less than 140 characters.
        Set tweet frequency in seconds with TWEET_INTERVAL in config.py.
        """
        # text = function_that_returns_a_string_goes_here()
        # self.post_tweet(text)
        # Intentionally a no-op: this bot only replies to timeline tweets
        # (see on_timeline).
        pass
        # raise NotImplementedError("You need to implement this to tweet to timeline (or pass if you don't want to)!")
    def on_mention(self, tweet, prefix):
        """
        Defines actions to take when a mention is received.
        tweet - a tweepy.Status object. You can access the text with
        tweet.text
        prefix - the @-mentions for this reply. No need to include this in the
        reply string; it's provided so you can use it to make sure the value
        you return is within the 140 character limit with this.
        It's up to you to ensure that the prefix and tweet are less than 140
        characters.
        When calling post_tweet, you MUST include reply_to=tweet, or
        Twitter won't count it as a reply.
        """
        # text = function_that_returns_a_string_goes_here()
        # prefixed_text = prefix + ' ' + text
        # self.post_tweet(prefix + ' ' + text, reply_to=tweet)
        # call this to fav the tweet!
        # if something:
        #     self.favorite_tweet(tweet)
        # Intentionally a no-op: mentions are ignored by this bot.
        pass
        # raise NotImplementedError("You need to implement this to reply to/fav mentions (or pass if you don't want to)!")
    def on_timeline(self, tweet, prefix):
        """
        Defines actions to take on a timeline tweet.
        tweet - a tweepy.Status object. You can access the text with
        tweet.text
        prefix - the @-mentions for this reply. No need to include this in the
        reply string; it's provided so you can use it to make sure the value
        you return is within the 140 character limit with this.
        It's up to you to ensure that the prefix and tweet are less than 140
        characters.
        When calling post_tweet, you MUST include reply_to=tweet, or
        Twitter won't count it as a reply.
        """
        print "---"
        print tweet.text
        # Recognise the question pattern; fall back to the canned
        # "could not understand" reply when nothing matches.
        tweet_format = self.get_tweet_format(tweet)
        if tweet_format is not None:
            text = self.get_response(tweet, tweet_format) + ' ' + self._tweet_url(tweet)
        else:
            text = DeepAnswerBot.unrecognised_tweet + ' ' + self._tweet_url(tweet)
        print text
        self.post_tweet(text, reply_to=tweet)
    def get_tweet_format(self, tweet):
        """
        Gets the type of the tweet with the subjects and relations.
        See ../learnings/deepanswerbot/deepquestionbot.txt for examples.
        """
        # Each entry pairs a relation name (e.g. 'IsA', 'AtLocation') with a
        # question pattern; named groups (start/end/subject/guess/...) capture
        # the concepts used by get_response's templates.
        tweet_formats = [
            ('IsA', re.compile(ur'Why do(?:es)?(?: an?)? (?P<start>[a-z\s]+) have to be(?: an?)? (?P<end>[a-z\s]+)\?', re.IGNORECASE)),
            ('IsA', re.compile(ur'Why must(?: an?)? (?P<start>[a-z\s]+) be(?: an?)? (?P<end>[a-z\s]+)\?', re.IGNORECASE)),
            ('IsA', re.compile(ur'Why(?: are|is)?(?: an?)? (?P<start>[a-z\s]+)(?: so often) considered to be(?: an?)? (?P<end>[a-z\s]+)\?', re.IGNORECASE)),
            ('AtLocation', re.compile(ur'Why do(?:es)?(?: an?)? (?P<end>[a-z\s]+) have(?: an?)? (?P<start>[a-z\s]+)\?', re.IGNORECASE)),
            ('AtLocation', re.compile(ur'Why (?:are|is)?(?: an?)? (?P<start>[a-z\s]+) (?:kept|found) (?:in|near)(?: an?)? (?P<end>[a-z\s]+)\?', re.IGNORECASE)),
            ('AtLocation.GuessAtLocation', re.compile(ur'(?P<start>[a-z\s]+) are (?:kept|found) (?:near|in)(?: an?)? (?P<end>[a-z\s]+)(?:, right\?|.)? (?:So|But) where do (?:you|we) (?:keep|find)(?: an?)? (?P<subject>[a-z\s]+)\?(?: an?)?(?: an?)? (?P<guess>[a-z\s]+)\?', re.IGNORECASE)),
            ('AtLocation.SubjectAtLocation', re.compile(ur'(?P<start>[a-z\s]+) are (?:kept|found) (?:near|in)(?: an?)? (?P<end>[a-z\s]+)(?:, right\?|.)? (?:So|But) where do (?:you|we) (?:keep|find)(?: an?)? (?P<subject>[a-z\s]+)\?', re.IGNORECASE)),
            ('AtLocation.InsteadAtLocation', re.compile(ur'What if you (?:kept|found)(?: an?)? (?P<start>[a-z\s]+) (?:near|in)(?: an?)? (?P<startLocation>[a-z\s]+), instead of (?:near|in)(?: an?)? (?P<end>[a-z\s]+)\?', re.IGNORECASE)),
            ('RelatedTo.InsteadIsA', re.compile(ur'Have (?:you|we) ever considered(?: an?)? (?P<start>[a-z\s]+) that is(?: an?)? (?P<guess>[a-z\s]+) instead of(?: an?)? (?P<end>[a-z\s]+)\?', re.IGNORECASE)),
            ('Compared', re.compile(ur'Why (?:are|is)(?: an?)? (?P<start>[a-z\s]+)(?:so) (?P<startAttribute>[a-z\s]+), and(?: an?)? (?P<end>[a-z\s]+)(?:comparitively) (?P<endAttribute>[a-z\s]+)\?', re.IGNORECASE)),
            ('StillA', re.compile(ur'If(?: an?)? (?P<start>[a-z\s]+) is(?: not)?(?: an?)? (?P<startAttribute>[a-z\s]+), is it still [a-z\s]+\?', re.IGNORECASE)),
        ]
        # Order matters: the first pattern that matches the tweet wins.
        for tweet_format in tweet_formats:
            name, pattern = tweet_format
            matches = pattern.search(tweet.text)
            if matches:
                return (name, matches)
        return None
def get_response(self, tweet, tweet_format):
"""
Gets the response to a tweet given a format and subjects
"""
relation, matches = tweet_format
templates = []
if relation == 'IsA':
templates = [
"Perhaps without [singular_noun:a:end.RelatedTo] we wouldn't have [singular_noun:a:start]",
"If [a:end.IsA] is [a:end] then surely the [start] can only be [a:end] too",
"When [singular_noun:a:start.RelatedTo] becomes [singular_noun:a:end.RelatedTo] there is no other reality",
"The greatest [plural:start] of the greatest number is the foundation of [plural:end]",
"[singular_noun:start] consists in doing [plural:end]",
"If [plural:end] did not exist it would be necessary to invent [plural:start]",
"It is wrong always, everywhere and for everyone, to believe anything upon insufficient [plural:start]",
"There is but one truly serious [singular_noun:end] problem, and that is [plural:start]",
"I once had [singular_noun:a:start], it was a nice [singular_noun:start]. But that's not relevant now."
]
elif relation == 'AtLocation':
templates = [
"Where else would [singular_noun:a:start] be?",
"[plural:end] are near [plural:end.AtLocation], so [singular_noun:a:start] must be there too",
"Because [singular_noun:a:start] is [singular_noun:a:start.IsA.end] and [singular_noun:a:end] is [singular_noun:a:end.IsA.end]",
"I found [singular_noun:a:start] once, it wasn't near [singular_noun:a:end], I miss my [singular_noun:a:start] :(",
u"╰( ⁰ ਊ ⁰ )━☆゚.*・。゚"
]
elif relation == 'AtLocation.SubjectAtLocation':
templates = [
"I'd usually look for [singular_noun:a:subject] near [plural:subject.AtLocation]",
"It depends, if [singular_noun:a:subject] is [singular_noun:a:subject.IsA] then it would be near [plural:start.AtLocation]",
"One cannot step twice in the same [singular_noun:end], so [singular_noun:a:subject] must be near [subject.AtLocation]",
"To put it bluntly, not near [singular_noun:a:end]",
"I find [plural:subject] in my imagination!",
u"¯\_(ツ)_/¯"
]
elif relation == 'AtLocation.GuessAtLocation':
templates = [
"Nope, [singular_noun:a:guess] is [singular_noun:a:guess.IsA] so it seems unlikely",
"Only when [singular_noun:a:start] has [singular_noun:a:start.HasA]",
"Well, [singular_noun:guess] is a nice place to be, so long as [singular_noun:a:start] can be [start.UsedFor]",
"[singular_noun:a:start] lies in [singular_noun:a:end], and perfect [plural:subject] lies in the best [singular_noun:guess]",
"Sometimes, it depends on if the [singular_noun:subject] is [singular_noun:a:subject.IsA]",
u"I know the answer to that one but I'm not going to tell you ᕕ( ⁰ ▽ ⁰ )ᕗ"
]
elif relation == 'AtLocation.InsteadAtLocation':
templates = [
"Woah... and what if [singular_noun:a:start] can [start.CapableOf]?",
"I doubt it would make much difference to [singular_noun:a:start] considering it's not [singular_noun:a:end.IsA]",
"I would run scared, [singular_noun:a:start] should never be near [plural:startLocation]",
"What? No, [singular_noun:a:start] would never be near [singular_noun:a:end]"
]
elif relation == 'RelatedTo.InsteadIsA':
templates = [
"The [guess] [singular_noun:start] is that which overcomes not only it's [end] but also it's [singular_noun:start.HasA]",
"When [singular_noun:a:guess] can [guess.CapableOf] I would suggest it must also be [singular_noun:a:end]",
"[singular_noun:a:guess] is the sign of the [singular_noun:start], it is the opium of the people",
u"(ʘᗩʘ’)"
]
elif relation == 'Compared':
templates = [
"Occasionally a [endAttribute] [singular_noun:end] can also be a [startAttribute] [singular_noun:start]",
"The only thing I know is that I know [plural:start] are [singular_noun:start.IsA]",
"[start] [startAttribute] for the worst, if they be not altered for the [endAttribute] designedly"
]
elif relation == 'StillA':
templates = [
"Perhaps if [singular_noun:a:start] is also [singular_noun:a:start.IsA] | |
- TODO: What format?
@hide_tree hides the tree
@exclude_those_with_too_many_nans_in_y_clustering removes elements with more than 25% nans from deciding the order in the y-clustering
It's using ggplot and ggdendro... in the end, this code breads insanity"""
column_number = len(set(data.get_column_view('condition')))
row_number = len(data) / column_number
keep_column_order = False
if column_order is not False:
keep_column_order = True
keep_row_order = False
if row_order is not False:
keep_row_order = True
load_r()
valid_array_cluster = 'hamming_on_0', 'cosine'
if not array_cluster_method in valid_array_cluster:
raise ValueError("only accepts array_cluster_method methods %s" %
valid_array_cluster)
df = data
if colors == None:
colors = ['grey' for x in range(len(data))]
# R's scale NaNs everything on any of these values...
# df[numpy.isnan(df.get_column_view('expression_change')), 'expression_change'] = 0 #we do this part in R now.
df[numpy.isposinf(df.get_column_view('expression_change')),
'expression_change'] = infinity_replacement_value
df[numpy.isneginf(df.get_column_view('expression_change')),
'expression_change'] = -1 * infinity_replacement_value
if len(df.get_column_unique('condition')) < 2 or len(
df.get_column_unique('gene')) < 2:
op = open(output_filename, 'wb')
op.write("not enough dimensions\n")
op.close()
return
file_extension = output_filename[-3:].lower()
if not (file_extension == 'pdf' or file_extension == 'png'):
raise ValueError('File extension must be .pdf or .png, outfile was ' +
output_filename)
robjects.r("""
normvec<-function(x)
{
sqrt(x%*%x)
}
cosine_distance = function(a, b)
{
1 - ((a %*% b) / ( normvec(a) * normvec(b)))[1]
}
dist_cosine = function(x)
{
x = as.matrix(x)
N = nrow(x)
res = matrix(0, nrow = N, ncol= N)
for (i in 1:N)
{
for (t in 1:N)
{
res[i,t] = cosine_distance(x[i,], x[t,])
}
}
as.dist(res)
}
hamming_distance = function(a, b)
{
sum(a != b)
}
dist_hamming = function(x)
{
x = as.matrix(x)
N = nrow(x)
res = matrix(0, nrow = N, ncol= N)
for (i in 1:N)
{
for (t in 1:N)
{
res[i,t] = hamming_distance(x[i,], x[t,])
}
}
as.dist(res)
}
""")
robjects.r("""
library(ggplot2)
library(reshape)
library(ggdendro)
library(grid)
do_tha_funky_heatmap = function(outputfilename, df,
low, mid, high, nan_color,
hide_genes, width, height, array_cluster_method,
keep_column_order, keep_row_order, colors, hide_tree, exclude_those_with_too_many_nans_in_y_clustering, row_order, column_order)
{
df$condition <- factor(df$condition)
options(expressions = 50000) #allow more recursion
#transform df into a rectangualr format
df_cast = cast(df, gene ~ condition, value='expression_change', fun.aggregate=median)
col_names = names(df_cast)
row_names = df_cast$gene
df_cast = df_cast[do.call(order,df_cast['gene']),]
df_scaled = as.matrix(scale(df_cast))
df_scaled[is.nan(df_scaled)] = 0
df_scaled[is.na(df_scaled)] = 0
#do the row clustering
if (!keep_row_order)
{
if (exclude_those_with_too_many_nans_in_y_clustering) #when clustering genes, leave out those samples with too many nans
{
df_scaled_with_nans = as.matrix(scale(df_cast)) #we need it a new, with nans
nan_count_per_column = colSums(is.na(df_scaled_with_nans))
too_much = dim(df_scaled_with_nans)[1] / 4.0
exclude = nan_count_per_column >= too_much
keep = !exclude
df_scaled_with_nans = df_scaled_with_nans[, keep]
df_scaled_with_nans[is.nan(df_scaled_with_nans)] = 0
df_scaled_with_nans[is.na(df_scaled_with_nans)] = 0
dd.row <- as.dendrogram(hclust(dist_cosine(df_scaled_with_nans)))
}
else
{
dd.row <- as.dendrogram(hclust(dist_cosine(df_scaled)))
}
}
#do the column clustering.
if(!keep_column_order){
if (array_cluster_method == 'cosine')
{
dd.col <- as.dendrogram(hclust(dist_cosine(t(df_scaled))))
}
else if (array_cluster_method == 'hamming_on_0')
{
df_hamming = as.matrix(df_cast) > 0
df_hamming[is.nan(df_hamming)] = 0
df_hamming[is.na(df_hamming)] = 0
dd.col <- as.dendrogram(hclust(dist_hamming(t(df_hamming))))
}
}
if (keep_row_order)
{
row.ord = 1:length(row_order)
for(i in 1:length(row_order)){
row.ord[i] = which(row_names==row_order[i])
}
row.ord = rev(row.ord)
}
else
{
row.ord <- order.dendrogram(dd.row)
}
if (keep_column_order)
{
tmp = 1:length(column_order)
for(i in 1:length(column_order)){
tmp[i] = which(col_names==column_order[i])-1
}
col.ord <- tmp
}
else
{
col.ord <- order.dendrogram(dd.col)
}
xx <- scale(df_cast, FALSE, FALSE)[row.ord, col.ord]
xx_names <- attr(xx, 'dimnames')
df <- as.data.frame(xx)
colnames(df) <- xx_names[[2]]
df$gene <- xx_names[[1]]
df$gene <- with(df, factor(gene, levels=gene, ordered=TRUE))
mdf <- melt(df, id.vars="gene")
tmp = c()
i = 1
for (gene in df$gene)
{
index = which(colors$gene == gene)
colll <- as.character(colors$color[index])
tmp[i] = colll
i = i +1
}
colors = tmp
if(!keep_column_order){
ddata_x <- dendro_data(dd.col)
}
if(!keep_row_order)
{
ddata_y <- dendro_data(dd.row)
}
### Set up a blank theme
theme_none <- theme(
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.title.x = element_text(colour=NA),
axis.title.y = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.line = element_blank(),
axis.ticks = element_blank()
)
### Create plot components ###
# Heatmap
p1 <- ggplot(mdf, aes(x=variable, y=gene)) +
geom_tile(aes(fill=value)) + scale_fill_gradient2(low=low,mid=mid, high=high, na.value=nan_color) + theme(axis.text.x = element_text(angle=90, size=8, hjust=0, vjust=0, colour="black"),
axis.title.y = element_blank(), axis.title.x = element_blank(),
axis.text.y = element_text(colour="black"))
if (hide_genes)
p1 = p1 + theme(axis.text.y = element_blank())
else
{
p1 = p1 + theme(strip.background = element_rect(colour = 'NA', fill = 'NA'), axis.text.y = element_text(colour=colors))
}
if (!keep_column_order && !hide_tree)
{
# Dendrogram 1
p2 <- ggplot(segment(ddata_x)) +
geom_segment(aes(x=x, y=y, xend=xend, yend=yend)) +
theme_none + theme(axis.title.x=element_blank())
}
if(!keep_row_order && !hide_tree)
{
# Dendrogram 2
p3 <- ggplot(segment(ddata_y)) +
geom_segment(aes(x=x, y=y, xend=xend, yend=yend)) +
coord_flip() + theme_none
}
if (grepl('png$', outputfilename))
png(outputfilename, width=width * 72, height=height * 72)
else if (grepl('pdf$', outputfilename))
pdf(outputfilename, width=width, height=height)
else
error("Don't know that file format")
grid.newpage()
if (hide_tree)
vp = viewport(1, 1, x=0.5, y=0.5)
else
vp = viewport(0.8, 0.8, x=0.4, y=0.4)
print(p1, vp=vp)
if (!keep_column_order && !hide_tree)
{
print(p2, vp=viewport(0.60, 0.2, x=0.4, y=0.9))
}
if (!keep_row_order && !hide_tree)
{
print(p3, vp=viewport(0.2, 0.86, x=0.9, y=0.4))
}
dev.off()
}
""")
if not width:
width = len(df.get_column_unique('condition')) * 0.4 + 5
height = len(df.get_column_unique('gene')) * 0.15 + 3
robjects.r('do_tha_funky_heatmap')(
output_filename, df, low, mid, high, nan_color, hide_genes, width,
height, array_cluster_method, keep_column_order, keep_row_order,
colors, hide_tree, exclude_those_with_too_many_nans_in_y_clustering,
row_order, column_order)
def EmptyPlot(text_to_display='No data'):
    """Returns a Plot that shows only *text_to_display* at the origin."""
    placeholder = pandas.DataFrame(
        {'x': [0], 'y': [0], 'text': [text_to_display]})
    plot = Plot(placeholder)
    plot.add_text('x', 'y', 'text')
    return plot
class CombinedPlots:
    """Combine multiple ggplots into one graph.
    Default is A4
    """
    def __init__(self, plots, ncol=3, width=8.267 * 150, height=11.5 * 150):
        """width/height are in pixels @ 150 pixels/inch"""
        self.plots = plots
        self.ncol = ncol
        self.width = float(width)
        self.height = float(height)
    def _repr_svg_(self):
        # Render into a named temporary file and return the raw SVG for
        # IPython's rich-display machinery.
        so = tempfile.NamedTemporaryFile(suffix='.svg')
        self.render(so.name)
        so.flush()
        so.flush()
        result = so.read()
        so.close()
        return result, {"isolated": True}
    def render(self, output_filename, width=None, height=None):
        """Render all plots into a single SVG, laid out in a grid with
        self.ncol columns."""
        if not output_filename.endswith('.svg'):
            raise ValueError("combined plots currently only support svg")
        from . import svg_stack
        if width is None:
            width = self.width
        if height is None:
            height = self.height
        # Never use more columns than there are plots.
        if len(self.plots) < self.ncol:
            self.ncol = len(self.plots)
        nrow = math.ceil(len(self.plots) / float(self.ncol))
        # Render each sub-plot to its own SVG, scaled to its grid cell
        # (150 px/inch converted to 72 pt/inch).
        svgs = [
            p._repr_svg_(
                width=self.width / self.ncol / 150 * 72,
                height=self.height / nrow / 150 * 72) for p in self.plots
        ]
        tfs = [tempfile.NamedTemporaryFile(suffix='.svg') for x in svgs]
        for of, svg in zip(tfs, svgs):
            of.write(svg[0].encode('utf-8'))
            of.flush()
        # Stack the per-plot SVGs: one HBox per row, all rows in a VBox.
        doc = svg_stack.Document()
        layout1 = svg_stack.VBoxLayout()
        rows = [tfs[i:i + self.ncol] for i in range(0, len(tfs), self.ncol)]
        ii = 0
        for row in rows:
            ii += 1
            layout2 = svg_stack.HBoxLayout()
            for element in row:
                layout2.addSVG(element.name, alignment=svg_stack.AlignLeft)
            layout2.setSpacing(0)
            layout1.addLayout(layout2)
        layout1.setSpacing(0)
        doc.setLayout(layout1)
        doc.save(output_filename)
        for of in tfs:
            of.close()
    def to_excel(self, output_filename):
        """Write each plot's used dataframe columns to one sheet per plot."""
        writer = pandas.ExcelWriter(output_filename)
        i = 0
        for p in self.plots:
            i += 1
            df = p.dataframe.copy()
            # Map internal dat_N column names back to the user's original
            # column names where present.
            rename_columns = {}
            for ii, x in enumerate(p.old_names):
                new_name = 'dat_%s' % ii
                if new_name in df:
                    rename_columns[new_name] = x
            df = df.rename(columns=rename_columns)
            df = df[list(set(df.columns).intersection(p.used_columns))]
            df.to_excel(writer, 'Plot%i' % i)
        # NOTE(review): ExcelWriter.save() is deprecated in newer pandas
        # (use close()) -- confirm the pinned pandas version.
        writer.save()
def position_dodge(width=RNULL, height=RNULL):
    """Adjust position by dodging overlaps to the side."""
    # Thin wrapper over ggplot2's function of the same name.
    dodge = robjects.r('position_dodge')
    return dodge(width, height)
def position_fill(width=RNULL, height=RNULL):
    """Stack overlapping objects on top of one another, and standardise to
    have equal height (each stack is scaled to fill the panel).
    """
    # Thin wrapper over ggplot2's position_fill.
    return robjects.r('position_fill')(width, height)
def position_identity(width=RNULL, height=RNULL):
    """Don't adjust position"""
    # Thin wrapper over ggplot2's function of the same name.
    identity = robjects.r('position_identity')
    return identity(width, height)
def position_stack(width=RNULL, height=RNULL):
    """Stack overlapping objects on top of one another."""
    # Thin wrapper over ggplot2's function of the same name.
    stacker = robjects.r('position_stack')
    return stacker(width, height)
def position_jitter(w=0.4, h=0.4):
    """Jitter points horizontally/vertically by up to *w*/*h* to reduce
    overplotting (passed through to ggplot2's position_jitter)."""
    return robjects.r('position_jitter')(w, h)
def multiplot(list_of_plots, cols):
    """Plot multiple plots on one image.

    Defines an R helper that lays the plots out on a grid with *cols*
    columns, then calls it with the built ggplot objects.
    """
    robjects.r("""
    multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
    library(grid)
    # Make a list from the ... arguments and plotlist
    plots <- c(list(...), plotlist)
    numPlots = length(plots)
    # If layout is NULL, then use 'cols' to determine layout
    if (is.null(layout)) {
        # Make the panel
        # ncol: Number of columns of plots
        # nrow: Number of rows needed, calculated from # of cols
        layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                        ncol = cols, nrow = ceiling(numPlots/cols))
    }
    if (numPlots==1) {
        print(plots[[1]])
    } else {
        # Set up the page
        grid.newpage()
        pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
        # Make each plot, in the correct location
        for (i in 1:numPlots) {
        # Get the i,j matrix positions of the regions that contain this subplot
        matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
        print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                        layout.pos.col = matchidx$col))
        }
    }
    }
    """)
    # Build the underlying ggplot object for each wrapper before handing
    # them over to the R helper defined above.
    plots = [x._build_plot() for x in list_of_plots]
    robjects.r('multiplot')(plotlist=plots, cols=cols)
def r_plot_to_png(callback, width=600, height=600, dpi=72):
from rpy2.robjects.lib import grdevices
with grdevices.render_to_bytesio(
grdevices.png, width=width, height=height, res=dpi) as | |
to SLC35E2B ...
if ( curID == "728661" ):
print " --> changing gene from <%s> to SLC35E2B " % curGene
curGene = "SLC35E2B"
elif (nameChangeFlag == "YES"):
if (newGene != curGene):
# do NOT change to this "new" name if this name seems to
# already be in use ...
if ((newGene in curGeneSymbols) or (newGene in usedGeneSymbols) or (newGene == "")):
print " --> NOT changing name from <%s> to <%s> " % (curGene, newGene)
else:
print " --> changing name from <%s> to <%s> " % (curGene, newGene)
curGene = newGene
# keep track of 'used' gene symbols ...
if (curGene not in usedGeneSymbols):
usedGeneSymbols += [curGene]
gotItFlag = 0
# ------------------------------------
# IF haveName=TRUE and haveCoord=FALSE
if (haveName and not haveCoord):
# here we want to add coordinates based on a gene name ...
# --> first we try Gencode, and then we try GAF ...
# more importantly, FIRST we try by gene ID and then by gene symbol ...
if ( haveExtraName ):
geneID = tokenList[7]
print " --> first looking using gene ID <%s> " % geneID
if (geneID in GAF_geneCoordDict_byID):
print " found by ID ... ", curLabel, curGene, geneID, GAF_geneCoordDict_byID[geneID]
newGene = GAF_geneSymbol_byID[geneID][0]
if (newGene != curGene):
print " --> changing name from <%s> to <%s> " % (curGene, newGene)
curGene = newGene
# sys.exit(-1)
newLabel = annotateLabel(curLabel, curGene, GAF_geneCoordDict_byID[geneID])
print " addGene : ", tokenList, " --> ", newLabel
gotItFlag = 1
if (curType not in addGene):
addGene[curType] = 1
else:
addGene[curType] += 1
if ( not gotItFlag ):
if (curGene in Gencode_geneCoordDict_bySymbol):
print " now looking by gene sybmol in Gencode ... ", curLabel, curGene, Gencode_geneCoordDict_bySymbol[curGene]
newLabel = annotateLabel(curLabel, curGene, Gencode_geneCoordDict_bySymbol[curGene])
print " addGene : ", tokenList, " --> ", newLabel
gotItFlag = 1
# keep track of how often we add a gene label ...
if (curType not in addGene):
addGene[curType] = 1
else:
addGene[curType] += 1
if ( not gotItFlag ):
if (curGene in GAF_geneCoordDict_bySymbol):
print " now looking by symbol in GAF ... ", curLabel, curGene, GAF_geneCoordDict_bySymbol[curGene]
newLabel = annotateLabel(curLabel, curGene, GAF_geneCoordDict_bySymbol[curGene])
print " addGene : ", tokenList, " --> ", newLabel
gotItFlag = 1
# keep track of how often we add a gene label ...
if (curType not in addGene):
addGene[curType] = 1
else:
addGene[curType] += 1
if (not gotItFlag):
print " this gene is not in GAF by gene ID (or no gene ID available) ??? ", tokenList
if (curGene in refGeneDict):
print " finally, found in refGene ... ", curLabel, curGene, refGeneDict[curGene]
newLabel = annotateLabel(
curLabel, curGene, refGeneDict[curGene])
print " addGene : ", tokenList, " --> ", newLabel
# keep track of how often we add a gene label ...
if (curType not in addGene):
addGene[curType] = 1
else:
addGene[curType] += 1
else:
print " and also not in refGene ... "
# -----------------------------------------
# IF haveName=FALSE and haveValidCoord=TRUE
elif (not haveName and haveValidCoord):
# here we want to add either a single gene name based on valid
# coordinates, or else we add a cytoband label ...
# print tokenList
geneList = overlap(curLabel, GAF_geneCoordDict_bySymbol)
# print geneList
if ( (len(geneList)!=1) or (curType=="CNVR") ):
# if there are several (or zero) genes in this segment, then we
# annotate based on cytoband instead ...
# print curLabel
# print geneList
curCytoband = getCytobandLabel(curLabel, cytoDict)
newLabel = curLabel[:7] + curCytoband + curLabel[7:]
print " addCyto : ", tokenList, " --> ", newLabel
# print newLabel
# keep track of how often we add a gene label ...
if (curType not in addCyto):
addCyto[curType] = 1
else:
addCyto[curType] += 1
else:
# if there is just one gene, then use that
if (hasSpecialChar(geneList[0])):
print " need to fix this gene name : ", geneList[0]
sys.exit(-1)
newLabel = curLabel[:7] + geneList[0] + curLabel[7:]
print " addGene : ", tokenList, " --> ", newLabel
# print newLabel
# keep track of how often we add a gene label ...
if (curType not in addGene):
addGene[curType] = 1
else:
addGene[curType] += 1
newRowLabels += [newLabel]
# END OF BLOCK of SEVERAL IF STATEMENTS
# -------------------------------------
# END OF OUTER LOOP OVER ROWS ...
print " "
print " "
if ( 0 ):
# before we assign the new row labels, make sure that they
# are unique !!
numIdent = 0
print " checking for label uniqueness ... ", len(newRowLabels)
for ii in range(len(newRowLabels)):
if ( (ii%10000) == 0 ): print ii, len(newRowLabels)
for jj in range(ii + 1, len(newRowLabels)):
if (newRowLabels[ii] == newRowLabels[jj]):
print " WARNING !!! identical labels ??? tacking on dup "
print ii, newRowLabels[ii]
print jj, newRowLabels[jj]
if (newRowLabels[jj][-1] == ":"):
newRowLabels[jj] += "dup"
else:
newRowLabels[jj] += "_dup"
print " --> ", jj, newRowLabels[jj]
numIdent += 1
if (0):
if (numIdent > 0):
sys.exit(-1)
print " "
print " OK ... "
print " "
if (len(newRowLabels) == len(rowLabels)):
print " --> seem to have all the new labels ... ", len(newRowLabels), len(rowLabels)
print " addGene : ", addGene
print " addCyto : ", addCyto
# assign the new labels ...
dataD['rowLabels'] = newRowLabels
return (dataD)
else:
print " ERROR ??? wrong number of labels ??? "
print len(newRowLabels), len(rowLabels)
sys.exit(-1)
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
# -#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
if __name__ == "__main__":
if (1):
if (len(sys.argv) >= 4):
inFile = sys.argv[1]
build = sys.argv[2]
outFile = sys.argv[3]
if (len(sys.argv) >= 5):
forceFlag = sys.argv[4].upper()
else:
forceFlag = "NO"
if (len(sys.argv) >= 6):
nameChangeFlag = sys.argv[5].upper()
else:
nameChangeFlag = "NO"
else:
print " "
print " Usage: %s <input TSV file> <hg18 or hg19> <output TSV file> [force REannotation=NO/YES] [nameChangeFlag=NO/YES] "
print " note that forceFlag nameChangeFlag will default to NO "
print " "
print " ERROR -- bad command line arguments "
sys.exit(-1)
tumorType = 'gbm'
if (forceFlag == "Y"):
forceFlag = "YES"
if (forceFlag != "YES"):
forceFlag = "NO"
if (nameChangeFlag == "Y"):
nameChangeFlag = "YES"
if (nameChangeFlag != "YES"):
nameChangeFlag = "NO"
print " forceFlag = %s " % forceFlag
print " nameChangeFlag = %s " % nameChangeFlag
print " "
# and get the coordinates for these genes ...
bioinformaticsReferencesDir = gidgetConfigVars['TCGAFMP_BIOINFORMATICS_REFERENCES']
if (build == 'hg18'):
gafFilename = bioinformaticsReferencesDir + "/GAF/Feb2011/GAF.hg18.Feb2011/GAF_bundle/outputs/TCGA.hg18.Feb2011.gaf"
gencodeFilename = ""
cybFilename = bioinformaticsReferencesDir + "/hg18/cytoBand.hg18.txt"
elif (build == 'hg19'):
gafFilename = bioinformaticsReferencesDir + "/GAF/GAF3.0/all.gaf"
gencodeFilename = bioinformaticsReferencesDir + "/gencode/gencode.v19.gene.gtf"
refGeneFilename = bioinformaticsReferencesDir + "/hg19/refGene.txt"
cybFilename = bioinformaticsReferencesDir + "/hg19/cytoBand.hg19.txt"
else:
print " ERROR ... genome build must be either hg18 or hg19 "
infFilename = bioinformaticsReferencesDir + "/ftp.ncbi.nlm.nih.gov/gene/DATA/gene_info"
print " "
print " Running : %s %s %s %s " % (sys.argv[0], sys.argv[1], sys.argv[2], sys.argv[3])
print " %s " % gafFilename
print " %s " % gencodeFilename
print " %s " % refGeneFilename
print " %s " % cybFilename
print " %s " % infFilename
print " "
print " "
# read in the input feature matrix first, just in case there
# actually isn't one yet available ...
print " --> calling tsvIO.readTSV ... "
testD = tsvIO.readTSV(inFile)
try:
print len(testD['rowLabels']), len(testD['colLabels'])
if (len(testD['rowLabels']) == 0 or len(testD['colLabels']) == 0):
print " EXITING ... no data "
sys.exit(-1)
except:
print " --> invalid / missing input feature matrix "
sys.exit(-1)
# read in the gene_info file ...
# this was turned off ... looking into turning it back on (1/7/13)
# turning it back off (1/17/14)
if (0):
print " --> calling readGeneInfoFile ... "
(geneInfoDict, synMapDict) = refData.readGeneInfoFile(infFilename)
else:
geneInfoDict = {}
synMapDict = {}
# then read in the GAF file ... or GENCODE ...
print " --> calling readGAF ... "
(GAF_geneCoordDict_bySymbol, GAF_geneCoordDict_byID, GAF_geneSymbol_byID) = refData.readGAF(gafFilename)
print " --> and Gencode ... "
(Gencode_geneCoordDict_bySymbol, Gencode_geneCoordDict_byID, Gencode_geneSymbol_byID) = refData.readGencode(gencodeFilename)
# also the refGene file ...
# looking in to turning this off too (1/17/14)
if | |
i*block_size),(top_left_x+play_width + add, top_left_y+ i * block_size))
for j in range(len(grid_2[i])):
pygame.draw.line(surface, (128, 128, 128), (top_left_x + add + j*block_size, top_left_y),(top_left_x + add + j*block_size, top_left_y + play_height))
def clear_rows(grid, locked_pos):
    """Remove every completely filled row and shift the rows above it down.

    grid: 2-D list of RGB tuples; (0, 0, 0) marks an empty (black) cell.
    locked_pos: dict mapping (x, y) grid coordinates to a colour; mutated
        in place to reflect the board after the clear.
    Returns the number of rows cleared (the caller scales this for scoring).

    Bug fix: the original shifted every cell above the *topmost* cleared row
    by the *total* number of cleared rows, so with two non-adjacent full
    rows the cells sitting between them were never shifted and cells above
    were shifted too far.  Each surviving cell must move down by the number
    of cleared rows strictly BELOW it.
    """
    # Indices of every completely filled row ((0,0,0) = empty cell).
    cleared = [i for i, row in enumerate(grid) if (0, 0, 0) not in row]
    # Drop the locked cells belonging to the cleared rows.
    for i in cleared:
        for j in range(len(grid[i])):
            locked_pos.pop((j, i), None)
    if cleared:
        # Process surviving cells bottom-to-top so a moved cell can never
        # collide with a key that has not been processed yet.
        for key in sorted(locked_pos, key=lambda pos: pos[1], reverse=True):
            x, y = key
            shift = sum(1 for r in cleared if r > y)
            if shift:
                locked_pos[(x, y + shift)] = locked_pos.pop(key)
    return len(cleared)
# display next shape beside the play section
def draw_next_shape(shape_1, shape_2, surface, add=0):
    """Render the 'Next Shape' preview for both players.

    shape_1 / shape_2: the upcoming pieces for player 1 and player 2.
    add: horizontal pixel offset of player 2's half of the window.
    """
    font = pygame.font.SysFont('Consolas', 20, bold=True)
    label = font.render('Next Shape: ', True, (255, 255, 255))
    # Fixed anchor to the right of player 1's play field.
    x_coor = top_left_x + play_width + 50
    y_coor = top_left_y + play_height / 2 - 100
    # Draw player 1's preview at offset 0, then player 2's shifted by `add`.
    for shape, offset in ((shape_1, 0), (shape_2, add)):
        layout = shape.shape[shape.rotation % len(shape.shape)]
        for row_idx, row in enumerate(layout):
            for col_idx, cell in enumerate(row):
                if cell == '0':
                    px = x_coor + col_idx * block_size + offset
                    py = y_coor + row_idx * block_size
                    # Filled cell plus a grey 1px outline.
                    pygame.draw.rect(surface, shape.color,
                                     (px, py, block_size, block_size), 0)
                    pygame.draw.rect(surface, (128, 128, 128),
                                     (int(px), int(py), block_size, block_size), 1)
    surface.blit(label, (x_coor + 10, y_coor - 30))
    surface.blit(label, (x_coor + 10 + add, y_coor - 30))
    pygame.display.update()
def draw_window(surface, grid_1, grid_2, score_1=0, score_2=0, level=1, speed=0.27, add=0):
    """Draw one full frame: both boards, borders, scores, level and divider.

    grid_1 / grid_2: 2-D colour grids for player 1 and player 2.
    add: horizontal pixel offset of player 2's half of the window.
    """
    surface.fill((33, 29, 29))
    # Font module must be initialised before creating fonts.
    pygame.font.init()
    font = pygame.font.SysFont('Consolas', 20, italic=True)
    label = font.render("LEVEL : " + str(level) + " SPEED: " + str(round(1 / speed, 2)), True, (255, 255, 255))
    surface.blit(label, ((top_left_x + play_width) / 1.5 - label.get_width(), 30))
    surface.blit(label, ((top_left_x + play_width) / 1.5 - label.get_width() + add, 30))
    # Paint every cell of each board (width 0 = filled rectangle), then the
    # red border around that board.  The two halves never overlap, so the
    # resulting pixels match drawing all cells first and all borders after.
    for board, offset in ((grid_1, 0), (grid_2, add)):
        for i in range(len(board)):
            for j in range(len(board[i])):
                pygame.draw.rect(surface, board[i][j],
                                 (top_left_x + (block_size * j) + offset,
                                  top_left_y + (block_size * i),
                                  block_size, block_size), 0)
        pygame.draw.rect(surface, (255, 0, 0),
                         (top_left_x + offset, top_left_y, play_width, play_height), 4)
    # Both score labels use the same italic font.
    score_font = pygame.font.SysFont('Consolas', 20, bold=False, italic=True)
    label_1 = score_font.render('Score: ' + str(score_1), True, (255, 255, 255))
    label_2 = score_font.render('Score: ' + str(score_2), True, (255, 255, 255))
    x_coor = top_left_x + play_width + 50
    y_coor = top_left_y + play_height / 2 - 100
    # Vertical divider between the two halves of the window.
    pygame.draw.line(surface, (255, 255, 255), (s_width / 2, 0), (s_width / 2, s_height))
    surface.blit(label_1, (x_coor + 10, y_coor - 120))
    surface.blit(label_2, (x_coor + 10 + add, y_coor - 120))
    draw_grid(surface, grid_1, grid_2, add=int(mid_x))
    pygame.display.update()
def main(surface):
    """Run the two-player game loop on the given pygame surface.

    Player 1 uses the arrow keys (up rotates), player 2 uses WASD
    (w rotates); the space bar pauses.  The loop exits when the window is
    closed or either player tops out.
    """
    run = True
    # Per-player state: settled blocks, piece-landed flag, active piece,
    # next piece, and score.
    p1_locked_positions = {}
    p1_change_piece = False
    p1_current_piece = get_shape()
    p1_next_piece = get_shape()
    p1_score = 0
    p2_locked_positions = {}
    p2_change_piece = False
    p2_current_piece = get_shape()
    p2_next_piece = get_shape()
    p2_score = 0
    clock = pygame.time.Clock()
    fallTime = 0      # ms since the pieces last auto-dropped
    level_time = 0    # ms since the last speed-up check
    level = 1
    fallSpeed = 0.27  # seconds per automatic one-row drop
    while run:
        # constantly update the grid while the program is running
        p1_grid = create_grid(p1_locked_positions)
        p2_grid = create_grid(p2_locked_positions)
        # gets the amount of time since last clock tick
        fallTime += clock.get_rawtime()
        level_time += clock.get_rawtime()
        # called once per frame
        clock.tick()
        # auto update the level after 15 seconds
        if level_time / 1000 > 15:
            level_time = 0
            if fallSpeed > 0.12:
                fallSpeed -= 0.01
                level += 1
        # to automatically move the piece down
        if fallTime/1000 >= fallSpeed:
            fallTime = 0
            p1_current_piece.y += 1
            p2_current_piece.y += 1
            # to detect the validity of piece and change it if it is not valid
            if not(valid_space(p1_current_piece,p1_grid)) and p1_current_piece.y > 0:
                p1_current_piece.y -= 1
                p1_change_piece = True
            if not(valid_space(p2_current_piece,p2_grid)) and p2_current_piece.y > 0:
                p2_current_piece.y -= 1
                p2_change_piece = True
        # get user events
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
            # to detect the keys pressed down
            if event.type == pygame.KEYDOWN:
                # Player 1 controls: arrow keys.  Every move is applied
                # tentatively and undone if it leaves valid space.
                if event.key == pygame.K_LEFT:
                    p1_current_piece.x -= 1
                    # to check whether the block is in valid space
                    if not (valid_space(p1_current_piece, p1_grid)):
                        p1_current_piece.x += 1
                if event.key == pygame.K_RIGHT:
                    p1_current_piece.x += 1
                    if not (valid_space(p1_current_piece, p1_grid)):
                        p1_current_piece.x -= 1
                if event.key == pygame.K_DOWN:
                    p1_current_piece.y += 1
                    if not (valid_space(p1_current_piece, p1_grid)):
                        p1_current_piece.y -= 1
                # let the block rotate when we press up key
                if event.key == pygame.K_UP:
                    p1_current_piece.rotation += 1
                    if not (valid_space(p1_current_piece, p1_grid)):
                        p1_current_piece.rotation -= 1
                # Player 2 controls: WASD, same tentative-move scheme.
                if event.key == pygame.K_w:
                    p2_current_piece.rotation += 1
                    if not (valid_space(p2_current_piece, p2_grid)):
                        p2_current_piece.rotation -= 1
                if event.key == pygame.K_a:
                    p2_current_piece.x -= 1
                    if not (valid_space(p2_current_piece, p2_grid)):
                        p2_current_piece.x += 1
                if event.key == pygame.K_s:
                    p2_current_piece.y += 1
                    if not (valid_space(p2_current_piece, p2_grid)):
                        p2_current_piece.y -= 1
                if event.key == pygame.K_d:
                    p2_current_piece.x += 1
                    if not (valid_space(p2_current_piece, p2_grid)):
                        p2_current_piece.x -= 1
                # pause the game if space bar is pressed
                if event.key == pygame.K_SPACE:
                    pause(surface,clock)
        # to locate current_piece coordinates
        p1_shape_pos = convert_shape_format(p1_current_piece)
        p2_shape_pos = convert_shape_format(p2_current_piece)
        for i in range(len(p1_shape_pos)):
            x,y = p1_shape_pos[i]
            # draw the color after it appears on play section
            if y > -1:
                p1_grid[y][x] = p1_current_piece.color
        for i in range(len(p2_shape_pos)):
            x,y = p2_shape_pos[i]
            # draw the color after it appears on play section
            if y > -1:
                p2_grid[y][x] = p2_current_piece.color
        # check for change_piece and update locked positions
        if p1_change_piece:
            for pos in p1_shape_pos:
                p = (pos[0],pos[1])
                p1_locked_positions[p] = p1_current_piece.color
            # locked_positions will look like this --> {(1,2):(0,255,255),......}
            # change current_piece to next_piece
            p1_current_piece = p1_next_piece
            p1_next_piece = get_shape()
            # change change_piece to False to prevent change_piece again
            p1_change_piece = False
            # only clear rows when next piece is generated
            p1_score += clear_rows(p1_grid,p1_locked_positions) * 10 * level
        # check for change_piece and update locked positions
        if p2_change_piece:
            for pos in p2_shape_pos:
                p = (pos[0], pos[1])
                p2_locked_positions[p] = p2_current_piece.color
            # locked_positions will look like this --> {(1,2):(0,255,255),......}
            # change current_piece to next_piece
            p2_current_piece = p2_next_piece
            p2_next_piece = get_shape()
            # change change_piece to False to prevent change_piece again
            p2_change_piece = False
            # only clear rows when next piece is generated
            p2_score += clear_rows(p2_grid, p2_locked_positions) * 10 * level
        # to break the loop if lost
        if check_lost(p1_locked_positions) or check_lost(p2_locked_positions):
            # Show LOSE on the side that topped out and WIN on the other.
            if check_lost(p1_locked_positions):
                font = pygame.font.SysFont("Consolas", 60, bold=True)
                label = font.render("YOU LOSE!!!", True, (255,255,255))
                label_2 = font.render("YOU WIN!!!", True, (255,255,255))
                surface.blit(label, (top_left_x + play_width / 2 - label.get_width() / 2 - 20,top_left_y + play_height / 2 - label.get_height() / 2))
                surface.blit(label_2, (top_left_x + play_width / 2 - label.get_width() / 2 + mid_x - 20,top_left_y + play_height / 2 - label.get_height() / 2))
            if check_lost(p2_locked_positions):
                font = pygame.font.SysFont("Consolas", 60, bold=True)
                label = font.render("YOU LOSE!!!", True, (255, 255, 255))
                label_2 = font.render("YOU WIN!!!", True, (255, 255, 255))
                surface.blit(label, (top_left_x + play_width / 2 - label.get_width() / 2 + mid_x - 20,top_left_y + play_height / 2 - label.get_height() / 2))
                surface.blit(label_2, (top_left_x + play_width / 2 - label.get_width() / 2 - 20,top_left_y + play_height / 2 - label.get_height() / 2))
            pygame.display.update()
            pygame.time.delay(3000)
            run = False
        # if there are multiple draw functions in one run, juz call pygame.display.update() in main loop once
        draw_window(surface,p1_grid,p2_grid,p1_score,p2_score,level,fallSpeed,add=int(mid_x))
        draw_next_shape(p1_next_piece, p2_next_piece, surface, add=int(mid_x))
        pygame.display.update()
# background picture of | |
<reponame>mkulariya1/tefla
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
import math
import tensorflow as tf
from collections import namedtuple
from .layers import batch_norm_tf as batch_norm
NamedOutputs = namedtuple('NamedOutputs', ['name', 'outputs'])
@six.add_metaclass(abc.ABCMeta)
class PoolingBaseModel(object):
  """Inherit from this class when implementing new models.

  Reference:
  Learnable pooling method with Context Gating for video classification
  <NAME>, <NAME>, <NAME>
  credit: modified version of original implementation https://github.com/antoine77340/LOUPE
  """

  def __init__(self,
               feature_size,
               max_samples,
               cluster_size,
               output_dim,
               gating=True,
               add_batch_norm=True,
               is_training=True,
               name='LearnablePooling',
               outputs_collections=None):
    """Initialize a learnable-pooling block.

    Args:
      feature_size: Dimensionality of the input features.
      max_samples: The maximum number of samples to pool.
      cluster_size: The number of clusters.
      output_dim: size of the output space after dimension reduction.
      add_batch_norm: (bool) if True, adds batch normalization.
      is_training: (bool) Whether or not the graph is training.
      gating: (bool) Whether or not to use gating operation
      name: a string, name of the layer
      outputs_collections: The collections to which the outputs are added.
    """
    # Configuration is only stored here; subclasses build the TF graph in
    # their forward() implementations.
    self.feature_size = feature_size
    self.max_samples = max_samples
    self.output_dim = output_dim
    self.is_training = is_training
    self.gating = gating
    self.add_batch_norm = add_batch_norm
    self.cluster_size = cluster_size
    self.name = name
    self.outputs_collections = outputs_collections

  @abc.abstractmethod
  def forward(self, reshaped_input):
    # Subclasses must build and return the pooled representation.
    raise NotImplementedError("Models should implement the forward pass.")

  def context_gating(self, input_layer):
    """Context Gating.

    Args:
      input_layer: Input layer in the following shape:
      'batch_size' x 'number_of_activation'

    Returns:
      activation: gated layer in the following shape:
      'batch_size' x 'number_of_activation'
    """
    input_dim = input_layer.get_shape().as_list()[1]
    gating_weights = tf.get_variable(
        "gating_weights", [input_dim, input_dim],
        initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(input_dim)))
    gates = tf.matmul(input_layer, gating_weights)
    # Either batch-normalize the gates or add a learned bias, never both.
    if self.add_batch_norm:
      gates = batch_norm(
          gates, center=True, scale=True, is_training=self.is_training, scope="gating_bn")
    else:
      gating_biases = tf.get_variable(
          "gating_biases", [input_dim],
          initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(input_dim)))
      gates += gating_biases
    # Sigmoid gate in [0, 1], applied multiplicatively to the input.
    gates = tf.sigmoid(gates)
    activation = tf.multiply(input_layer, gates)
    # NOTE(review): _collect_named_outputs is expected to be defined
    # elsewhere in this module/package -- confirm the import.
    return _collect_named_outputs(self.outputs_collections, self.name, activation)
class NetVLAD(PoolingBaseModel):
  """Creates a NetVLAD class (learnable VLAD pooling)."""

  def __init__(self, feature_size, max_samples, cluster_size, output_dim, **kwargs):
    """Initialize a NetVLAD block.

    Args:
      feature_size: Dimensionality of the input features.
      max_samples: The maximum number of samples to pool.
      cluster_size: The number of clusters.
      output_dim: size of the output space after dimension reduction.
      **kwargs: forwarded to PoolingBaseModel (add_batch_norm, is_training,
        gating, name, outputs_collections).
    """
    super(NetVLAD, self).__init__(feature_size, max_samples, cluster_size, output_dim, **kwargs)

  def forward(self, reshaped_input):
    """Forward pass of a NetVLAD block.

    Args:
      reshaped_input: The input in reshaped in the following form:
      'batch_size' x 'max_samples' x 'feature_size'.

    Returns:
      vlad: the pooled vector of size: 'batch_size' x 'output_dim'
    """
    # Soft-assignment of each descriptor to every cluster.
    cluster_weights = tf.get_variable(
        "cluster_weights", [self.feature_size, self.cluster_size],
        initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
    activation = tf.matmul(reshaped_input, cluster_weights)
    if self.add_batch_norm:
      activation = batch_norm(
          activation, center=True, scale=True, is_training=self.is_training, scope="cluster_bn")
    else:
      # Bug fix: the original used the undefined bare name `cluster_size`
      # for the bias shape, which raised NameError whenever
      # add_batch_norm=False.  Use self.cluster_size (as SoftDBoW does).
      cluster_biases = tf.get_variable(
          "cluster_biases", [self.cluster_size],
          initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
      activation += cluster_biases
    activation = tf.nn.softmax(activation)
    activation = tf.reshape(activation, [-1, self.max_samples, self.cluster_size])
    # a_sum: total soft-assignment mass per cluster; a: mass-weighted
    # cluster centres to be subtracted (the VLAD residual).
    a_sum = tf.reduce_sum(activation, -2, keep_dims=True)
    cluster_weights2 = tf.get_variable(
        "cluster_weights2", [1, self.feature_size, self.cluster_size],
        initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
    a = tf.multiply(a_sum, cluster_weights2)
    activation = tf.transpose(activation, perm=[0, 2, 1])
    reshaped_input = tf.reshape(reshaped_input, [-1, self.max_samples, self.feature_size])
    vlad = tf.matmul(activation, reshaped_input)
    vlad = tf.transpose(vlad, perm=[0, 2, 1])
    vlad = tf.subtract(vlad, a)
    # Intra-normalization, flatten, then L2-normalize the full descriptor.
    vlad = tf.nn.l2_normalize(vlad, 1)
    vlad = tf.reshape(vlad, [-1, self.cluster_size * self.feature_size])
    vlad = tf.nn.l2_normalize(vlad, 1)
    # Dimensionality reduction to output_dim.
    hidden1_weights = tf.get_variable(
        "hidden1_weights", [self.cluster_size * self.feature_size, self.output_dim],
        initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.cluster_size)))
    vlad = tf.matmul(vlad, hidden1_weights)
    if self.gating:
      vlad = super(NetVLAD, self).context_gating(vlad)
    return vlad
class NetRVLAD(PoolingBaseModel):
  """Creates a NetRVLAD class (Residual-less NetVLAD)."""

  def __init__(self, feature_size, max_samples, cluster_size, output_dim, **kwargs):
    """Initialize a NetRVLAD block.

    Args:
      feature_size: Dimensionality of the input features.
      max_samples: The maximum number of samples to pool.
      cluster_size: The number of clusters.
      output_dim: size of the output space after dimension reduction.
      **kwargs: forwarded to PoolingBaseModel (add_batch_norm, is_training,
        gating, name, outputs_collections).
    """
    super(NetRVLAD, self).__init__(feature_size, max_samples, cluster_size, output_dim, **kwargs)

  def forward(self, reshaped_input):
    """Forward pass of a NetRVLAD block.

    Args:
      reshaped_input: The input in reshaped in the following form:
      'batch_size' x 'max_samples' x 'feature_size'.

    Returns:
      vlad: the pooled vector of size: 'batch_size' x 'output_dim'
    """
    # Soft-assignment of each descriptor to every cluster.
    cluster_weights = tf.get_variable(
        "cluster_weights", [self.feature_size, self.cluster_size],
        initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
    activation = tf.matmul(reshaped_input, cluster_weights)
    if self.add_batch_norm:
      activation = batch_norm(
          activation, center=True, scale=True, is_training=self.is_training, scope="cluster_bn")
    else:
      # Bug fix: the original used the undefined bare name `cluster_size`
      # for the bias shape, which raised NameError whenever
      # add_batch_norm=False.  Use self.cluster_size (as SoftDBoW does).
      cluster_biases = tf.get_variable(
          "cluster_biases", [self.cluster_size],
          initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
      tf.summary.histogram("cluster_biases", cluster_biases)
      activation += cluster_biases
    activation = tf.nn.softmax(activation)
    activation = tf.reshape(activation, [-1, self.max_samples, self.cluster_size])
    activation = tf.transpose(activation, perm=[0, 2, 1])
    reshaped_input = tf.reshape(reshaped_input, [-1, self.max_samples, self.feature_size])
    # Aggregate descriptors per cluster; no residual subtraction
    # (this is the "residual-less" variant of NetVLAD).
    vlad = tf.matmul(activation, reshaped_input)
    vlad = tf.transpose(vlad, perm=[0, 2, 1])
    # Intra-normalization, flatten, then L2-normalize the full descriptor.
    vlad = tf.nn.l2_normalize(vlad, 1)
    vlad = tf.reshape(vlad, [-1, self.cluster_size * self.feature_size])
    vlad = tf.nn.l2_normalize(vlad, 1)
    # Dimensionality reduction to output_dim.
    hidden1_weights = tf.get_variable(
        "hidden1_weights", [self.cluster_size * self.feature_size, self.output_dim],
        initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.cluster_size)))
    vlad = tf.matmul(vlad, hidden1_weights)
    if self.gating:
      vlad = super(NetRVLAD, self).context_gating(vlad)
    return vlad
class SoftDBoW(PoolingBaseModel):
  """Creates a Soft Deep Bag-of-Features class."""

  def __init__(self, feature_size, max_samples, cluster_size, output_dim, **kwargs):
    """Initialize a Soft-DBoW block.

    Args:
      feature_size: Dimensionality of the input features.
      max_samples: The maximum number of samples to pool.
      cluster_size: The number of clusters.
      output_dim: size of the output space after dimension reduction.
      **kwargs: forwarded to PoolingBaseModel (add_batch_norm, is_training,
        gating, name, outputs_collections).
    """
    super(SoftDBoW, self).__init__(feature_size, max_samples, cluster_size, output_dim, **kwargs)

  def forward(self, reshaped_input):
    """Forward pass of a Soft-DBoW block.

    Args:
      reshaped_input: The input in reshaped in the following form:
      'batch_size' x 'max_samples' x 'feature_size'.

    Returns:
      bof: the pooled vector of size: 'batch_size' x 'output_dim'
    """
    # Soft-assignment of each descriptor to every cluster.
    cluster_weights = tf.get_variable(
        "cluster_weights", [self.feature_size, self.cluster_size],
        initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
    activation = tf.matmul(reshaped_input, cluster_weights)
    if self.add_batch_norm:
      activation = batch_norm(
          activation, center=True, scale=True, is_training=self.is_training, scope="cluster_bn")
    else:
      cluster_biases = tf.get_variable(
          "cluster_biases", [self.cluster_size],
          initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
      activation += cluster_biases
    activation = tf.nn.softmax(activation)
    activation = tf.reshape(activation, [-1, self.max_samples, self.cluster_size])
    # Bag-of-features: sum assignment mass over samples, L2-normalize.
    bof = tf.reduce_sum(activation, 1)
    bof = tf.nn.l2_normalize(bof, 1)
    # Dimensionality reduction to output_dim.
    hidden1_weights = tf.get_variable(
        "hidden1_weights", [self.cluster_size, self.output_dim],
        initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.cluster_size)))
    bof = tf.matmul(bof, hidden1_weights)
    if self.gating:
      bof = super(SoftDBoW, self).context_gating(bof)
    return bof
class NetFV(PoolingBaseModel):
"""Creates a NetFV class."""
def __init__(self, feature_size, max_samples, cluster_size, output_dim, **kwargs):
"""Initialize a NetVLAD block.
Args:
feature_size: Dimensionality of the input features.
max_samples: The maximum number of samples to pool.
cluster_size: The number of clusters.
output_dim: size of the output space after dimension reduction.
add_batch_norm: (bool) if True, adds batch normalization.
is_training: (bool) Whether or not the graph is training.
gating: (bool) Whether or not to use gating operation
name: a string, name of the layer
outputs_collections: The collections to which the outputs are added.
"""
super(NetFV, self).__init__(feature_size, max_samples, cluster_size, output_dim, **kwargs)
def forward(self, reshaped_input):
"""Forward pass of a NetFV block.
Args:
reshaped_input: The input in reshaped in the following form:
'batch_size' x 'max_samples' x 'feature_size'.
Returns:
fv: the pooled vector of size: 'batch_size' x 'output_dim'
"""
cluster_weights = tf.get_variable(
"cluster_weights", [self.feature_size, self.cluster_size],
initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
covar_weights = tf.get_variable(
"covar_weights", [self.feature_size, self.cluster_size],
initializer=tf.random_normal_initializer(mean=1.0, stddev=1 / math.sqrt(self.feature_size)))
covar_weights = tf.square(covar_weights)
eps = tf.constant([1e-6])
covar_weights = tf.add(covar_weights, eps)
activation = tf.matmul(reshaped_input, cluster_weights)
if self.add_batch_norm:
activation = batch_norm(
activation, center=True, scale=True, is_training=self.is_training, scope="cluster_bn")
else:
cluster_biases = tf.get_variable(
"cluster_biases", [self.cluster_size],
initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
activation += cluster_biases
activation = tf.nn.softmax(activation)
activation = tf.reshape(activation, [-1, self.max_samples, self.cluster_size])
a_sum = tf.reduce_sum(activation, -2, keep_dims=True)
cluster_weights2 = tf.get_variable(
"cluster_weights2", [1, self.feature_size, self.cluster_size],
initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(self.feature_size)))
a = tf.multiply(a_sum, cluster_weights2)
activation = tf.transpose(activation, perm=[0, 2, 1])
reshaped_input = tf.reshape(reshaped_input, [-1, self.max_samples, self.feature_size])
fv1 = tf.matmul(activation, reshaped_input)
fv1 = tf.transpose(fv1, perm=[0, 2, 1])
# computing second order FV
a2 = tf.multiply(a_sum, tf.square(cluster_weights2))
b2 = tf.multiply(fv1, cluster_weights2)
fv2 = tf.matmul(activation, tf.square(reshaped_input))
fv2 = tf.transpose(fv2, perm=[0, 2, 1])
fv2 = tf.add_n([a2, fv2, tf.scalar_mul(-2, b2)])
fv2 = tf.divide(fv2, tf.square(covar_weights))
fv2 = tf.subtract(fv2, a_sum)
fv2 = tf.reshape(fv2, [-1, self.cluster_size * self.feature_size])
fv2 = tf.nn.l2_normalize(fv2, 1)
fv2 = tf.reshape(fv2, [-1, self.cluster_size * self.feature_size])
fv2 = tf.nn.l2_normalize(fv2, 1)
fv1 = tf.subtract(fv1, a)
fv1 = tf.divide(fv1, covar_weights)
fv1 = tf.nn.l2_normalize(fv1, 1)
fv1 = tf.reshape(fv1, [-1, self.cluster_size * self.feature_size])
fv1 = tf.nn.l2_normalize(fv1, 1)
fv = tf.concat([fv1, fv2], 1)
hidden1_weights = tf.get_variable(
"hidden1_weights", [2 * self.cluster_size | |
<reponame>schelleg/rfsoc_sam<gh_stars>10-100
__author__ = "<NAME>"
__organisation__ = "The Univeristy of Strathclyde"
__support__ = "https://github.com/strath-sdr/rfsoc_sam"
import ipywidgets as ipw
import numpy as np
import plotly.graph_objs as go
import warnings
import matplotlib.pyplot as plt
from PIL import Image
from scipy import signal
from rfsoc_freqplan import calculation
warnings.simplefilter(action='ignore', category=FutureWarning)
# Interleaving factor passed to the DDC frequency planner (il_factor kwarg
# of calculation.FrequencyPlannerDDC below) -- presumably the RFSoC ADC
# interleaving factor; TODO confirm against the rfsoc_freqplan package.
IL_FACTOR = 8
# Reference frequency passed as pll_ref to the frequency planner, in Hz.
PLL_REF = 409.6e6
class Spectrum():
    """Interactive frequency-domain plot backed by a plotly FigureWidget.

    Trace layout of the underlying figure:
      data[0]  spectrum line
      data[1]  grey fill trace used as the baseline under the spectrum
      data[2]  maximum-hold markers
      data[3]  minimum-hold markers
      data[4:] one trace per DDC spur listed in ``_spurs_list``

    New samples are pushed through the ``data`` property; post-processing
    (average/median/max/min hold) is applied before plotting.
    """
    def __init__(self,
                 plot_data,
                 sample_frequency,
                 number_samples,
                 centre_frequency=0,
                 nyquist_stopband=1,
                 xlabel='Frequency (Hz)',
                 ylabel='Amplitude',
                 plot_width=800,
                 plot_height=400,
                 display_mode=0,
                 data_windowsize=16,
                 spectrum_mode=True,
                 decimation_factor=2):
        """Build the figure, the DDC frequency plan and the x-axis grid."""
        self._y_data = plot_data
        self._y_data_current = plot_data
        self._sample_frequency = sample_frequency
        self._number_samples = number_samples
        self._decimation_factor = decimation_factor
        self._centre_frequency = centre_frequency
        # Resolution bandwidth: effective sample rate / number of bins.
        self._rbw = (self._sample_frequency/self._decimation_factor) \
                        /self._number_samples
        self._upper_limit = (self._sample_frequency/self._decimation_factor)/2
        self._lower_limit = -(self._sample_frequency/self._decimation_factor)/2
        self._upper_index = self._number_samples-1
        self._lower_index = 0
        self._xlabel = xlabel
        self._ylabel = ylabel
        # Frequency grid centred on the NCO/centre frequency.
        self._x_data = np.arange(self._lower_limit,
                                 self._upper_limit,
                                 self._rbw) + self._centre_frequency
        self._range = (min(self._x_data), max(self._x_data))
        self._yrange = [-150, 0]
        self._display_mode = display_mode
        self._spectrum_mode = spectrum_mode
        self._nyquist_stopband = nyquist_stopband
        self._data_window = np.empty(1)
        self._min_indices = [0]
        self._max_indices = [0]
        self._number_min_indices = 1
        self._number_max_indices = 1
        # Counter that throttles spur-marker refreshes in the data setter.
        self._update_ddc_counter = 0
        self.ddc_centre_frequency = 0
        self.data_windowsize = data_windowsize
        self.post_process = 'none'
        self.enable_updates = False
        self.display_min = False
        self.display_max = False
        self.display_ddc_plan = []
        # Even Nyquist zone -> -1 (spectrally inverted), odd zone -> +1.
        if (np.ceil(self._centre_frequency/(self._sample_frequency/2)) % 2) == 0:
            self._nyquist_direction = -1
        else:
            self._nyquist_direction = 1
        layout = {
            'hovermode' : 'closest',
            'height' : np.ceil(plot_height),
            'width' : np.ceil(plot_width),
            'xaxis' : {
                'title' : self._xlabel,
                'showticklabels' : True,
                'autorange' : True
            },
            'yaxis' : {
                'title' : self._ylabel,
                'range' : self._yrange,
                'autorange' : True
            },
            'margin' : {
                't':25,
                'b':25,
                'l':25,
                'r':25
            },
            'showlegend' : False,
        }
        config = {'displayModeBar' : False}
        plot_data = []
        # data[0]: the spectrum line itself.
        plot_data.append(
            go.Scatter(
                x = self._x_data,
                y = self._y_data,
                name = 'Spectrum',
                line = {'color' : 'palevioletred',
                        'width' : 0.75
                }
            )
        )
        # data[1]: baseline at -300 dB filled up to the spectrum trace.
        plot_data.append(
            go.Scatter(
                x = self._x_data,
                y = np.zeros(self._number_samples) - 300,
                name = '',
                fill = 'tonexty',
                fillcolor = 'rgba(128, 128, 128, 0.5)'
            )
        )
        # data[2]: maximum markers (populated by _display_analysis).
        plot_data.append(
            go.Scatter(
                x = None,
                y = None,
                mode = 'markers',
                name = 'Maximum',
                marker = {
                    'size' : 8,
                    'color' : 'red',
                    'symbol' : 'cross'
                }
            )
        )
        # data[3]: minimum markers.
        plot_data.append(
            go.Scatter(
                x = None,
                y = None,
                mode = 'markers',
                name = 'Minimum',
                marker = {
                    'size' : 8,
                    'color' : 'blue',
                    'symbol' : 'cross'
                }
            )
        )
        # Frequency planner supplying the spur locations plotted below.
        self.ddc_plan = calculation.FrequencyPlannerDDC(
            fs_rf=self._sample_frequency,
            il_factor=IL_FACTOR,
            fc=self.ddc_centre_frequency,
            dec=self._decimation_factor,
            nco=self._centre_frequency,
            pll_ref=PLL_REF
        )
        self._spurs_list = ['rx_alias', 'rx_image',
                            'hd2', 'hd2_image', 'hd3', 'hd3_image',
                            'pll_mix_up', 'pll_mix_up_image', 'pll_mix_down', 'pll_mix_down_image']
        # data[4:]: one (initially hidden) trace per spur type.
        for spur in self._spurs_list:
            spur_data = getattr(self.ddc_plan, spur)
            spur_data['x'] = self._nyquist_direction*spur_data['x'] + self._centre_frequency
            plot_data.append(
                go.Scatter(
                    x = None,
                    y = None,
                    name = spur_data['label'],
                    line = dict(color=spur_data['color'])
                )
            )
            self.display_ddc_plan.append(False)
        self._plot = go.FigureWidget(
            layout=layout,
            data=plot_data,
        )
        self._clear_plot()
        self._update_x_limits()
        self._update_x_axis()
    @property
    def decimation_factor(self):
        """Current DDC decimation factor."""
        return self._decimation_factor
    @decimation_factor.setter
    def decimation_factor(self, decimation_factor):
        # Changing the decimation changes the RBW, so rebuild the x-axis.
        self._decimation_factor = decimation_factor
        self._rbw = (self._sample_frequency/self._decimation_factor) \
                        /self._number_samples
        self._clear_plot()
        self._update_x_limits()
        self._update_x_axis()
    @property
    def number_min_indices(self):
        """How many minima are marked when display_min is enabled."""
        return self._number_min_indices
    @number_min_indices.setter
    def number_min_indices(self, number_min_indices):
        self._number_min_indices = number_min_indices
    @property
    def number_max_indices(self):
        """How many maxima are marked when display_max is enabled."""
        return self._number_max_indices
    @number_max_indices.setter
    def number_max_indices(self, number_max_indices):
        self._number_max_indices = number_max_indices
    @property
    def data_windowsize(self):
        """Number of frames held for average/median post-processing."""
        return self._data_window.shape[0]
    @data_windowsize.setter
    def data_windowsize(self, data_windowsize):
        # Re-seed the rolling window with the current average so the first
        # few averaged frames are not biased towards zero.
        temp_average = np.average(self._y_data)
        self._data_window = np.full((data_windowsize, self._number_samples),
                                    fill_value=temp_average, dtype=np.single)
    @property
    def line_colour(self):
        """Colour of the spectrum line trace."""
        return self._plot.data[0].line.color
    @line_colour.setter
    def line_colour(self, line_colour):
        self._plot.data[0].line.color = line_colour
    @property
    def line_fill(self):
        """Fill colour of the baseline trace under the spectrum."""
        return self._plot.data[1].fillcolor
    @line_fill.setter
    def line_fill(self, line_fill):
        self._plot.data[1].fillcolor = line_fill
    @property
    def yrange(self):
        """y-axis display range as [min, max]."""
        return self._yrange
    @yrange.setter
    def yrange(self, yrange):
        self._yrange = yrange
        self._plot.layout.yaxis.range = self._yrange
    @property
    def template(self):
        """Plotly layout template of the figure."""
        return self._plot.layout.template
    @template.setter
    def template(self, template):
        self._plot.layout.template = template
    @property
    def display_mode(self):
        """Display mode flag; only 0 and 1 are accepted."""
        return self._display_mode
    @display_mode.setter
    def display_mode(self, display_mode):
        if display_mode in [0, 1]:
            self._display_mode = display_mode
            self._update_x_limits()
            self._update_x_axis()
    @property
    def centre_frequency(self):
        """Centre frequency the x-axis is shifted by."""
        return self._centre_frequency
    @centre_frequency.setter
    def centre_frequency(self, fc):
        self._centre_frequency = fc
        # Recompute spectral inversion for the new Nyquist zone.
        if (np.ceil(self._centre_frequency/(self._sample_frequency/2)) % 2) == 0:
            self._nyquist_direction = -1
        else:
            self._nyquist_direction = 1
        self._update_x_axis()
    @property
    def sample_frequency(self):
        """RF sample frequency in Hz."""
        return self._sample_frequency
    @sample_frequency.setter
    def sample_frequency(self, fs):
        self._sample_frequency = fs
        self._rbw = (self._sample_frequency/self._decimation_factor) \
                        /self._number_samples
        self._clear_plot()
        self._update_x_limits()
        self._update_x_axis()
    @property
    def data(self):
        """Most recent (trimmed) spectrum frame shown on the plot."""
        return self._y_data
    @data.setter
    def data(self, data):
        # Ingest a new frame: post-process, trim to the stopband window,
        # redraw, run min/max analysis and occasionally refresh spurs.
        if self.enable_updates:
            data = self._apply_post_process(data)
            self._y_data_current = data
            self._y_data = self._y_data_current[self._lower_index:self._upper_index]
            self._plot.data[0].update({'x':self._x_data, 'y':self._y_data})
            self._apply_analysis()
            self._display_analysis()
            # Spur markers are refreshed only every 9th frame to save work.
            if self._update_ddc_counter > 8:
                self.update_ddc_amplitude()
                self._update_ddc_counter = 0
            else:
                self._update_ddc_counter = self._update_ddc_counter + 1
    @property
    def xlabel(self):
        """x-axis title text."""
        return self._xlabel
    @xlabel.setter
    def xlabel(self, xlabel):
        self._xlabel = xlabel
        self._plot.layout.xaxis = {'title':{'text':self._xlabel}}
    @property
    def ylabel(self):
        """y-axis title text."""
        return self._ylabel
    @ylabel.setter
    def ylabel(self, ylabel):
        self._ylabel = ylabel
        self._plot.layout.yaxis = {'title':{'text':self._ylabel}}
    @property
    def nyquist_stopband(self):
        """Fraction (0..1] of the Nyquist band that is displayed."""
        return self._nyquist_stopband
    @nyquist_stopband.setter
    def nyquist_stopband(self, stopband):
        self._nyquist_stopband = stopband
        self._update_x_limits()
        self._update_x_axis()
    @property
    def width(self):
        """Figure width in pixels."""
        return self._plot.layout.width
    @width.setter
    def width(self, width):
        self._plot.layout.width = width
    @property
    def height(self):
        """Figure height in pixels."""
        return self._plot.layout.height
    @height.setter
    def height(self, height):
        self._plot.layout.height = height
    @property
    def number_samples(self):
        """FFT length / number of bins per frame."""
        return self._number_samples
    @number_samples.setter
    def number_samples(self, number_samples):
        self._number_samples = number_samples
        self._rbw = (self._sample_frequency/self._decimation_factor) \
                        /self._number_samples
        self._clear_plot()
        self._update_x_limits()
        self._update_x_axis()
    def _display_analysis(self):
        """Push the min/max marker positions into traces data[2]/data[3]."""
        if self.display_max:
            self._plot.data[2].update({'x':[self._x_data[j] for j in self._max_indices],
                                       'y':[self._y_data[j] for j in self._max_indices]})
            #self._plot.plotly_relayout({'xaxis' : {'range' : self._range}})
            self._plot.layout.xaxis.range = self._range
        else:
            self._plot.data[2].update({'x':None,
                                       'y':None})
        if self.display_min:
            self._plot.data[3].update({'x':[self._x_data[j] for j in self._min_indices],
                                       'y':[self._y_data[j] for j in self._min_indices]})
            #self._plot.plotly_relayout({'xaxis' : {'range' : self._range}})
            self._plot.layout.xaxis.range = self._range
        else:
            self._plot.data[3].update({'x':None,
                                       'y':None})
    def _apply_analysis(self):
        """Locate the N largest/smallest bins of the current frame."""
        if self.display_max:
            self._max_indices = self._y_data.argsort()[-self._number_max_indices:]
        if self.display_min:
            self._min_indices = self._y_data.argsort()[:self._number_min_indices]
    def _apply_post_process(self, data):
        """fft-shift an incoming frame and apply the selected hold mode.

        'average'/'median' use the rolling _data_window; 'max'/'min' hold
        against the previous displayed frame; anything else passes through.
        """
        fdata = np.fft.fftshift(data)
        if self.post_process == 'average':
            self._data_window = np.roll(self._data_window, shift=1, axis=0)
            self._data_window[0, :] = fdata
            fdata = np.average(self._data_window, axis=0)
        elif self.post_process == 'median':
            self._data_window = np.roll(self._data_window, shift=1, axis=0)
            self._data_window[0, :] = fdata
            fdata = np.median(self._data_window, axis=0)
        elif self.post_process == 'max':
            fdata = np.maximum(self._y_data_current, fdata)
        elif self.post_process == 'min':
            fdata = np.minimum(self._y_data_current, fdata)
        else:
            pass
        return fdata
    def _clear_plot(self):
        """Blank the spectrum trace at the -300 dB floor."""
        zeros = np.zeros(self._number_samples, dtype=np.single) - 300
        zdata = zeros[self._lower_index : self._upper_index]
        self._plot.data[0].y = zdata
    def _update_x_limits(self):
        """Derive frequency limits and bin indices from the stopband fraction."""
        self._upper_limit = ((self._sample_frequency/self._decimation_factor)/2)-self._rbw* \
            np.ceil((self._number_samples/2)*(1-self._nyquist_stopband))
        self._lower_limit = -((self._sample_frequency/self._decimation_factor)/2)+self._rbw* \
            np.ceil((self._number_samples/2)*(1-self._nyquist_stopband))
        self._upper_index = int(self._number_samples- \
                            int(np.ceil((self._number_samples/2)* \
                            (1-self._nyquist_stopband))))
        self._lower_index = int(np.ceil((self._number_samples/2)* \
                            (1-self._nyquist_stopband)))
    def _update_x_axis(self):
        """Rebuild the x grid and reset the display buffers for the new axis."""
        self._x_data = np.arange(self._lower_limit,
                                 self._upper_limit,
                                 self._rbw) + self._centre_frequency
        self._range = (min(self._x_data), max(self._x_data))
        self._plot.layout.xaxis.range = self._range
        # Re-seed the rolling window (setter re-creates it at current size).
        self.data_windowsize = self._data_window.shape[0]
        # Seed the hold buffers so max/min hold converges from the floor.
        if self.post_process == 'max':
            self._y_data = np.zeros(len(self._x_data)) - 300
            self._y_data_current = np.zeros(self._number_samples) - 300
        elif self.post_process == 'min':
            self._y_data = np.zeros(len(self._x_data)) + 300
            self._y_data_current = np.zeros(self._number_samples) + 300
        else:
            temp_average = np.average(self._y_data)
            self._y_data = np.zeros(len(self._x_data)) + temp_average
            self._y_data_current = np.zeros(self._number_samples) + temp_average
        self._plot.data[1].update({
            'x':self._x_data,
            'y':np.zeros(len(self._x_data)) - 300
        })
        self.update_ddc_plan()
    def update_ddc_plan(self):
        """Refresh the frequency planner inputs and redraw spur markers."""
        if any(self.display_ddc_plan):
            self.ddc_plan.fs_rf = self._sample_frequency
            self.ddc_plan.fc = self.ddc_centre_frequency
            self.ddc_plan.dec = self._decimation_factor
            # NCO sign depends on the Nyquist zone of the centre frequency.
            nyquist_zone = np.floor(self._centre_frequency/(self._sample_frequency/2)) + 1
            if (nyquist_zone % 2) == 0:
                self.ddc_plan.nco = self._centre_frequency
            else:
                self.ddc_plan.nco = -self._centre_frequency
            self.update_ddc_amplitude()
    def update_ddc_amplitude(self):
        """Draw each enabled spur as a vertical segment at its frequency.

        The segment spans from the spectrum average up to the spectrum value
        at the spur's bin.
        """
        spectrum_average = np.mean(self._y_data)
        min_x_data = min(self._x_data)
        max_x_data = max(self._x_data)
        # Bin offset of the left edge; used to map frequency -> array index.
        # NOTE(review): assumes xvalue/self._rbw lands on a valid index of
        # _y_data -- confirm there is no off-by-one at the band edges.
        minimum_spectrum = min(self._x_data)/self._rbw
        for index, spur in enumerate(self._spurs_list):
            if self.display_ddc_plan[index]:
                spur_data = getattr(self.ddc_plan, spur)
                xvalue = self._nyquist_direction*spur_data['x'] + self._centre_frequency
                if (xvalue >= min_x_data) and (xvalue <= max_x_data):
                    self._plot.data[4+index].update({
                        'x' : [xvalue, xvalue],
                        'y' : [spectrum_average, self._y_data[int((xvalue/self._rbw)-minimum_spectrum)]],
                        })
                else:
                    self._plot.data[4+index].update({
                        'x' : None,
                        'y' : None,
                        })
            else:
                self._plot.data[4+index].update({
                    'x' : None,
                    'y' : None,
                    })
    def get_plot(self):
        """Return the underlying plotly FigureWidget."""
        return self._plot
class Spectrogram():
    def __init__(self,
                 width=800,
                 height=400,
                 image_width=400,
                 image_height=200,
                 centre_frequency=0,
                 sample_frequency=4096e6,
                 decimation_factor=2,
                 nyquist_stopband=1,
                 ypixel=2,
                 plot_time=20,
                 zmin=-80,
                 zmax=0,
                 cmap='jet'):
        """Build a waterfall plot: an RGB image scrolled under a plotly figure.

        NOTE(review): the ``plot_time`` argument is accepted but never used;
        ``self._plot_time`` is set from ``image_height`` below -- confirm
        whether that is intentional.
        """
        self._width = width
        self._height = height
        self._image_width = image_width
        self._image_height = image_height
        self._sample_frequency = sample_frequency
        self._decimation_factor = decimation_factor
        self._centre_frequency = centre_frequency
        self._nyquist_stopband = nyquist_stopband
        self._ypixel = ypixel
        # Mid-grey RGB canvas that the waterfall rows are written into.
        self._data = np.ones((self._image_height, self._image_width, 3), dtype=np.uint8)*128
        self._data_status = False
        self.cmap = cmap
        # Left edge of the image in frequency terms.
        self._image_x = -(self._sample_frequency/self._decimation_factor)/2 + self._centre_frequency
        self._image_y = 0
        # Visible frequency span, reduced by the Nyquist stopband fraction.
        self._lower_limit = (-(self._sample_frequency/self._decimation_factor)/2) * \
                            self._nyquist_stopband + self._centre_frequency
        self._upper_limit = ((self._sample_frequency/self._decimation_factor)/2) * \
                            self._nyquist_stopband + self._centre_frequency
        self._plot_time = self._image_height
        self.zmin = zmin
        self.zmax = zmax
        self.enable_updates = False
        self._plot = go.FigureWidget(layout={
            'height' : self._height,
            'width' : self._width,
            'yaxis' : {
                'showgrid' : False,
                'range' : [-self._plot_time, 0],
                'autorange' : False,
                'title' : 'Frame Number',
                'showticklabels' : True,
                'visible' : True
            },
            'xaxis' : {
                'zeroline': False,
                'showgrid' : False,
                'range' : [self._lower_limit, self._upper_limit],
                'autorange' : False,
                'title' : 'Frequency (Hz)',
            },
            'margin' : {
                't':25,
                'b':25,
                'l':25,
                'r':25,
            }})
        img = Image.fromarray(self._data, 'RGB')
        # The waterfall itself is a background layout image stretched over
        # the full decimated bandwidth and plot-time extent.
        self._plot.add_layout_image(
            dict(
                source=img,
                xref="x",
                yref="y",
                x=self._image_x,
                y=self._image_y,
                sizex=(self._sample_frequency/self._decimation_factor),
                sizey=self._plot_time,
                sizing='stretch',
                opacity=1,
                layer="below")
        )
        self._update_image()
    @property
    def template(self):
        """Plotly layout template of the waterfall figure."""
        return self._plot.layout.template
    @template.setter
    def template(self, template):
        self._plot.layout.template = template
| |
<filename>cf_perfeval/library.py
from cf_perfeval.helpers import flatten
# Prepare curve data
def perfeval_prepare_curve_data(input_dict): # , subtype
    """Compute evaluation-curve data (ROC, PR, lift, cost, rate-driven and
    Kendall curves plus AUC/Gini/AUPR/AUCH) for each prediction set.

    input_dict['predictions'] holds curves with 'actual' (0/1 labels) and
    'predicted' scores; input_dict['subtype'] selects '-score' (descending
    score sweep) or rank-based processing.

    NOTE(review): on the non-'-score' path below, ROChull, COSTseries,
    COSTend, RATEseries, KENseries, AUC and AUPR are referenced/assigned to
    the curve without ever being initialised -> NameError at runtime.
    That branch also counts string labels '1'/'0' where the '-score' branch
    counts ints -- confirm the intended label type before relying on it.
    """
    # import math
    nPoints = 4
    performance = flatten(input_dict['predictions']) # chartdata
    subtype = input_dict['subtype']
    kenmax = 0.5
    ratemax = 0.5
    for curve in performance:
        n = len(curve['actual'])
        negs = curve['actual'].count(0)
        poss = curve['actual'].count(1)
        # Both classes must be present for any of the curves to be defined.
        if poss == 0 or negs == 0:
            raise Exception("Class Error, zero poss or zero negs, only one class or other type error.")
            # return []
        try:
            ranks = curve['rank']
        except:
            ranks = range(n + 1)[1:] # ranks from 1
        # Pair each actual label with its (float) prediction score.
        paralel = []
        for i in range(n):
            paralel.append([curve['actual'][i], float(curve['predicted'][i])])
        if (subtype == '-score'):
            ROCseries = [[0, 0, '-Inf']];
            PRseries = [[0, 1, '-Inf']];
            LIFTseries = [[0, 0, '-Inf']]
            ROChull = [[0, 0, '-Inf']];
            COSTseries = [[0, 0, '-Inf']];
            RATEseries = [];
            KENseries = [[0, 0]];
            KENup = [[0, 1]];
            KENdown = [[0, 0]]
            _oldrate = 0
            _oldloss = 0
            AUC = 0
            AUPR = 0
            # Sweep thresholds over the scores in descending order.
            ranked = sorted(paralel, key=lambda pair: pair[1], reverse=True)
            print("ranked:" + curve['name'])
            print("by prediction: " + str(ranked))
            print("by actual: " + str(sorted(paralel, key=lambda pair: pair[0], reverse=True)))
            k = 0
            tp = 0;
            fp = 0;
            tp_old = 0;
            fp_old = 0;
            n1 = 0;
            concordant_pairs = 0;
            discordant_pairs = 0;
            while k < len(ranked):
                addedconc = 0;
                addeddisc = 0;
                threshold = ranked[k][1];
                # Everything scoring at or above the threshold is predicted positive.
                group = [x[0] for x in ranked if x[1] >= threshold]
                tp = group.count(1)
                fp = group.count(0)
                # next k is len(group).
                ties = len(group) - k
                n1 += ties * (ties - 1) / 2
                concordant_pairs += tp_old * (fp - fp_old)
                discordant_pairs += fp_old * (tp - tp_old)
                ROCpoint = [fp * 1.0 / negs, tp * 1.0 / poss, threshold]
                ROCseries.append(ROCpoint)
                # Trapezoidal accumulation of AUC and AUPR.
                AUC += (ROCpoint[1] + ROCseries[-2][1]) * (ROCpoint[0] - ROCseries[-2][0]) * 0.5
                PRseries.append([tp * 1.0 / poss, tp * 1.0 / (tp + fp), threshold])
                AUPR += (PRseries[-1][1] + PRseries[-2][1]) * (PRseries[-1][0] - PRseries[-2][0]) * 0.5
                LIFTseries.append([len(group) * 1.0 / n, tp * 1.0 / poss, threshold])
                # Convex hull and lower envelope:
                while len(ROChull) >= 2 and (ROChull[-1][0] == ROCpoint[0] or (
                        ROChull[-2][0] != ROChull[-1][0] and (ROChull[-1][1] - ROChull[-2][1]) / (
                            ROChull[-1][0] - ROChull[-2][0]) <= (ROCpoint[1] - ROChull[-1][1]) / (
                                ROCpoint[0] - ROChull[-1][0]))):
                    ROChull.pop()
                    COSTseries.pop()
                ROChull.append(ROCpoint)
                if (ROCpoint[0] != ROChull[-2][0]):
                    slope = (ROCpoint[1] - ROChull[-2][1]) / (ROCpoint[0] - ROChull[-2][0])
                    intercept = ROCpoint[1] - slope * ROCpoint[0]
                    COSTseries.append([1 / (slope + 1), (1 - intercept) / (1 + slope), threshold])
                else:
                    if len(COSTseries) == 0:
                        COSTseries.append([0, 0, threshold])
                    else:
                        COSTseries[0][2] = threshold
                COSTend = 1 - ROCpoint[1]
                # Rate driven curve:
                # The Rate driven curve is a list of intervals. Each interval is a set of points on the appropriate parabola. There are nPoints number of points
                RATEinterval = []
                pi0 = poss * 1.0 / n
                pi1 = 1 - pi0
                _newrate = pi1 * ROCpoint[0] + pi0 * ROCpoint[1]
                _newloss = 2 * (_newrate * (pi0 - _newrate) + pi1 * ROCpoint[0])
                RATEinterval.append([_oldrate, _oldloss, threshold, performance.index(curve) + 1])
                for i in range(1, nPoints):
                    alpha = i * 1.0 / nPoints
                    rate = _oldrate + alpha * (_newrate - _oldrate)
                    loss = 2 * (rate * (pi0 - rate) + pi1 * (
                            ROCseries[-2][0] + alpha * (ROCpoint[0] - ROCseries[-2][0])))
                    RATEinterval.append([rate, loss, 0])
                RATEinterval.append([_newrate, _newloss, 0])
                RATEseries.append(RATEinterval)
                if _newloss > ratemax:
                    ratemax = _newloss
                # Check the parabola's vertex for a larger loss inside the interval.
                m = 0.5 * (pi0 + pi1 * (ROCseries[-2][0] - ROCpoint[0]) / (_newrate - _oldrate))
                if m < _newrate and m > _oldrate:
                    mvalue = 2 * (m * (pi0 - m) + pi1 * (
                            (_newrate - m) * ROCseries[-2][0] + (m - _oldrate) * ROCpoint[0]) / (
                                _newrate - _oldrate))
                    if mvalue > ratemax:
                        ratemax = mvalue
                # Kendall curve:
                if _newrate <= pi0:
                    KENseries.append([_newrate, 2 * pi1 * ROCpoint[0], threshold])
                else:
                    if _oldrate < pi0:
                        KENseries.append([pi0, (2 * pi1 * ROCpoint[0] - KENseries[-1][1]) * (pi0 - KENseries[-1][0]) / (
                                _newrate - KENseries[-1][0]) + (KENseries[-1][1]), ''])
                    KENseries.append([_newrate, 2 * pi0 * (1 - ROCpoint[1]), threshold])
                if KENseries[-1][1] > kenmax:
                    kenmax = KENseries[-1][1]
                _oldrate = _newrate
                _oldloss = _newloss
                # Jump past all entries tied at this threshold.
                k += len(group) - k
                tp_old = tp
                fp_old = fp
        else:
            ROCseries = [[0, 0, 0]];
            PRseries = [[0, 1, 0]];
            LIFTseries = [[0, 0, 0]] # x: y: rank:
            # Ascending sweep over the (rank-like) prediction values.
            ranked = sorted(paralel, key=lambda pair: pair[1])
            print(ranked)
            k = 0
            while k < len(ranked):
                tp = 0;
                fp = 0;
                threshold = ranked[k][1];
                group = [x[0] for x in ranked if x[1] <= threshold]
                print(group)
                # NOTE(review): string labels counted here vs ints above.
                tp = group.count('1')
                fp = group.count('0')
                ROCpoint = [fp * 1.0 / negs, tp * 1.0 / poss, threshold]
                ROCseries.append([fp * 1.0 / negs, tp * 1.0 / poss, int(threshold)])
                PRseries.append([tp * 1.0 / poss, tp * 1.0 / (tp + fp), int(threshold)])
                LIFTseries.append([len(group) * 1.0 / n, tp * 1.0 / poss, int(threshold)])
                # NOTE(review): ROChull/COSTseries are not initialised on
                # this branch -> NameError on first use.
                while len(ROChull) >= 2 and (ROChull[-1][0] == ROCpoint[0] or (
                        ROChull[-2][0] != ROChull[-1][0] and (ROChull[-1][1] - ROChull[-2][1]) / (
                            ROChull[-1][0] - ROChull[-2][0]) <= (ROCpoint[1] - ROChull[-1][1]) / (
                                ROCpoint[0] - ROChull[-1][0]))):
                    ROChull.pop()
                    COSTseries.pop()
                ROChull.append(ROCpoint)
                if (ROCpoint[0] != ROChull[-2][0]):
                    slope = (ROCpoint[1] - ROChull[-2][1]) / (ROCpoint[0] - ROChull[-2][0])
                    intercept = ROCpoint[1] - slope * ROCpoint[0]
                    COSTseries.append([1 / (1 + slope), (1 - intercept) / (1 + slope)])
                else:
                    COSTseries.append([0.0, ROCpoint[0]])
                k += len(group) - k
        if COSTseries[-1][0] < 1:
            # append final point with max threshold
            COSTseries.append([1, COSTend, ranked[-1][1]])
        curve['ROCpoints'] = ROCseries
        curve['PRpoints'] = PRseries
        curve['LIFTpoints'] = LIFTseries
        curve['ROChull'] = ROChull
        curve['COSTpoints'] = COSTseries
        curve['RATEintervals'] = RATEseries
        curve['KENpoints'] = KENseries
        curve['AUC'] = AUC
        curve['Gini'] = 2 * AUC - 1
        n0 = n * (n - 1) / 2
        # curve['KENtau'] = (concordant_pairs - discordant_pairs) / math.sqrt((n0 - n1) * (n0 - (negs*(negs-1) + poss*(poss-1))/2))
        curve['AUPR'] = AUPR
        AUCH = 0
        for i in range(1, len(ROChull)):
            AUCH += (ROChull[i][1] + ROChull[i - 1][1]) * (ROChull[i][0] - ROChull[i - 1][0]) * 0.5
        curve['AUCH'] = AUCH
    # Global maxima used for scaling the Kendall and rate-driven plots.
    performance[0]['KENmax'] = kenmax
    performance[0]['RATEmax'] = ratemax
    output_dict = {}
    output_dict['performance'] = performance
    return output_dict
def perfeval_classification_statistics(input_dict):
    """Compute standard classification metrics from true/predicted labels.

    Args:
        input_dict: dict with key 'true_and_predicted_labels' holding either
            [y_true, y_pred] or a per-fold list [[y_true_1, y_pred_1], ...]
            (fold lists are flattened first). An optional 'pos_label' key is
            accepted.

    Returns:
        dict with 'accuracy', 'precision', 'recall', 'f1', 'auc' and
        'confusion_matrix'. AUC is only computed for binary problems;
        otherwise a descriptive string is returned in its place.

    Raises:
        Exception: if the input structure is not one of the two shapes above.
    """
    from sklearn import metrics
    labels = input_dict['true_and_predicted_labels']
    # NOTE(review): pos_label is read but never forwarded to the metric
    # calls below -- confirm whether binary metrics should honour it.
    pos_label = input_dict.get('pos_label', None)
    # Check if we have true and predicted labels for each fold
    if labels and isinstance(labels[0][0], list):
        try:
            # Flatten fold-wise label lists into two flat lists.
            y_true, y_pred = [], []
            for fold_labels in labels:
                y_true.extend(fold_labels[0])
                y_pred.extend(fold_labels[1])
            labels = [y_true, y_pred]
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not swallowed.
        except Exception:
            raise Exception('Expected true and predicted labels for each fold, but failed.' +
                            'If you wish to provide labels for each fold separately it should look like: ' +
                            '[[y_true_1, y_predicted_1], [y_true_2, y_predicted_2], ...]')
    if len(labels) != 2:
        raise Exception('Wrong input structure, this widget accepts labels in the form: [y_true, y_pred]')
    y_true, y_pred = labels
    # Map arbitrary (hashable) class labels to integer ids for sklearn.
    classes = set()
    classes.update(y_true + y_pred)
    classes = sorted(list(classes))
    # Assign integers to classes
    class_to_int = {}
    for i, cls_label in enumerate(classes):
        class_to_int[cls_label] = i
    y_true = [class_to_int[lbl] for lbl in y_true]
    y_pred = [class_to_int[lbl] for lbl in y_pred]
    accuracy = metrics.accuracy_score(y_true, y_pred)
    precision = metrics.precision_score(y_true, y_pred)
    recall = metrics.recall_score(y_true, y_pred)
    f1 = metrics.f1_score(y_true, y_pred)
    confusion_matrix = metrics.confusion_matrix(y_true, y_pred)
    # AUC is defined only for binary classes
    if len(classes) == 2:
        auc = metrics.roc_auc_score(y_true, y_pred)
    else:
        auc = 'undefined for multiple classes'
    return {'accuracy': accuracy, 'precision': precision, 'recall': recall,
            'f1': f1, 'auc': auc, 'confusion_matrix': confusion_matrix}
def perfeval_noise_detection(input_dict):
    """Score each noise-detection result against the known noisy indices.

    Args:
        input_dict: dict with 'noisy_inds' (ground-truth noisy indices),
            'detected_noise' (list of {'name', 'inds'} results) and
            'f_beta' (beta for the F-measure).

    Returns:
        dict with 'nd_eval': per-algorithm recall/precision/F-beta records,
        sorted by algorithm name.
    """
    from operator import itemgetter
    actual = input_dict['noisy_inds']
    results = []
    for detection in input_dict['detected_noise']:
        algorithm = detection['name']
        flagged = detection['inds']
        # Overlap between the true noisy set and what the algorithm flagged.
        overlap = set(actual) & set(flagged)
        recall = len(overlap) * 1.0 / len(actual) if actual else 0
        precision = len(overlap) * 1.0 / len(flagged) if flagged else 0
        beta = float(input_dict['f_beta'])
        print(beta, recall, precision)
        # F-beta is undefined when both components vanish; report 0.
        if precision == 0 and recall == 0:
            fscore = 0
        else:
            fscore = (1 + beta ** 2) * precision * recall / ((beta ** 2) * precision + recall)
        results.append({'name': algorithm, 'recall': recall, 'precision': precision,
                        'fscore': fscore, 'fbeta': beta})
    return {'nd_eval': sorted(results, key=itemgetter('name'))}
def perfeval_bar_chart(input_dict):
    """Visualization stub: the widget renders elsewhere, so no outputs."""
    return dict()
def perfeval_to_table(input_dict):
    """Visualization stub: the widget renders elsewhere, so no outputs."""
    return dict()
def perfeval_batch(input_dict):
| |
<gh_stars>0
''' konsentrasi.py
Diberikan daftar mata kuliah yang diambil oleh seorang
mahasiswa, tentukan apa konsentrasinya. Khusus untuk mahasiswa
Program Studi TIF (064*).
<NAME> (<EMAIL>)
20141227
* 20141227
* Mulai dengan mengumpulkan daftar mata kuliah untuk setiap
* konsentrasi: RPL, KI, dan GK (GrafKom).
'''
__author__ = '<NAME>'
import xlrd
# Mapping NIM (student id) -> student name for the cohort being classified.
# Names were redacted to '<NAME>' placeholders upstream.
peserta = { '06408021': '<NAME>', '06408032':
    '<NAME>', '06409017': 'Fahim Alawi', '06409019':
    '<NAME>', '06409022': '<NAME>',
    '06409025': 'Nishaizaty', '06409032': '<NAME>',
    '06409033': 'Tri Endah Sari', '06409036': 'Bhismi Alham',
    '06409045': '<NAME>', '06410002': 'Defry Tri Hendra',
    '06410005': 'Bintang Winandito', '06410008':
    'Widh<NAME>ani Wahdjudi', '06410010': 'Ayu Permata Sary',
    '06410013': 'Stefanus Joko Tri Anggoro',
    '06410023': 'Laras Ok<NAME>ah', '06410027': 'Marad<NAME>',
    '06508007': '<NAME>', '06509012': '<NAME>',
    '06509013': '<NAME>', '06510007': '<NAME>',
    '06510012': '<NAME>', '06509110': '<NAME>',
    '06411007': '<NAME>aka Ayu' }
# Mapping course code -> course title (Indonesian).
# NOTE(review): several keys repeat (e.g. 'IKH118', 'IKH151', 'IKL141',
# 'IKL341', 'IKL343') -- in a dict literal the later entry silently wins;
# confirm the duplicates are intentional.
coursedict = { 'IEB232': 'Teknik Komunikasi Data', 'IFI301':
    'Fisika I', 'IFI302': 'Fisika II', 'IIB205': 'Perilaku Organisasi ',
    'IIS201': 'Penelitian Operasiaonal', 'IKA301':
    'Kecerdasan Buatan', 'IKA312': 'Sistem Jaringan Syaraf',
    'IKA321': 'Komputasi Bergerak',
    'IKB205': 'Metodologi Penelitian dan Penulisan Ilmiah',
    'IKB206': 'Kreativitas dan Inovasi', 'IKB250': 'Kuliah Kerja Profesi',
    'IKB405': 'Penulisan ilmiah dan Laporan',
    'IKB406': 'Metodologi Penelitian ',
    'IKB411': 'Konsep Teknologi Informasi dan Keamanan',
    'IKB450': 'Tugas Akhir I', 'IKB451': 'Tugas Akhir II',
    'IKD112': 'Praktikum Manajemen Data dan Informasi',
    'IKD123': 'Praktikum Manajemen Data dan Informasi Lanjut',
    'IKD312': 'Manajemen Data dan Informasi',
    'IKD313': 'Manajemen Data dan Informasi Lanjut',
    'IKG101': 'Praktikum Grafika Komputer',
    'IKG102': 'Praktikum Pengolahan Citra',
    'IKG121': 'Praktikum Rekayasa Sistem Multimedia',
    'IKG301': 'Grafika Komputer', 'IKG302': 'Pengolahan Citra',
    'IKG304': 'Seni Animasi', 'IKG311': 'Sistem Informasi Geografi',
    'IKG321': 'Rekayasa Sistem Multimedia', 'IKG402': 'Grafika Komputer',
    'IKH109': 'Praktikum Pengelolaan Keamanan Informasi',
    'IKH110': 'Praktikum Keamanan Jaringan Komputer',
    'IKH111': 'Praktikum Sistem Operasi',
    'IKH112': 'Praktikum Keamanan Sistem Operasi',
    'IKH113': 'Praktikum Jaringan Komputer dan Keamanan',
    'IKH114': 'Praktikum Sistem Pengendalian Akses',
    'IKH116': 'Praktikum Model dan Arsitektur Keamanan Informasi',
    'IKH117': 'Praktikum Desain Keamanan Informasi',
    'IKH118': 'Praktikum Verifikasi dan Validasi Keamanan Informasi',
    'IKH119': 'Praktikum Keamanan Operasional',
    'IKH123': 'Praktikum Keamanan Informasi',
    'IKH126': 'Praktikum Perencanaan Kontinyuitas Bisnis',
    'IKH128': 'Praktikum Forensik Komputer dan Jaringan',
    'IKH132': 'Praktikum Pengelolaan Jaringan Komputer',
    'IKH151': 'Praktikum Organisasi dan Arsitektur Komputer',
    'IKH203': 'Arsitektur Komputer', 'IKH300': 'Pengantar Sistim Digital',
    'IKH303': 'Arsitektur Komputer', 'IKH306': 'Pemrograman Jaringan',
    'IKH309': 'Pengelolaan Jaringan Komputer',
    'IKH310': 'Sistem Keamanan Jaringan', 'IKH311': 'Sistem Operasi',
    'IKH312': 'Keamanan Sistem Operasi',
    'IKH313': 'Jaringan Komputer dan Keamanan',
    'IKH314': 'Sistem Pengendalian Akses',
    'IKH316': 'Model dan Arsitektur Keamanan Informasi',
    'IKH317': 'Desain Keamanan Informasi',
    'IKH318': 'Verifikasi dan Validasi Keamanan Informasi',
    'IKH319': 'Keamanan Operasional',
    'IKH321': 'Sistem Keamanan Komputer', 'IKH323': 'Keamanan Informasi',
    'IKH324': 'Keamanan Pengembangan Aplikasi',
    'IKH326': 'Perencanaan Kontinyuitas Bisnis', 'IKH327': 'Keamanan Fisik',
    'IKH328': 'Forensik Komputer dan Jaringan',
    'IKH329': 'Pengelolaan Keamanan Informasi', 'IKH330': 'Bahasa Rakitan',
    'IKH331': 'Sistem Terdistribusi', 'IKH342': 'Pengolahan Sinyal Digital',
    'IKH351': 'Organisasi dan Arsitektur Komputer',
    'IKL133': 'Praktikum Pemodelan Geometri',
    'IKL135': 'Praktikum Dasar Pemrograman',
    'IKL141': 'Praktikum Struktur Data dan Algoritma',
    'IKL232': 'Model Dan Simulasi', 'IKL233': 'Pemodelan Geometri',
    'IKL335': 'Dasar Pemrograman', 'IKL341': 'Struktur Data dan Algoritma',
    'IKL343': 'Desain dan Analisis Algoritma',
    'IKP102': 'Praktikum Pemrograman Berbasis Komponen',
    'IKP103': 'Praktikum Mobile Programming',
    'IKP111': 'Praktikum Pemrograman Berbasis Web',
    'IKP113': 'Praktikum Bahasa Pemrograman', 'IKP213': 'Bahasa Pemrograman',
    'IKP301': 'Desain dan Implementasi Program',
    'IKP302': 'Pemrograman Berbasis Komponen', 'IKP303': 'Mobile Programming',
    'IKP311': 'Pemrograman Berbasis Web',
    'IKP321': 'Manajemen dan Kualitas Perangkat Lunak',
    'IKP333': 'Cloud Computing',
    'IKS104': 'Praktikum Metode Berorientasi Objek',
    'IKS108': 'Praktikum Sistem Informasi Manajemen',
    'IKS118': 'Praktikum Verifikasi dan Validasi Perangkat Lunak',
    'IKS123': 'Praktikum Analisis dan Pemodelan Perangkat Lunak',
    'IKS126': 'Praktikum Desain Perangkat Lunak',
    'IKS201': 'Arsitektur Perangkat Lunak',
    'IKS212': 'Computer Aided Software Engineering',
    'IKS213': 'Kewirausahaan Telematika',
    'IKS301': 'Interaksi Manusia dan Komputer', 'IKS302': 'Sistem Informasi',
    'IKS304': 'Metode Berorientasi Objek',
    'IKS308': 'Sistem Informasi Manajemen',
    'IKS318': 'Verifikasi dan Validasi Perangkat Lunak',
    'IKS319': 'Verifikasi dan Validasi Sistem Informasi',
    'IKS323': 'Analisis dan Pemodelan Perangkat Lunak',
    'IKS326': 'Arsitektur Enterprise ', 'IKS327': 'Desain Perangkat Lunak ',
    'IKS328': 'ITSM (Information Technology Service Management)',
    'IKS413': 'Technopreneurship', 'ISA301': 'Bisnis Berbasis Internet',
    'ISD301': 'Data Warehouse', 'ISD305': 'Rekayasa Data dan Pengetahuan',
    'ISD312': 'Information Retrieval', 'ISD313': 'Virtualisasi',
    'ISD314': 'Teknik Data Mining', 'ISL302': 'Komunikasi Bisnis dan Teknis',
    'ISL303': 'Pengetahuan Bisnis', 'ISL304': 'Analisa Proses Bisnis (MPK)',
    'ISL305': 'Business Intelligence',
    'ISM114': 'Praktikum Audit Sistem Informasi',
    'ISM202': 'Manajemen Pengetahuan', 'ISM221': 'Ekonomi Informasi',
    'ISM302': 'Manajemen Konfigurasi',
    'ISM303': 'Komunikasi Interpersonal & Etika Profesi',
    'ISM312': 'Manajemen Pengetahuan',
    'ISM313': 'Pengantar Audit Sistem Informasi (MPK)',
    'ISM314': 'Audit Sistem Informasi', 'ISM315': 'Manajemen Keuangan',
    'ISM316': 'Manajemen Pemasaran IT', 'ISM317': 'Manajemen Dokumen',
    'ISM401': 'Manajemen Proyek Teknologi Informasi',
    'ISM403': 'Etika, Agama, Profesionalisme dan Legal',
    'ISM404': 'Proyek Rekayasa',
    'ISO101': 'Praktikum Pengelolaan Sistem Informasi',
    'ISO301': 'Pengelolaan Sistem Informasi (MPK)',
    'IUM263': 'Struktur Diskrit II', 'IUM314': 'Kalkulus',
    'IUM331': 'Kalkulus I', 'IUM332': 'Kalkulus II',
    'IUM333': 'Kalkulus Lanjut', 'IUM341': 'Aljabar Linier',
    'IUM351': 'Statistik Dasar', 'IUM461': 'Struktur Diskrit I',
    'PDU212': 'Teori Warna', 'PMA331': 'Kalkulus I', 'PMA332': 'Kalkulus II',
    'UAG201': 'Pendidikan Agama Islam', 'UAG202': 'Pendidikan Agama Kristen',
    'UAG203': 'Pendidikan Agama Katolik', 'UAG204': 'Pendidikan Agama Budha',
    'UAG205': 'Pendidikan Agama Hindu', 'UBA201': 'Bahasa Inggris I',
    'UBA202': 'Bahasa Inggris II', 'UBA402': 'Internasionalisasi',
    'UBN200': 'Bahasa Indonesia', 'UKD200': 'Keadilan, Demokrasi & HAM',
    'UKT101': 'Kuliah Kerja Lapangan',
    'UKT200': 'Kuliah Usaha Mandiri Ilmu Teknik Terapan',
    'UKT300': 'Kuliah Usaha Mandiri Ilmu Teknik Terapan',
    'UKW400': 'Pancasila dan Kewarganegaraan',
    'UPA200': 'Pendidikan Pancasila', 'EAU300': 'Pengantar Akuntansi',
    'EMU302': 'Pengantar Manajemen Umum', 'IFI103': 'Praktikum Fisika Dasar',
    'III202': 'Konsep Teknologi', 'IKB301': 'Pengantar Teknologi Informasi',
    'IKB350': 'Riset Teknologi Informasi', 'IKD302': 'Basis Data',
    'IKD303': 'Sistem Basis Data', 'IKG303': 'Grafika Komputer Lanjut',
    'IKG323': 'Disain Implemantasi Multimedia',
    'IKH100': 'Praktikum Pengantar Sistem Digital',
    'IKH104': 'Praktikum Jaringan Komputer',
    'IKH118': 'Praktikum Verifikasi dan Validasi Keamanan Informasi',
    'IKH151': 'Praktikum Organisasi dan Arsitektur Komputer',
    'IKH232': 'Pengelolaan Jaringan Komputer', 'IKH301': 'Organisasi Komputer',
    'IKH304': 'Jaringan Komputer',
    'IKL141': 'Praktikum Struktur Data dan Algoritma',
    'IKL332': 'Pemodelan Geometri', 'IKL333': 'Algoritma dan Pemrograman',
    'IKL341': 'Struktur Data dan Algoritma',
    'IKL343': 'Desain dan Analisis Algoritma',
    'IKS101': 'Praktikum Pemrograman Berorientasi Objek',
    'IKS102': 'Praktikum Pemrograman Terstruktur',
    'IKS103': 'Praktikum Rekayasa Perangkat Lunak',
    'IKS214': 'Komputer dan Masyarakat', 'IKS303': 'Rekayasa Perangkat Lunak',
    'IKS306': 'Analisa dan Disain Sistem', 'IKS307': 'Teknik Pengujian Sistem',
    'IKS314': 'Manajemen Keamanan Sitem Informasi',
    'IKS315': 'Sistem Informasi Akuntansi',
    'IKS316': 'Pemrograman Berorientasi Objek',
    'IKS321': 'Pemrograman Terstruktur', 'ISD201': 'Teknik Data Mining',
    'ISK201': 'Konsep Sistem Informasi I',
    'ISK202': 'Konsep Sistem Informasi II',
    'ISL301': 'Pengembangan Aplikasi Berbasis Web',
    'ISM301': 'Manajemen Proyek Teknologi Informasi',
    'ISM321': 'Ekonomi Informasi', 'IUM362': 'Logika Matematika',
    'UKW200': 'Kewiraan' }
def main(nim_mhs):
# SIS: wb = xlrd.open_workbook('../raw-data/transkrip-aktif-2014-2015-1.xls')
# UPTF
wb = xlrd.open_workbook('../raw-data/Nilai-Pra-Yudisium-2014-gasal-TIF-SI.xls')
print wb.sheet_names()
sheet = wb.sheet_by_index(0)
print "name: ", sheet.name
print "rows: ", sheet.nrows
print "cols: ", sheet.ncols
transkrip = []
transkrip_lulus = {}
transkrip_fail = {}
for nrow in range(1, sheet.nrows):
nim_cell = sheet.cell(nrow, 0)
kode_cell = sheet.cell(nrow, 1)
nilai_cell = sheet.cell(nrow, 3)
nim = nim_cell.value
kode = kode_cell.value
grade = nilai_cell.value
if nim == nim_mhs:
transkrip.append(kode)
if grade not in ['D', 'E']:
transkrip_lulus[kode] = grade
else:
transkrip_fail[kode] = grade
print len(transkrip), len(transkrip_lulus), len(transkrip_fail)
print set(transkrip)
print set(transkrip_lulus.keys())
print set(transkrip_fail.keys())
set_transkrip = set(transkrip)
print len(set_transkrip)
set_lulus = set(transkrip_lulus.keys())
set_fail = set(transkrip_fail.keys())
should_be_empty = set_lulus & set_fail
print should_be_empty
kelas = open("../raw-data/prayudisium-UPTF.csv", "a")
RPL = [ 'IUM461', 'IUM263', 'IKB406', 'IKB206', 'IKB405',
'IKB411', 'ISM401', 'IKH323', 'IKH123', 'IKH313',
'IKH113', 'IKH351', 'IKH151', 'IKH311', 'IKH111',
'IKL335', 'IKL135', 'IKL341', 'IKL141', 'IKL343',
'IKS304', 'IKS104', 'IKP213', 'IKP113', 'IKP321',
'IKS318', 'IKS118', 'IKS323', 'IKS123', 'IKS327',
'IKS126', 'IKS301', 'IKD313', 'IKD123', 'IKD312',
'IKD112', 'IKH331', 'IKP103', 'IKP303', 'IKP111',
'IKP311', 'IKP302', 'IKP102', 'UBA402', 'UKW400',
'ISM404', 'ISM403', 'IKB250', 'IKS413', 'UKT101',
'IKB450', 'IKB451', 'IKA301', 'IKH314', 'IKH326',
'IKP333', 'ISD312', 'ISD313', 'ISD314', 'ISM312',
'UKT300', 'ISM317' ]
coreRPL = [ 'IUM461', 'IUM263', 'IKB406', 'IKB206', 'IKB405',
'IKB411', 'ISM401', 'IKH323', 'IKH123', 'IKH313',
'IKH113', 'IKH351', 'IKH151', 'IKH311', 'IKH111',
'IKL335', 'IKL135', 'IKL341', 'IKL141', 'IKL343',
'IKS304', 'IKS104', 'IKP213', 'IKP113', 'IKP321',
'IKS318', 'IKS118', 'IKS323', 'IKS123', 'IKS327',
'IKS126', 'IKS301', 'IKD313', 'IKD123', 'IKD312',
'IKD112', 'IKH331', 'IKP103', 'IKP303', 'IKP111',
'IKP311', 'IKP302', 'IKP102', 'UBA402', 'UKW400',
'ISM404', 'ISM403', 'IKB250', 'IKS413', 'UKT101',
'IKB450', 'IKB451' ]
GK = [ 'ISM221', 'IUM461', 'IUM263', 'IKB406', 'IKB206',
'IKB405', 'IKB411', 'ISM401', 'IKH323', 'IKH123',
'IKH313', 'IKH113', 'IKH351', 'IKH151', 'IKH311',
'IKH111', 'IKL335', 'IKL135', 'IKL341', 'IKL141',
'IKS304', 'IKS104', 'IKP321', 'IKS318', 'IKS118',
'IKS323', 'IKS123', 'IKS327', 'IKS126', 'IKS301',
'IKD313', 'IKD123', 'IKD312', 'IKD112', 'IKG402',
'IKG101', 'IKG321', 'IKG121', 'IKG302', 'IKG102',
'PDU212', 'IKH342', 'IKL233', 'IKL133', 'UBA402',
'UKW400', 'ISM404', 'ISM403', 'IKB250', 'IKS413',
'UKT101', 'IKB450', 'IKB451', 'IKA301', 'IKG311',
'IKH314', 'IKH326', 'IKP333', 'ISD312', 'ISD314',
'ISM312', 'UKT300', 'IKG304', 'ISD305' ]
coreGK = [ 'ISM221', 'IUM461', 'IUM263', 'IKB406', 'IKB206',
'IKB405', 'IKB411', 'ISM401', 'IKH323', 'IKH123',
'IKH313', 'IKH113', 'IKH351', 'IKH151', 'IKH311',
'IKH111', 'IKL335', 'IKL135', 'IKL341', 'IKL141',
'IKS304', 'IKS104', 'IKP321', 'IKS318', 'IKS118',
'IKS323', 'IKS123', 'IKS327', 'IKS126', 'IKS301',
'IKD313', 'IKD123', 'IKD312', 'IKD112', 'IKG402',
'IKG101', 'IKG321', 'IKG121', 'IKG302', 'IKG102',
'PDU212', 'IKH342', 'IKL233', 'IKL133', 'UBA402',
'UKW400', 'ISM404', 'ISM403', 'IKB250', 'IKS413',
'UKT101', 'IKB450', 'IKB451' ]
KI = [ 'ISM221', 'IUM461', 'IKB406', 'IKB206', 'IKB405',
'IKB411', 'ISM401', 'IUM314', 'IKH323', 'IKH123',
'IKH313', 'IKH113', 'IKH309', 'IKH132', 'IKH351',
'IKH151', 'IKH311', 'IKH111', 'IKL335', 'IKL135',
'IKL341', 'IKL141', 'IKD312', 'IKD112', 'IKH329',
'IKH109', 'IKH310', 'IKH110', 'IKH312', 'IKH112',
'IKH314', 'IKH114', 'IKH318', 'IKH118', 'IKH319',
'IKH119', 'IKH326', 'IKH126', 'IKH327', 'IKH328',
'IKH128', 'IKH316', 'IKH116', 'IKH324', 'UBA402',
'UKW400', 'ISM404', 'ISM403', 'IKB250', 'IKS413',
'UKT101', 'IKB450', 'IKB451', 'IKA301', 'IKG321',
'IKP333', 'ISD312', 'ISD313', 'IKP321', 'ISM312',
'ISM314', 'UKT300' ]
coreKI = [ 'ISM221', 'IUM461', 'IKB406', 'IKB206', 'IKB405',
'IKB411', 'ISM401', 'IUM314', 'IKH323', 'IKH123',
'IKH313', 'IKH113', 'IKH309', 'IKH132', 'IKH351',
'IKH151', 'IKH311', 'IKH111', 'IKL335', 'IKL135',
'IKL341', 'IKL141', 'IKD312', 'IKD112', 'IKH329',
'IKH109', 'IKH310', 'IKH110', 'IKH312', 'IKH112',
'IKH314', 'IKH114', 'IKH318', 'IKH118', 'IKH319',
'IKH119', 'IKH326', 'IKH126', 'IKH327', 'IKH328',
'IKH128', 'IKH316', 'IKH116', 'IKH324', 'UBA402',
'UKW400', 'ISM404', 'ISM403', 'IKB250', 'IKS413',
'UKT101', 'IKB450', 'IKB451' ]
SI = [ 'ISM221', 'IUM461', 'IKB406', 'IKB206', 'IKB405',
| |
# Repository: dklopfenstein/biocode
"""Selected UniProt data saved in Python."""
# Copyright (C) 2014-2019 <NAME>. All rights reserved
#
# ADAPTED TO PYTHON from the UniProt file:
# ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete
DOWNLOADED = "2019_03_07" # UniProt source files were downloaded on this date
# Contains 5915 items for "Saccharomyces cerevisiae ("
from collections import namedtuple
ntuniprot = namedtuple('ntuniprot', 'RecName_Full')
# pylint: disable=too-many-lines
UNIPROT2NT = { # 5,915 items
'A0A023PXF5' : ntuniprot(RecName_Full='Putative uncharacterized helicase-like protein YHR218W-A {ECO:0000305}'),
'A0A023PZB3' : ntuniprot(RecName_Full='Protein FMP49, mitochondrial {ECO:0000305|PubMed:14576278}'),
'A0A0B7P221' : ntuniprot(RecName_Full='Uncharacterized protein RDT1 {ECO:0000303|PubMed:21948395}'),
'A0A0B7P3V8' : ntuniprot(RecName_Full='Transposon Ty4-P Gag-Pol polyprotein'),
'A2P2R3' : ntuniprot(RecName_Full='Putative glutamine--fructose-6-phosphate aminotransferase [isomerizing]'),
'A5Z2X5' : ntuniprot(RecName_Full='UPF0495 protein YPR010C-A'),
'D6VPM8' : ntuniprot(RecName_Full='Putative DUP240 protein YAR023C'),
'D6VTK4' : ntuniprot(RecName_Full='Pheromone alpha factor receptor'),
'D6W196' : ntuniprot(RecName_Full='Truncated non-functional calcium-binding mitochondrial carrier SAL1-1'),
'I2HB52' : ntuniprot(RecName_Full='Uncharacterized protein YBR056W-A'),
'I2HB70' : ntuniprot(RecName_Full='Uncharacterized protein YMR316C-A'),
'O13297' : ntuniprot(RecName_Full='mRNA-capping enzyme subunit beta'),
'O13329' : ntuniprot(RecName_Full='DNA replication fork-blocking protein FOB1 {ECO:0000303|PubMed:9078378}'),
'O13511' : ntuniprot(RecName_Full='Uncharacterized protein YAL065C'),
'O13512' : ntuniprot(RecName_Full='Uncharacterized membrane protein YAL064W-B'),
'O13516' : ntuniprot(RecName_Full='40S ribosomal protein S9-A {ECO:0000303|PubMed:9559554}'),
'O13525' : ntuniprot(RecName_Full='Ubiquinone biosynthesis protein COQ4, mitochondrial {ECO:0000255|HAMAP-Rule:MF_03111}'),
'O13527' : ntuniprot(RecName_Full='Truncated transposon Ty1-A Gag-Pol polyprotein'),
'O13529' : ntuniprot(RecName_Full='Protein ECM12'),
'O13535' : ntuniprot(RecName_Full='Transposon Ty1-H Gag-Pol polyprotein'),
'O13539' : ntuniprot(RecName_Full='THO complex subunit THP2'),
'O13547' : ntuniprot(RecName_Full='Covalently-linked cell wall protein 14'),
'O13549' : ntuniprot(RecName_Full='Uncharacterized protein VPS63 {ECO:0000305}'),
'O13556' : ntuniprot(RecName_Full='Putative uncharacterized protein YLR462W'),
'O13559' : ntuniprot(RecName_Full="Y' element ATP-dependent helicase protein 1 copy 4"),
'O13563' : ntuniprot(RecName_Full='26S proteasome regulatory subunit RPN13'),
'O13565' : ntuniprot(RecName_Full='Uncharacterized protein YLR358C'),
'O13577' : ntuniprot(RecName_Full='Damage-regulated import facilitator 1'),
'O13578' : ntuniprot(RecName_Full='Putative uncharacterized protein YLR415C, mitochondrial'),
'O13585' : ntuniprot(RecName_Full='Dilute domain-containing protein YPR089W'),
'O13587' : ntuniprot(RecName_Full='Uncharacterized protein YPR096C'),
'O14455' : ntuniprot(RecName_Full='60S ribosomal protein L36-B {ECO:0000303|PubMed:9559554}'),
'O14464' : ntuniprot(RecName_Full='54S ribosomal protein RTC6, mitochondrial'),
'O14467' : ntuniprot(RecName_Full='Multiprotein-bridging factor 1'),
'O14468' : ntuniprot(RecName_Full='Uncharacterized protein YOR304C-A'),
'O43137' : ntuniprot(RecName_Full='Uncharacterized protein YBR085C-A'),
'O60200' : ntuniprot(RecName_Full='Mitochondrial distribution and morphology protein 35'),
'O74302' : ntuniprot(RecName_Full='Transposon Ty1-DR4 Gag polyprotein'),
'O74700' : ntuniprot(RecName_Full='Mitochondrial import inner membrane translocase subunit TIM9'),
'O75012' : ntuniprot(RecName_Full='37S ribosomal protein MRP10, mitochondrial'),
'O94742' : ntuniprot(RecName_Full='26S proteasome complex subunit SEM1'),
'P00044' : ntuniprot(RecName_Full='Cytochrome c iso-1'),
'P00045' : ntuniprot(RecName_Full='Cytochrome c iso-2'),
'P00127' : ntuniprot(RecName_Full='Cytochrome b-c1 complex subunit 6'),
'P00128' : ntuniprot(RecName_Full='Cytochrome b-c1 complex subunit 7'),
'P00163' : ntuniprot(RecName_Full='Cytochrome b'),
'P00175' : ntuniprot(RecName_Full='Cytochrome b2, mitochondrial'),
'P00330' : ntuniprot(RecName_Full='Alcohol dehydrogenase 1'),
'P00331' : ntuniprot(RecName_Full='Alcohol dehydrogenase 2'),
'P00358' : ntuniprot(RecName_Full='Glyceraldehyde-3-phosphate dehydrogenase 2'),
'P00359' : ntuniprot(RecName_Full='Glyceraldehyde-3-phosphate dehydrogenase 3'),
'P00360' : ntuniprot(RecName_Full='Glyceraldehyde-3-phosphate dehydrogenase 1'),
'P00401' : ntuniprot(RecName_Full='Cytochrome c oxidase subunit 1'),
'P00410' : ntuniprot(RecName_Full='Cytochrome c oxidase subunit 2'),
'P00420' : ntuniprot(RecName_Full='Cytochrome c oxidase subunit 3'),
'P00424' : ntuniprot(RecName_Full='Cytochrome c oxidase polypeptide 5A, mitochondrial'),
'P00425' : ntuniprot(RecName_Full='Cytochrome c oxidase polypeptide 5B, mitochondrial'),
'P00427' : ntuniprot(RecName_Full='Cytochrome c oxidase subunit 6, mitochondrial'),
'P00431' : ntuniprot(RecName_Full='Cytochrome c peroxidase, mitochondrial'),
'P00445' : ntuniprot(RecName_Full='Superoxide dismutase [Cu-Zn]'),
'P00447' : ntuniprot(RecName_Full='Superoxide dismutase [Mn], mitochondrial'),
'P00498' : ntuniprot(RecName_Full='ATP phosphoribosyltransferase'),
'P00546' : ntuniprot(RecName_Full='Cyclin-dependent kinase 1'),
'P00549' : ntuniprot(RecName_Full='Pyruvate kinase 1'),
'P00560' : ntuniprot(RecName_Full='Phosphoglycerate kinase'),
'P00572' : ntuniprot(RecName_Full='Thymidylate kinase'),
'P00635' : ntuniprot(RecName_Full='Repressible acid phosphatase'),
'P00724' : ntuniprot(RecName_Full='Invertase 2'),
'P00729' : ntuniprot(RecName_Full='Carboxypeptidase Y {ECO:0000303|Ref.4}'),
'P00812' : ntuniprot(RecName_Full='Arginase'),
'P00815' : ntuniprot(RecName_Full='Histidine biosynthesis trifunctional protein'),
'P00817' : ntuniprot(RecName_Full='Inorganic pyrophosphatase'),
'P00830' : ntuniprot(RecName_Full='ATP synthase subunit beta, mitochondrial'),
'P00854' : ntuniprot(RecName_Full='ATP synthase subunit a'),
'P00856' : ntuniprot(RecName_Full='ATP synthase protein 8'),
'P00890' : ntuniprot(RecName_Full='Citrate synthase, mitochondrial {ECO:0000303|PubMed:6090126}'),
'P00899' : ntuniprot(RecName_Full='Anthranilate synthase component 1'),
'P00912' : ntuniprot(RecName_Full="N-(5'-phosphoribosyl)anthranilate isomerase"),
'P00924' : ntuniprot(RecName_Full='Enolase 1'),
'P00925' : ntuniprot(RecName_Full='Enolase 2'),
'P00927' : ntuniprot(RecName_Full='Threonine dehydratase, mitochondrial'),
'P00931' : ntuniprot(RecName_Full='Tryptophan synthase'),
'P00937' : ntuniprot(RecName_Full='Multifunctional tryptophan biosynthesis protein'),
'P00942' : ntuniprot(RecName_Full='Triosephosphate isomerase'),
'P00950' : ntuniprot(RecName_Full='Phosphoglycerate mutase 1'),
'P00958' : ntuniprot(RecName_Full='Methionine--tRNA ligase, cytoplasmic'),
'P01094' : ntuniprot(RecName_Full='Protease A inhibitor 3'),
'P01097' : ntuniprot(RecName_Full='ATPase inhibitor, mitochondrial {ECO:0000305}'),
'P01098' : ntuniprot(RecName_Full='ATPase-stabilizing factor 9 kDa, mitochondrial {ECO:0000305}'),
'P01119' : ntuniprot(RecName_Full='Ras-like protein 1'),
'P01120' : ntuniprot(RecName_Full='Ras-like protein 2'),
'P01123' : ntuniprot(RecName_Full='GTP-binding protein YPT1'),
'P01149' : ntuniprot(RecName_Full='Mating factor alpha-1'),
'P02293' : ntuniprot(RecName_Full='Histone H2B.1'),
'P02294' : ntuniprot(RecName_Full='Histone H2B.2'),
'P02309' : ntuniprot(RecName_Full='Histone H4'),
'P02381' : ntuniprot(RecName_Full='Ribosomal protein VAR1, mitochondrial'),
'P02400' : ntuniprot(RecName_Full='60S acidic ribosomal protein P2-beta {ECO:0000303|PubMed:9559554}'),
'P02406' : ntuniprot(RecName_Full='60S ribosomal protein L28 {ECO:0000303|PubMed:9559554}'),
'P02407' : ntuniprot(RecName_Full='40S ribosomal protein S17-A {ECO:0000303|PubMed:9559554}'),
'P02557' : ntuniprot(RecName_Full='Tubulin beta chain'),
'P02829' : ntuniprot(RecName_Full='ATP-dependent molecular chaperone HSP82'),
'P02992' : ntuniprot(RecName_Full='Elongation factor Tu, mitochondrial'),
'P02994' : ntuniprot(RecName_Full='Elongation factor 1-alpha'),
'P03069' : ntuniprot(RecName_Full='General control protein GCN4'),
'P03873' : ntuniprot(RecName_Full='Cytochrome b mRNA maturase bI2'),
'P03874' : ntuniprot(RecName_Full='Cytochrome B pre-mRNA-processing protein 2'),
'P03875' : ntuniprot(RecName_Full='Putative COX1/OXI3 intron 1 protein'),
'P03876' : ntuniprot(RecName_Full='Putative COX1/OXI3 intron 2 protein'),
'P03877' : ntuniprot(RecName_Full='Intron-encoded DNA endonuclease aI3'),
'P03878' : ntuniprot(RecName_Full='Intron-encoded DNA endonuclease aI4'),
'P03879' : ntuniprot(RecName_Full='Intron-encoded RNA maturase bI4'),
'P03881' : ntuniprot(RecName_Full='Uncharacterized mitochondrial protein RF1'),
'P03882' : ntuniprot(RecName_Full='Intron-encoded endonuclease I-SceI'),
'P03962' : ntuniprot(RecName_Full="Orotidine 5'-phosphate decarboxylase"),
'P03965' : ntuniprot(RecName_Full='Carbamoyl-phosphate synthase arginine-specific large chain'),
'P04037' : ntuniprot(RecName_Full='Cytochrome c oxidase subunit 4, mitochondrial'),
'P04039' : ntuniprot(RecName_Full='Cytochrome c oxidase polypeptide VIII, mitochondrial'),
'P04046' : ntuniprot(RecName_Full='Amidophosphoribosyltransferase'),
'P04050' : ntuniprot(RecName_Full='DNA-directed RNA polymerase II subunit RPB1'),
'P04051' : ntuniprot(RecName_Full='DNA-directed RNA polymerase III subunit RPC1'),
'P04076' : ntuniprot(RecName_Full='Argininosuccinate lyase'),
'P04147' : ntuniprot(RecName_Full='Polyadenylate-binding protein, cytoplasmic and nuclear'),
'P04161' : ntuniprot(RecName_Full='Phosphoribosylglycinamide formyltransferase'),
'P04173' : ntuniprot(RecName_Full='3-isopropylmalate dehydrogenase'),
'P04385' : ntuniprot(RecName_Full='Galactokinase'),
'P04386' : ntuniprot(RecName_Full='Regulatory protein GAL4'),
'P04387' : ntuniprot(RecName_Full='Galactose/lactose metabolism regulatory protein GAL80'),
'P04397' : ntuniprot(RecName_Full='Bifunctional protein GAL10'),
'P04449' : ntuniprot(RecName_Full='60S ribosomal protein L24-A {ECO:0000303|PubMed:9559554}'),
'P04456' : ntuniprot(RecName_Full='60S ribosomal protein L25 {ECO:0000303|PubMed:9559554}'),
'P04650' : ntuniprot(RecName_Full='60S ribosomal protein L39 {ECO:0000303|PubMed:9559554}'),
'P04710' : ntuniprot(RecName_Full='ADP,ATP carrier protein 1'),
'P04786' : ntuniprot(RecName_Full='DNA topoisomerase 1'),
'P04801' : ntuniprot(RecName_Full='Threonine--tRNA ligase, cytoplasmic'),
'P04802' : ntuniprot(RecName_Full='Aspartate--tRNA ligase, cytoplasmic'),
'P04803' : ntuniprot(RecName_Full='Tryptophan--tRNA ligase, mitochondrial'),
'P04806' : ntuniprot(RecName_Full='Hexokinase-1'),
'P04807' : ntuniprot(RecName_Full='Hexokinase-2'),
'P04817' : ntuniprot(RecName_Full='Arginine permease CAN1'),
'P04819' : ntuniprot(RecName_Full='DNA ligase 1'),
'P04821' : ntuniprot(RecName_Full='Cell division control protein 25'),
'P04840' : ntuniprot(RecName_Full='Mitochondrial outer membrane protein porin 1'),
'P04911' : ntuniprot(RecName_Full='Histone H2A.1'),
'P04912' : ntuniprot(RecName_Full='Histone H2A.2'),
'P05030' : ntuniprot(RecName_Full='Plasma membrane ATPase 1'),
'P05066' : ntuniprot(RecName_Full='Deoxyribodipyrimidine photo-lyase, mitochondrial'),
'P05085' : ntuniprot(RecName_Full='Arginine metabolism regulation protein II'),
'P05150' : ntuniprot(RecName_Full='Ornithine carbamoyltransferase'),
'P05316' : ntuniprot(RecName_Full='Uracil permease'),
'P05317' : ntuniprot(RecName_Full='60S acidic ribosomal protein P0 {ECO:0000303|PubMed:9559554}'),
'P05318' : ntuniprot(RecName_Full='60S acidic ribosomal protein P1-alpha {ECO:0000303|PubMed:9559554}'),
'P05319' : ntuniprot(RecName_Full='60S acidic ribosomal protein P2-alpha {ECO:0000303|PubMed:9559554}'),
'P05373' : ntuniprot(RecName_Full='Delta-aminolevulinic acid dehydratase'),
'P05374' : ntuniprot(RecName_Full='Phosphatidylethanolamine N-methyltransferase {ECO:0000255|HAMAP-Rule:MF_03217, ECO:0000303|PubMed:2445736}'),
'P05375' : ntuniprot(RecName_Full='Phosphatidyl-N-methylethanolamine N-methyltransferase {ECO:0000255|HAMAP-Rule:MF_03216, ECO:0000305}'),
'P05453' : ntuniprot(RecName_Full='Eukaryotic peptide chain release factor GTP-binding subunit'),
'P05626' : ntuniprot(RecName_Full='ATP synthase subunit 4, mitochondrial'),
'P05694' : ntuniprot(RecName_Full='5-methyltetrahydropteroyltriglutamate--homocysteine methyltransferase'),
'P05737' : ntuniprot(RecName_Full='60S ribosomal protein L7-A {ECO:0000303|PubMed:9559554}'),
'P05738' : ntuniprot(RecName_Full='60S ribosomal protein L9-A {ECO:0000303|PubMed:9559554}'),
'P05739' : ntuniprot(RecName_Full='60S ribosomal protein L6-B {ECO:0000303|PubMed:9559554}'),
'P05740' : ntuniprot(RecName_Full='60S ribosomal protein L17-A {ECO:0000303|PubMed:9559554}'),
'P05743' : ntuniprot(RecName_Full='60S ribosomal protein L26-A {ECO:0000303|PubMed:9559554}'),
'P05744' : ntuniprot(RecName_Full='60S ribosomal protein L33-A {ECO:0000303|PubMed:9559554}'),
'P05745' : ntuniprot(RecName_Full='60S ribosomal protein L36-A {ECO:0000303|PubMed:9559554}'),
'P05747' : ntuniprot(RecName_Full='60S ribosomal protein L29 {ECO:0000303|PubMed:9559554}'),
'P05748' : ntuniprot(RecName_Full='60S ribosomal protein L15-A {ECO:0000303|PubMed:9559554}'),
'P05749' : ntuniprot(RecName_Full='60S ribosomal protein L22-A {ECO:0000303|PubMed:9559554}'),
'P05750' : ntuniprot(RecName_Full='40S ribosomal protein S3 {ECO:0000303|PubMed:9559554}'),
'P05755' : ntuniprot(RecName_Full='40S ribosomal protein S9-B {ECO:0000303|PubMed:9559554}'),
'P05756' : ntuniprot(RecName_Full='40S ribosomal protein S13 {ECO:0000303|PubMed:9559554}'),
'P05759' : ntuniprot(RecName_Full='Ubiquitin-40S ribosomal protein S31'),
'P05986' : ntuniprot(RecName_Full='cAMP-dependent protein kinase type 3'),
'P06100' : ntuniprot(RecName_Full='General negative regulator of transcription subunit 2'),
'P06101' : ntuniprot(RecName_Full='Hsp90 co-chaperone Cdc37'),
'P06102' : ntuniprot(RecName_Full='General negative regulator of transcription subunit 3'),
'P06103' : ntuniprot(RecName_Full='Eukaryotic translation initiation factor 3 subunit B {ECO:0000255|HAMAP-Rule:MF_03001}'),
'P06104' : ntuniprot(RecName_Full='Ubiquitin-conjugating enzyme E2 2'),
'P06105' : ntuniprot(RecName_Full='Protein SCP160'),
'P06106' : ntuniprot(RecName_Full='Homocysteine/cysteine synthase {ECO:0000305}'),
'P06115' : ntuniprot(RecName_Full='Catalase T'),
'P06168' : ntuniprot(RecName_Full='Ketol-acid reductoisomerase, mitochondrial'),
'P06169' : ntuniprot(RecName_Full='Pyruvate decarboxylase isozyme 1'),
'P06174' : ntuniprot(RecName_Full='Uroporphyrinogen-III synthase'),
'P06182' : ntuniprot(RecName_Full='Cytochrome c heme lyase'),
'P06197' : ntuniprot(RecName_Full='CDP-diacylglycerol--inositol 3-phosphatidyltransferase'),
'P06208' : ntuniprot(RecName_Full='2-isopropylmalate synthase'),
'P06242' : ntuniprot(RecName_Full='Serine/threonine-protein kinase KIN28'),
'P06243' : ntuniprot(RecName_Full='Cell division control protein 7'),
'P06244' : ntuniprot(RecName_Full='cAMP-dependent protein kinase type 1'),
'P06245' : ntuniprot(RecName_Full='cAMP-dependent protein kinase type 2'),
'P06367' : ntuniprot(RecName_Full='40S ribosomal protein S14-A {ECO:0000303|PubMed:9559554}'),
'P06633' : ntuniprot(RecName_Full='Imidazoleglycerol-phosphate dehydratase'),
'P06634' : ntuniprot(RecName_Full='ATP-dependent RNA helicase DED1'),
'P06700' : ntuniprot(RecName_Full='NAD-dependent histone deacetylase SIR2'),
'P06701' : ntuniprot(RecName_Full='Regulatory protein SIR3'),
'P06704' : ntuniprot(RecName_Full='Cell division control protein 31'),
'P06738' : ntuniprot(RecName_Full='Glycogen phosphorylase'),
'P06773' : ntuniprot(RecName_Full='Deoxycytidylate deaminase'),
'P06774' : ntuniprot(RecName_Full='Transcriptional activator HAP2'),
'P06775' : ntuniprot(RecName_Full='Histidine permease'),
'P06776' : ntuniprot(RecName_Full="3',5'-cyclic-nucleotide phosphodiesterase 2"),
'P06777' : ntuniprot(RecName_Full='DNA repair protein RAD1'),
'P06778' : ntuniprot(RecName_Full='DNA repair and recombination protein RAD52'),
'P06779' : ntuniprot(RecName_Full='DNA repair protein RAD7'),
'P06780' : ntuniprot(RecName_Full='GTP-binding protein RHO1'),
'P06781' : ntuniprot(RecName_Full='GTP-binding protein RHO2'),
'P06782' : ntuniprot(RecName_Full='Carbon catabolite-derepressing | |
a = tf.ones((xshape[1], 1), tf.float32)
b = tf.zeros((xshape[1], 1), tf.float32)
if (y == 2):
c = tf.reshape(tf.stack([a, b], axis=1), [1, newshape])
elif (y == 4):
c = tf.reshape(tf.stack([b, a, b, b], axis=1), [1, newshape])
d = tf.transpose(c, [1, 0])
# Create a [newshape, newshape] matrix by taking the matrix product of
# d and c. This will be a matrix that is zeros everywhere, but will have
# xshape*xshape 1s, evenly spaced through it.
coeff_matrix = tf.matmul(d, c)
coeff_matrix = tf.expand_dims(coeff_matrix, axis=0)
coeff_matrix = tf.expand_dims(coeff_matrix, axis=-1)
assert coeff_matrix.get_shape().as_list() == [1, newshape, newshape, 1]
# Upsample using nearest neighbour
X = tf.image.resize_nearest_neighbor(x, [newshape, newshape])
# Mask using the above coeff_matrix
X = X * coeff_matrix
return X
def add_conjugates(X):
    """ Concatenate tensor(s) with their complex conjugates.

    The DTCWT returns 6 orientations for each coordinate, corresponding to
    the angles [15, 45, 75, 105, 135, 165]. Taking the complex conjugate of
    each of these yields the remaining 6 rotations, giving 12 in total.

    Parameters
    ----------
    X : tf tensor of shape (batch, ..., 6), or a list/tuple of such tensors

    Returns
    -------
    Y : tf tensor of shape (batch, ..., 12), or a list of such tensors when
        the input was a list/tuple
    """
    def append_conj(t):
        # Stack the conjugate orientations after the originals on the
        # last (orientation) axis.
        return tf.concat([t, tf.conj(t)], axis=-1)

    if type(X) in (list, tuple):
        return [append_conj(t) for t in X]
    return append_conj(X)
def collapse_conjugates(X):
    """ Invert add_conjugates, going from 12 orientations back to 6.

    The first 6 entries are averaged with the conjugate of the last 6
    (i.e. 0.5 * (first + conj(last))), which exactly undoes add_conjugates
    when the input came from it.

    Parameters
    ----------
    X : tf tensor of shape (batch, ..., 12), or a list/tuple of such tensors

    Returns
    -------
    Y : tf tensor of shape (batch, ..., 6), or a list of such tensors when
        the input was a list/tuple
    """
    def fold(t):
        # Split along the final (orientation) axis.
        last_axis = len(t.get_shape().as_list()) - 1
        first6, last6 = tf.split(t, [6, 6], axis=last_axis)
        return 0.5 * (first6 + tf.conj(last6))

    if type(X) in (list, tuple):
        return [fold(t) for t in X]
    return fold(X)
def response_normalization(x, power=2):
    """ Function to spread out the activations.

    Maps the data through the polynomial a*x**power along the last axis,
    with `a` chosen per-position so that the top activation is unchanged
    while smaller activations are pushed towards zero.

    Negative inputs should not happen as we are competing after a magnitude
    operation. However, they can sometimes occur due to upsampling; in that
    case they are clipped to 0.

    Parameters
    ----------
    x : tf tensor
        Activations to sharpen (expected non-negative, see above).
    power : int or float
        Polynomial degree; larger values suppress non-maximal entries more.

    Returns
    -------
    A tensor with the same shape as x.
    """
    # Per-position maximum over the last axis, kept as a broadcastable dim.
    # NOTE(review): m is taken before clipping — assumes each position has
    # at least one non-negative entry; confirm against callers.
    m = tf.expand_dims(tf.reduce_max(x, axis=-1), axis=-1)
    # Clip negative values (may appear after upsampling)
    x = tf.maximum(x, 0.0)
    # Choose a so that a * m**power == m, i.e. the max is a fixed point.
    a = 1 / m**(power - 1)
    # BUG FIX: this previously returned x**power / a, which scales by
    # m**(power-1) instead of 1/m**(power-1) and does NOT preserve the max
    # (e.g. power=2, m=0.5 gave 0.125 at the max instead of 0.5).
    return a * x**power
def wavelet(x, nlevels, biort='near_sym_b_bp', qshift='qshift_b_bp',
            data_format="nhwc"):
    """ Perform an nlevel dtcwt on the input data.

    Parameters
    ----------
    x: tf tensor of shape (batch, h, w) or (batch, h, w, c)
        The input to be transformed. If the input has a channel dimension,
        the dtcwt will be applied to each of the channels independently.
    nlevels : int
        The number of scales to use. 0 is a special case: the function then
        returns a lowpassed version of x, and Yh and Yscale are empty lists.
    biort : str
        Which biorthogonal filters to use. 'near_sym_b_bp' are my favourite,
        as they have 45° and 135° filters with the same period as the others.
    qshift : str
        Which quarter shift filters to use. These should match up with the
        biorthogonal used. 'qshift_b_bp' are my favourite for the same reason.
    data_format : str
        An optional string of the form "nchw" or "nhwc" (for 4D data). For
        3D input this parameter is ignored and "nhw" is used. E.g. "nhwc"
        (the default) means the data is in the form [batch, h, w, c].

    Returns
    -------
    out : a tuple of (lowpass, highpasses, scales).
        * Lowpass is a real float tensor of the lowpass data.
        * Highpasses is a list of length <nlevels>; each entry has the six
          orientations of wavelet coefficients for that scale (tf.complex64).
        * Scales is a list of length <nlevels>; each entry is the lowpass
          signal passed on to the next level of the dtcwt transform.
    """
    with tf.variable_scope('wavelet'):
        if nlevels == 0:
            # Special case: no decomposition — just lowpass filter the input
            # with the biorthogonal analysis filter in both directions.
            Yh, Yscale = [], []
            filters = _biort(biort)
            h0o = np.reshape(filters[0], [1, -1, 1, 1])
            h0oT = np.transpose(h0o, [1, 0, 2, 3])
            in_shape = x.get_shape().as_list()
            # Fold the channels into the batch dimension, filter, and unfold.
            folded = tf.reshape(x, [-1, in_shape[1], in_shape[2], 1])
            Yl = separable_conv_with_pad(folded, h0o, h0oT)
            Yl = tf.reshape(Yl, [-1, in_shape[1], in_shape[2], in_shape[3]])
        else:
            transform = Transform2d(biort=biort, qshift=qshift)
            # 3D input has no channel dim; 4D uses the caller's data_format.
            ndims = len(x.get_shape().as_list())
            if ndims == 3:
                data_format = 'nhw'
            elif ndims != 4:
                raise ValueError("Unkown length {} for wavelet block".format(ndims))
            Yl, Yh, Yscale = dtcwt.utils.unpack(
                transform.forward_channels(
                    x, data_format, nlevels=nlevels, include_scale=True),
                'tf')
    return Yl, _dtcwt_correct_phases(Yh), Yscale
def wavelet_inv(Yl, Yh, biort='near_sym_b_bp', qshift='qshift_b_bp',
                data_format="nhwc"):
    """ Perform an nlevel inverse dtcwt on the input data.

    Parameters
    ----------
    Yl : :py:class:`tf.Tensor`
        Real tensor of shape (batch, h, w) or (batch, h, w, c) holding the
        lowpass input. If the shape has a channel dimension, then c inverse
        dtcwt's will be performed (the other inputs must match this shape).
    Yh : list(:py:class:`tf.Tensor`)
        A list of length nlevels. Each entry has the highpasses for one
        scale. Shape has to match Yl, with a 6 on the end.
    biort : str
        Which biorthogonal filters to use. 'near_sym_b_bp' are my favourite,
        as they have 45° and 135° filters with the same period as the others.
    qshift : str
        Which quarter shift filters to use. These should match up with the
        biorthogonal used. 'qshift_b_bp' are my favourite for the same reason.
    data_format : str
        An optional string of the form "nchw" or "nhwc" (for 4D data), or
        "nhw" or "hwn" (for 3D data), specifying the layout of the input.

    Returns
    -------
    X : :py:class:`tf.Tensor`
        An output of size [batch, h', w'], where h' and w' are larger than
        h and w by a factor of 2**nlevels.
    """
    with tf.variable_scope('wavelet_inv'):
        # Undo the phase correction that wavelet() applied on the way in.
        highpasses = _dtcwt_correct_phases(Yh, inv=True)
        inv_transform = Transform2d(biort=biort, qshift=qshift)
        return inv_transform.inverse_channels(
            Pyramid(Yl, highpasses), data_format=data_format)
def wavelet_channel_gains(Yl, Yh, gain_mask, lp_gain, data_format='nhwc'):
""" Apply a conv layer in the wavelet domain
Parameters
----------
pyramid : dtcwt.tf.Pyramid
Pyramid representation of a signal. It must have a lowpass_op and
a list of length J of highpasses_ops.
gain_mask : tf.Variable (tf.complex64)
Filters for each of the J highpasses. Must be of shape (J, f, C, 6)
where C is the number of input channels, J is the number of bandpass
coefficients, and f is the number of output filters.
lp_gain : tf.Variable (tf.float32)
Filters for the lowpass. Must be of shape (f, C).
data_format : str
The order of the dimensions in the pyramid
"""
# Check the gain_mask input is ok. If it is None, set it to all ones. If it
# is a numpy array, convert it to tensorflow. Make sure it has 4 components
# to it.
if gain_mask is None:
gain_mask = tf.constant(np.ones((1, 1, 6, len(Yh))))
| |
raise aXeSIMError(msg)
# test conversion to float
try:
tmp = float(depen_data[index])
except ValueError:
msg = ("\nValue: {0:s} is not convertible to float!"
.format(str(depen_data[index])))
raise aXeSIMError(msg)
# check for ascending order
if index > 0:
# check that independent data is rising
if indep_data[index] <= indep_data[index-1]:
raise aXeSIMError("\nIndependent column data is not"
"monotonically rising!")
# append the values to the arrays
out_indep.append(float(indep_data[index]))
out_depen.append(float(depen_data[index]))
# return the new arrays
return out_indep, out_depen
def _get_indep_index(self, value):
"""Locate the correct index
The method locates the 'index' for the independent data which
has "indep_data[index] <= value indep_data[index+1]".
The approximate index postition "self.accelerator" is used as
a starting point.
Parameters
----------
value: str
the list with dependent data
Returns: index position for the independent data
"""
# check whether you have to search upwards or downwards
if value > self._indep_data[self.accelerator]:
# in case that you search upwards, go up
# the independent values until you find the right interval
self.accelerator += 1
while value > self._indep_data[self.accelerator]:
self.accelerator += 1
else:
# in case that you search downwards, go down
# the independent values until you find the right interval
# while(wavelength < resp->spec[nact-1].lambda_mean)
while value < self._indep_data[self.accelerator-1]:
self.accelerator -= 1
# return the position
return self.accelerator
def _check_input(self, indata, input_file):
"""Check whether the input valid
The method performs some basic checks on data which is supposed
to form an interpolator. Checks for the correct data type and
for rising independent data values and against NULL entries are done.
Parameters
----------
indata: axe_asciidata.AsciiData()
the list with dependent data
input_file: str
the input file name
"""
# check the type of first column
if isinstance(float, indata[0].get_type()):
# check whether it is int
if isinstance(int, indata[0].get_type()):
# go over the column
for index in range(indata.nrows):
# convert to float
indata[0][index] = float(indata[0][index])
else:
msg = ("\nColumn 0 of file: {0:s}} contains wrong "
"type!".format(input_file))
raise aXeSIMError(msg)
# check the type of second column
if isinstance(float, indata[1].get_type()):
# check whether it is int
if (isinstance(int, indata[1].get_type())):
# go over the column
for index in range(indata.nrows):
# convert to float
indata[1][index] = float(indata[1][index])
else:
msg = ("\nColumn 1 of file: {0:s} contains wrong type!"
.format(input_file))
raise aXeSIMError(msg)
# go over all rows
for index in range(indata.nrows):
# check for None-entries
if ((indata[0][index] is None) or (indata[1][index] is None)):
msg = ("\nData in file: {0:s} contains NULL"
"entries!".format(input_file))
raise aXeSIMError(msg)
# check for ascending order
if index > 0:
# check that independent data is rising
if indata[0][index] <= indata[0][index-1]:
msg = ("\nIndependent column data in file: {0:s}"
" is not monotonically rising!".format(input_file))
raise aXeSIMError(msg)
def _get_fits_name(self, fits_name):
"""Determine the proper fits name
If not explicitly given, the method determines a proper fits name
for an interpolator. As a base serves the file name the
interpolator was built from.
Parameters
----------
fits_name: str
a fits file name
Returns
-------
fits_name: str
a fits name for the interpolator data
"""
# check whether an explicit name
# is given
if fits_name is not None:
# return that explicit name
return fits_name
# check whether a root for the
# fits name is there
if self.input_file is None:
# give an error and out
error_message = ("\nCan not derive a proper name for the fits "
"file.\nPlease specify a name explicitly!")
raise aXeSIMError(error_message)
else:
# find the position of the
# last dot
pos = self.input_file.rfind('.')
# check whether there is a dot
if pos > -1:
# compose the new name
fits_name = self.input_file[:pos] + '.fits'
else:
# compose the new name
fits_name = self.input_file + '.fits'
return fits_name
def writetofits(self, fits_name=None, colname1=None, colname2=None):
"""Write the interplator values to a fits file
The method writes the data of an interpolator to a binary fits table.
The fiter name as well as the column names of independent and
dependent data are specified explicitly.
Parameters
----------
fits_name: str
the fits file name
colname1: str
column name for independent data
colname2: str
column name for dependent data
Returns
-------
fits_name: str
the fits file name
"""
# get the fits name
out_name = self._get_fits_name(fits_name)
# create a new table
out_tab = axe_asciidata.create(2, len(self))
# go over all data
for index in range(len(self)):
# store the data in the table
out_tab[0][index] = self._indep_data[index]
out_tab[1][index] = self._depen_data[index]
# rename the columns, if
# there are names
if colname1 is not None:
out_tab[0].rename(colname1)
if colname2 is not None:
out_tab[1].rename(colname2)
# write the table to fits
out_tab.writetofits(out_name)
# return the fits name
return out_name
def writeto(self, filename):
"""Write the interplator values to an ASCII
The method writes the data of an interpolator to a binary fits table.
The fiter name as well as the column names of independent and
dependent data are specified explicitly.
Parameters
----------
filename: str
the output file name
Returns
-------
filename: str
the file name
"""
# create a new table
out_tab = axe_asciidata.create(2, len(self))
# go over all data
for index in range(len(self)):
# store the data in the table
out_tab[0][index] = self._indep_data[index]
out_tab[1][index] = self._depen_data[index]
# write the table to fits
out_tab.writeto(filename)
# return the fits name
return filename
def integrate(self):
"""Evaluate the integral over the dependent values
The method computes and returns the integral over the
interpolator values. Only the fixed independent and dependent
data values are used for computing the integral by simple
adding of the differnetial elements.
@return: the integral over the interpolator values
@rtype: float
"""
# initialize the
# integrated value
integral = 0.0
# go over all data
for index in range(1, len(self)-1):
# add the increment
integral += self._depen_data[index] * (self._indep_data[index+1] -
self._indep_data[index-1])
# dont forget the start and end piece
integral += self._depen_data[0] * (self._indep_data[1] -
self._indep_data[0])
integral += self._depen_data[len(self)-1] * (self._indep_data[len(self)-1] - self._indep_data[len(self)-2])
# return the integral,
# correcting the bin extension
return integral/2.0
def toSensitivity(self, A=None):
"""Transfer the bandpass to sensitivity
The method performs all steps to transform an interpolator
representing a total passband curve to a sensitivity curve.
Parameters
----------
A: float
collecting area of telescope
"""
h_erg = 6.6260693E-27
c_cm = 2.99792458E+10
# give a default for A
if A is None:
A = math.pi * 120.0 * 120.0
# go over all data
for index in range(len(self)):
# compute the conversion factor
factor = A / (h_erg * c_cm / (1.0E-08 * self._indep_data[index]))
# apply the conversion factor
self._depen_data[index] = self._depen_data[index] * factor
def toThroughput(self, A=None):
"""Transfer the sensitivity to a passband
The method performs all steps to transform an interpolator
representing a sensitivity curve to a total passband for a given
collecting area.
Parameters
----------
A: float
collecting area of telescope
"""
h_erg = 6.6260693E-27
c_cm = 2.99792458E+10
# give a default for A
if A is None:
A = math.pi * 120.0 * 120.0
# go over all data
for index in range(len(self)):
# compute the conversion factor
factor = A / (h_erg * c_cm / (1.0E-08 * self._indep_data[index]))
# apply the conversion factor
self._depen_data[index] = self._depen_data[index] / factor
def tonm(self):
"""Transfer the independent column to unit [nm]
Provided the unit of the independent column is [AA], this method
transforms the unit to [nm]
"""
self.mult_indep(0.1)
def tofits(self, colname1=None, colname2=None):
"""Transfer the data to a fits extension
The method writes the data of an interpolator to a binary fits
table extension, which is returned to the calling routine.
The fiter name as well as the column names of independent and
dependent data are specified explicitly.
Parameters
----------
colname1: str
column name for independent data
colname2: str
column name for dependent data
Returns
-------
table.hdu()
the interpolator as fits table extension
"""
# create a new ascii table
new_table = axe_asciidata.create(2, len(self))
# go over all data
for index in range(len(self)):
# transfer the values
new_table[0][index] = self._indep_data[index]
new_table[1][index] = self._depen_data[index]
# re-name the table column
if colname1 is not | |
expires after 24 hours.
:type logGroupName: string
:param logGroupName: **[REQUIRED]**
The name of the log group to search.
:type logStreamNames: list
:param logStreamNames:
Filters the results to only logs from the log streams in this list.
If you specify a value for both ``logStreamNamePrefix`` and ``logStreamNames`` , the action returns an ``InvalidParameterException`` error.
- *(string) --*
:type logStreamNamePrefix: string
:param logStreamNamePrefix:
Filters the results to include only events from log streams that have names starting with this prefix.
If you specify a value for both ``logStreamNamePrefix`` and ``logStreamNames`` , but the value for ``logStreamNamePrefix`` does not match any log stream names specified in ``logStreamNames`` , the action returns an ``InvalidParameterException`` error.
:type startTime: integer
:param startTime:
The start of the time range, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a timestamp before this time are not returned.
:type endTime: integer
:param endTime:
The end of the time range, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a timestamp later than this time are not returned.
:type filterPattern: string
:param filterPattern:
The filter pattern to use. For more information, see `Filter and Pattern Syntax <https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html>`__ .
If not provided, all the events are matched.
:type nextToken: string
:param nextToken:
The token for the next set of events to return. (You received this token from a previous call.)
:type limit: integer
:param limit:
The maximum number of events to return. The default is 10,000 events.
:type interleaved: boolean
:param interleaved:
If the value is true, the operation makes a best effort to provide responses that contain events from multiple log streams within the log group, interleaved in a single response. If the value is false, all the matched log events in the first log stream are searched first, then those in the next log stream, and so on. The default is false.
:rtype: dict
:returns:
"""
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
``ClientMethod``.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method\'s model.
:returns: The presigned url
"""
pass
def get_log_events(self, logGroupName: str, logStreamName: str, startTime: int = None, endTime: int = None, nextToken: str = None, limit: int = None, startFromHead: bool = None) -> Dict:
"""
Lists log events from the specified log stream. You can list all the log events or filter using a time range.
By default, this operation returns as many log events as can fit in a response size of 1MB (up to 10,000 log events). You can get additional log events by specifying one of the tokens in a subsequent call.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetLogEvents>`_
**Request Syntax**
::
response = client.get_log_events(
logGroupName='string',
logStreamName='string',
startTime=123,
endTime=123,
nextToken='string',
limit=123,
startFromHead=True|False
)
**Response Syntax**
::
{
'events': [
{
'timestamp': 123,
'message': 'string',
'ingestionTime': 123
},
],
'nextForwardToken': 'string',
'nextBackwardToken': 'string'
}
**Response Structure**
- *(dict) --*
- **events** *(list) --*
The events.
- *(dict) --*
Represents a log event.
- **timestamp** *(integer) --*
The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
- **message** *(string) --*
The data contained in the log event.
- **ingestionTime** *(integer) --*
The time the event was ingested, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
- **nextForwardToken** *(string) --*
The token for the next set of items in the forward direction. The token expires after 24 hours. If you have reached the end of the stream, it will return the same token you passed in.
- **nextBackwardToken** *(string) --*
The token for the next set of items in the backward direction. The token expires after 24 hours. This token will never be null. If you have reached the end of the stream, it will return the same token you passed in.
:type logGroupName: string
:param logGroupName: **[REQUIRED]**
The name of the log group.
:type logStreamName: string
:param logStreamName: **[REQUIRED]**
The name of the log stream.
:type startTime: integer
:param startTime:
The start of the time range, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a timestamp equal to this time or later than this time are included. Events with a timestamp earlier than this time are not included.
:type endTime: integer
:param endTime:
The end of the time range, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a timestamp equal to or later than this time are not included.
:type nextToken: string
:param nextToken:
The token for the next set of items to return. (You received this token from a previous call.)
:type limit: integer
:param limit:
The maximum number of log events returned. If you don\'t specify a value, the maximum is as many log events as can fit in a response size of 1 MB, up to 10,000 log events.
:type startFromHead: boolean
:param startFromHead:
If the value is true, the earliest log events are returned first. If the value is false, the latest log events are returned first. The default value is false.
:rtype: dict
:returns:
"""
pass
def get_log_group_fields(self, logGroupName: str, time: int = None) -> Dict:
"""
Returns a list of the fields that are included in log events in the specified log group, along with the percentage of log events that contain each field. The search is limited to a time period that you specify.
In the results, fields that start with @ are fields generated by CloudWatch Logs. For example, ``@timestamp`` is the timestamp of each log event.
The response results are sorted by the frequency percentage, starting with the highest percentage.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetLogGroupFields>`_
**Request Syntax**
::
response = client.get_log_group_fields(
logGroupName='string',
time=123
)
**Response Syntax**
::
{
'logGroupFields': [
{
'name': 'string',
'percent': 123
},
]
}
**Response Structure**
- *(dict) --*
- **logGroupFields** *(list) --*
The array of fields found in the query. Each object in the array contains the name of the field, along with the percentage of time it appeared in the log events that were queried.
- *(dict) --*
The fields contained in log events found by a ``GetLogGroupFields`` operation, along with the percentage of queried log events in which each field appears.
- **name** *(string) --*
The name of a log field.
- **percent** *(integer) --*
The percentage of log events queried that contained the field.
:type logGroupName: string
:param logGroupName: **[REQUIRED]**
The name of the log group to search.
:type time: integer
:param time:
The time to set as the center of the query. If you specify ``time`` , the 8 minutes before and 8 minutes after this time are searched. If you omit ``time`` , the past 15 minutes are queried.
The ``time`` value is specified as epoch time, the number of seconds since January 1, 1970, 00:00:00 UTC.
:rtype: dict
:returns:
"""
pass
def get_log_record(self, logRecordPointer: str) -> Dict:
"""
Retrieves all the fields and values of a single log event. All fields are retrieved, even if the original query that produced the ``logRecordPointer`` retrieved only a subset of fields. Fields are returned as field name/field value | |
p_group:
p.p_def.write_new_test_instance(
w, f'{p.name}:', instances=2 if p.repeatable else 1, end=',')
def write_round_trip_test(self, w: GoWriter):
self.write_new_test_instance(w, f'{self.short} :=')
w.write(f'b, err := {self.short}.MarshalBinary()')
with w.condition('err != nil'):
w.write('t.Fatalf("%+v", err)')
w.write(f'var {self.short}2 {self.type_name}')
with w.condition(f'err := {self.short}2.UnmarshalBinary(b); err != nil'):
w.write(f't.Errorf("%+v\\n%# 02x\\n%+v\\n%+v", err, b, {self.short}, {self.short}2)')
with w.condition(f'!reflect.DeepEqual({self.short}, {self.short}2)'):
w.write(f't.Errorf("mismatch:\\n%# 02x\\n%+v\\n%+v", b, {self.short}, {self.short}2)')
def write_get_header(self, w: GoWriter):
with w.block(f'func ({self.short} *{self.type_name}) getHeader() paramHeader'):
self.write_get_header_body(w)
    def write_get_header_body(self, w: GoWriter):
        """Emit the body of the generated Go getHeader() method.

        Computes the wire size of the parameter (statically known minimum
        plus runtime expressions for variable-length fields) and collects
        the headers of all sub-parameters into the returned paramHeader.
        """
        # f_sizes holds Go expressions evaluated at runtime for
        # variable-size fields; min_f_size accumulates the statically
        # known byte count of the fixed part.
        f_sizes = []
        min_f_size = 0
        for f in self.fields:
            if f.is_fixed_size():
                if f.partial:
                    # partial fields share bytes with a neighbour -- counted once
                    continue
                else:
                    min_f_size += f.min_size
            elif f.type.name == 'bitArray':
                # 2 bytes of bit count, plus the packed bits (8 per byte)
                min_f_size += 2
                f_sizes += [f'uint16(((int({f.var_name(self.short)}NumBits)-1)>>3)+1)']
            elif f.length == -1:  # all remaining data, so no length header
                f_sizes += [f'uint16('
                            f'len({f.var_name(self.short)})'
                            f'{f"*{f.type.size}" if f.type.size > 1 else ""})']
            else:
                # length-prefixed variable field: 2-byte length header
                min_f_size += 2
                f_sizes += [f'uint16('
                            f'len({f.var_name(self.short)})'
                            f'{f"*{f.type.size}" if f.type.size > 1 else ""})']
        # Partition the sub-parameters by multiplicity.
        single_inst = [p for p in self.parameters if p.exactly_one]
        zero_or_one = [p for p in self.parameters if p.optional and not p.repeatable]
        repeatable = [p for p in self.parameters if p.repeatable]
        n_params = [str(len(single_inst))] if any(single_inst) else []
        if any(not p.exactly_one for p in self.parameters):
            n_params += [f'len({self.short}.{p.name})' for p in repeatable]
        if any(n_params):
            # NOTE(review): this branch leaves `blk` unassigned, which would
            # raise NameError at `w.block(blk)` below -- presumably
            # any(n_params) can only hold in configurations that never reach
            # this method, or the intent was to also set blk here. Confirm.
            w.write(f'nParams := {"+ ".join(n_params)}', auto_break=True)
        elif any(zero_or_one):
            # count optional parameters at runtime in the generated code
            w.write('nParams := 0')
            for p in zero_or_one:
                with w.condition(f'{self.short}.{p.name} != nil'):
                    w.write(f'nParams++')
            blk = 'ph := paramHeader'
        elif any(self.parameters) and not self.fixed_size:
            blk = 'ph := paramHeader'
        else:
            # no dynamic content: the header literal can be returned directly
            blk = 'return paramHeader'
        def write_subs():
            # Emit the `subs:` member of the paramHeader literal.
            if any(zero_or_one) or any(repeatable):
                # dynamic sub count: pre-allocate, filled in below
                w.write(f'subs: make([]paramHeader, 0, nParams),')
                return
            if not (self.fixed_size and any(single_inst)):
                return
            # fixed size + required subs only: emit them inline
            with w.block('subs: []paramHeader', after_end=',\n'):
                for p in single_inst:
                    w.write(f'{self.short}.{p.name}.getHeader(),')
        with w.block(blk):
            w.write(f'ParamType: {self.const_name},')
            w.write(f'data: {self.short},')
            if any(f_sizes):
                w.write(f'sz: {min_f_size + self.header_size} + {"+ ".join(f_sizes)},', auto_break=True)
            else:
                w.write(f'sz: {min_f_size + self.header_size},')
            write_subs()
        if not any(self.parameters) or self.fixed_size:
            return
        if not any(zero_or_one) and not any(repeatable) and f_sizes:
            # only required single-instance subs: sizes are known members
            f_sizes += [f'ph.subs[{i}].sz' for i in range(len(single_inst))]
            w.write(f'ph.sz += {"+ ".join(f_sizes)}')
            w.write('return ph')
            return
        # add each present sub-parameter's header and size at runtime
        for (optional, repeatable, group_name), p_group in groupby(
                self.parameters, lambda p: (p.optional, p.repeatable, p.group)):
            p_group = list(p_group)
            if repeatable:
                for p in p_group:
                    with w.block(f'for i := range {self.short}.{p.name}'):
                        w.write(f'sh := {self.short}.{p.name}[i].getHeader()')
                        w.write(f'ph.sz += sh.sz')
                        w.write(f'ph.subs = append(ph.subs, sh)')
            elif optional:
                for p in p_group:
                    with w.condition(f'{self.short}.{p.name} != nil'):
                        w.write(f'sh := {self.short}.{p.name}.getHeader()')
                        w.write(f'ph.sz += sh.sz')
                        w.write(f'ph.subs = append(ph.subs, sh)')
            elif group_name is not None:
                # mutually exclusive group: a Go switch selects the variant
                with w.switch():
                    for p in p_group:
                        if p.p_def.can_inline():
                            cond = f'{self.short}.{p.name} != 0'
                        else:
                            # NOTE(review): `f.is_fixed_size` is referenced
                            # without calling it, so the method object is
                            # always truthy and the "!= nil" arm is always
                            # chosen -- elsewhere it is called as
                            # f.is_fixed_size(). Looks unintended; confirm.
                            cond = ' && '.join([
                                f'{f.var_name(f"{self.short}.{p.name}")} ' +
                                ("!= 0" if not f.is_fixed_size else "!= nil")
                                for f in p.p_def.fields])
                        with w.case(cond):
                            w.write(f'ph.subs = append(ph.subs, {self.short}.{p.name}.getHeader())')
                            w.write('ph.sz += ph.subs[len(ph.subs)-1].sz')
            else:
                for p in p_group:
                    w.write(f'ph.subs = append(ph.subs, {self.short}.{p.name}.getHeader())')
                    w.write('ph.sz += ph.subs[len(ph.subs)-1].sz')
        w.write(f'return ph')
def write_encode(self, w: GoWriter):
self.write_get_header(w)
with w.block(f'func ({self.short} *{self.type_name}) EncodeFields(w io.Writer) error'):
self.write_encode_body(w)
def value_bytes(self) -> str:
s = self.short if not self.can_inline() else '*' + self.short
vb = [f'{f.value_bytes(f.var_name(s))},' for f in self.fields]
return ''.join(vb)
    def write_encode_body(self, w: GoWriter):
        """Emit the body of the generated EncodeFields method.

        Runs of fixed-size fields are flushed together as a single
        byte-slice write (sub-byte fields are shifted into position and
        OR-ed into shared bytes); variable-size fields are written
        individually, length-prefixed where the wire format requires it.
        """
        # receiver expression: inlined params dereference the receiver
        s = self.short if not self.can_inline() else '*' + self.short
        for fxd, f_group in groupby(self.fields, lambda f: f.is_fixed_size()):
            if fxd:
                f_group = list(f_group)
                # vb collects one Go expression per emitted byte
                vb = []
                for shift, fg in groupby(f_group, lambda f: f.type.bits != 8):
                    fg = list(fg)
                    if not shift:
                        # whole-byte fields: one expression each
                        vb += [','.join([f'{f.value_bytes(f.var_name(s))}' for f in fg])]
                        continue
                    # sub-byte fields: shift into position; partial fields
                    # are OR-ed into the byte opened by their predecessor
                    vb += ['']
                    for f in fg:
                        put = f'{f.value_bytes(f.var_name(s))}' + \
                            f'<<{(f.type.size * 8 - f.type.bits) - f.bit}'
                        if f.partial:
                            vb[-1] += put + '|'
                        else:
                            vb[-1] += put
                            vb += ['']
                    # drop the trailing empty slot opened by the last field
                    vb = vb[:-1]
                # flush the whole fixed-size run with one w.Write call
                w.write('if _,err:=w.Write([]byte{ '
                        f'{",".join(vb)}}});err!=nil{{', auto_break=True)
                w.indent()
                w.reterr(f'failed to write fields for {self.const_name}', wrap=True)
                w.dedent()
                w.write('}')
                continue
            # variable-size fields are written one at a time
            for f in f_group:
                if f.type.name == 'bitArray':
                    # bit arrays carry an explicit 16-bit bit-count prefix
                    with w.condition('_, err := w.Write([]byte{'
                                     f'byte({f.var_name(s)}NumBits>>8), '
                                     f'byte({f.var_name(s)}NumBits&0xFF)'
                                     '}); err != nil'):
                        w.reterr(f'failed to write length of {f.name}', wrap=True)
                    with w.condition('_, err := w.Write('
                                     f'{f.var_name(s)}, '
                                     '); err != nil'):
                        w.reterr(f'failed to write {f.name}', wrap=True)
                elif f.length != 0:
                    # known-length slice: raw write, no prefix
                    with w.condition(f'_, err := w.Write({f.var_name(s)}); err != nil'):
                        w.reterr(f'failed to write {f.name}', wrap=True)
                elif f.is_array or f.type.name == 'string':
                    # variable-length data: 16-bit length prefix first
                    with w.condition('_, err := w.Write([]byte{'
                                     f'byte(len({f.var_name(s)})>>8), '
                                     f'byte(len({f.var_name(s)})&0xFF)'
                                     '}); err != nil'):
                        w.reterr(f'failed to write length of {f.name}', wrap=True)
                    if f.type.name == 'string':
                        with w.condition('_, err := w.Write([]byte('
                                         f'{f.var_name(s)}'
                                         ')); err != nil'):
                            w.reterr(f'failed to write {f.name}', wrap=True)
                    elif f.type.can_copy():
                        with w.condition('_, err := w.Write('
                                         f'{f.var_name(s)}'
                                         '); err != nil'):
                            w.reterr(f'failed to write {f.name}', wrap=True)
                    else:
                        # element type needs encoding; defer to binary.Write
                        with w.condition('err := binary.Write(w, binary.BigEndian, '
                                         f'{f.var_name(s)}); err != nil'):
                            w.reterr(f'failed to write {f.name}', wrap=True)
        w.write(f'return nil')
def write_marshal_body(self, w):
w.write('b := bytes.Buffer{}')
w.err_check(f'encodeParams(&b, {self.short}.getHeader())', ret='nil, err')
w.write(f'return b.Bytes()[{self.header_size}:], nil')
def header_bytes(self) -> str:
if self.is_tlv:
return f'{self.type_id_bytes()},0x00,0x00'
else:
return f'{self.type_id_bytes()}'
def type_id_bytes(self) -> str:
if self.is_tlv:
return f'0x{self.type_id >> 8:02x}, 0x{self.type_id & 0xFF:02x}'
else:
return f'0x{self.type_id | 0x80:02x}'
def size_bytes(self) -> str:
assert self.is_tlv and self.fixed_size
return f'0x{self.min_size >> 8:02x}, 0x{self.min_size & 0xFF:02x}'
def len_check(self, w) -> int:
sz = self.min_size - self.header_size
fx = str(self.fixed_size).lower()
if sz == 0:
if self.empty():
w.err_check(f'hasEnoughBytes({self.const_name}, {sz}, len(data), {fx})')
else:
w.comment(f'{self.const_name} can be empty')
else:
w.err_check(f'hasEnoughBytes({self.const_name}, {sz}, len(data), {fx})')
return sz
    def write_unmarshal_body(self, w):
        """Emit the body of the generated UnmarshalBinary/decode method.

        First guards the byte length, then decodes the fixed fields, then
        loops over the sub-parameter groups, emitting either a direct
        decode (single/required parameters) or a Go loop with a switch
        over the parameter type for repeatable/optional/grouped ones.
        """
        # byte length check
        known_data_len = self.len_check(w)
        # field unmarshaling; pos tracks the byte offset inside the
        # current (un-resliced) data window
        pos = 0
        if self.can_inline():
            # inlined parameter: a single field assigned straight through
            assert len(self.fields) == 1
            f = self.fields[0]
            w.write(f'*{self.short} = {self.type_name}({f.value()})')
        elif any(self.fields):
            for i, f in enumerate(self.fields):
                # skip reslice if this is a fixed size field,
                # or if it's the final item (i.e., last field & no params)
                reslice = ((not f.is_fixed_size()) or
                           (i == len(self.fields) - 1 and any(self.parameters)))
                f.write_unmarshal(w, reslice, self.short, pos)
                if reslice:
                    # data was advanced in the generated code; restart offset
                    pos = 0
                elif not f.partial:
                    pos += f.min_size
                # track how many statically-known bytes remain
                if not f.is_fixed_size():
                    known_data_len = 0
                elif not f.partial:
                    known_data_len -= f.min_size
        if not any(self.parameters):
            return
        # parameters/sub-parameters
        w.comment('sub-parameters')
        required = {p.name for p in self.parameters if not p.optional}
        groups = groupby(self.parameters, lambda x: (x.optional, x.repeatable, x.group))
        for i, ((optional, repeatable, g_name), p_group) in enumerate(groups):
            # w.comment(f'known data length: {known_data_len}')
            p_group: List[ParamSpec] = list(p_group)
            req_len = 0 if optional else min(p.p_def.min_size for p in p_group)
            if optional and not any(required):
                # nothing mandatory left: running out of data is success
                with w.condition(f'len(data) == 0'):
                    w.write('return nil')
            required.difference_update(p.name for p in p_group)
            # simple path: there's only a single parameter, or all in p_group are required
            if not repeatable and (len(p_group) == 1 or (g_name is None and not optional)):
                for p in p_group:
                    sub: 'Container' = p.p_def
                    if self.fixed_size:
                        assert sub.min_size <= known_data_len, f'{self.name}.{sub.name}: {sub.min_size} > {known_data_len}'
                    if not optional:
                        known_data_len = sub.header_len_check(w, known_data_len)
                    if sub.header_size == 1:
                        self.unmarshal_tv(w, p)
                    else:
                        self.unmarshal_tlv(w, p, False)
                    if not optional:
                        if sub.fixed_size:
                            known_data_len -= sub.min_size
                        else:
                            known_data_len = 0
                continue
            # otherwise, the next parameter is one of a collection of
            # repeatable, optional, intermixed, or mutually-exclusive parameters
            mut_excl = not (optional or repeatable)
            tvs = [p for p in p_group if p.p_def.header_size == 1]
            tlvs = [p for p in p_group if p.p_def.header_size == 4]
            blk = ''
            if not mut_excl:
                # repeatable/optional groups decode in a labeled Go loop
                w.noindent(f'\nparamGroup{i}:')
                blk = f'for len(data) >= {1 if any(tvs) else 4}'
            def default_case(w: GoWriter):
                # fall-through of the generated switch: an unknown type
                # either ends the group (optional/repeatable) or is an error
                if optional or repeatable:
                    w.write(f'break paramGroup{i}')
                else:
                    w.reterr('unexpected parameter %v when unmarshaling '
                             f'Param{self.name}', ['pt'])
            has_sub_len = False
            with w.block(blk):
                if tvs and tlvs:
                    # special weirdness: the next param could be a single byte TV
                    # or it could be a TLV with a 4 byte header,
                    # so we have to check how much data is available
                    w.write('var pt ParamType')
                    with w.condition('data[0]&0x80 != 0'):
                        w.comment('TV parameter')
                        w.write('pt = ParamType(data[0]&0x7F)')
                    w.ifelse('len(data) < 4')
                    w.reterr('expecting a TLV header, but %d < 4 byte remain', ['len(data)'])
                    w.ifelse()
                    w.write('pt = ParamType(binary.BigEndian.Uint16(data))')
                elif tvs:
                    w.write('pt := ParamType(data[0]&0x7F)')
                else:
                    w.write(f'pt := ParamType(binary.BigEndian.Uint16(data))')
                    if not mut_excl:
                        # TLV-only loop: read the length so the generated
                        # loop can advance past the whole sub-parameter
                        w.write('subLen := binary.BigEndian.Uint16(data[2:])')
                        has_sub_len = True
                        with w.condition('int(subLen) > len(data)'):
                            w.reterr(f'%v says it has %d bytes, but only %d bytes remain',
                                     ['pt', 'subLen', 'len(data)'])
                with w.switch('pt'):
                    self.write_cases(w, p_group, has_sub_len, default_case)
                # NOTE(review): the remainder is emitted while the Go loop
                # block is still open, so `data = data[subLen:]` lands
                # inside the loop (subLen is loop-scoped in Go) -- confirm
                # mixed TV/TLV loops advance `data` inside write_cases.
                if blk == '' or (tvs and not tlvs):
                    known_data_len -= req_len
                    continue
                if has_sub_len:
                    w.write(f'data = data[subLen:]')
                known_data_len = 0
        return
def unmarshal_tv(self, w: GoWriter, p: ParamSpec):
sub = p.p_def
if p.optional:
blk = w.condition(f'subType := ParamType(data[0]&0x7F); '
f'subType == Param{sub.name}')
else:
blk = w.condition(f'subType := | |
# Constructs the test
@wraps(test_fn)
def instantiated_test(self, name=name, test=test_fn, dtype=dtype, op=op):
device_arg: str = cls.get_primary_device()
if hasattr(test_fn, 'num_required_devices'):
device_arg = cls.get_all_devices()
# Sets precision and runs test
# Note: precision is reset after the test is run
guard_precision = self.precision
try:
self.precision = self._get_precision_override(test_fn, dtype)
args = (arg for arg in (device_arg, dtype, op) if arg is not None)
result = test_fn(self, *args)
except RuntimeError as rte:
# check if rte should stop entire test suite.
self._stop_test_suite = self._should_stop_test_suite()
# raise the runtime error as is for the test suite to record.
raise rte
finally:
self.precision = guard_precision
return result
assert not hasattr(cls, test_name), "Redefinition of test {0}".format(test_name)
setattr(cls, test_name, instantiated_test)
# Handles tests using the ops decorator
if hasattr(test, "op_list"):
for op in test.op_list:
# Acquires dtypes, using the op data if unspecified
dtypes = cls._get_dtypes(test)
if dtypes is None:
if test.opinfo_dtypes == OpDTypes.unsupported:
dtypes = set(get_all_dtypes()).difference(op.supported_dtypes(cls.device_type))
elif test.opinfo_dtypes == OpDTypes.supported:
dtypes = op.supported_dtypes(cls.device_type)
elif test.opinfo_dtypes == OpDTypes.basic:
dtypes = op.default_test_dtypes(cls.device_type)
else:
raise RuntimeError(f"Unknown OpDType: {test.opinfo_dtypes}")
if test.allowed_dtypes is not None:
dtypes = dtypes.intersection(test.allowed_dtypes)
else:
assert test.allowed_dtypes is None, "ops(allowed_dtypes=[...]) and the dtypes decorator are incompatible"
assert test.opinfo_dtypes == OpDTypes.basic, "ops(dtypes=...) and the dtypes decorator are incompatible"
for dtype in dtypes:
instantiate_test_helper(cls,
name,
test=test,
dtype=dtype,
op=op)
else:
# Handles tests that don't use the ops decorator
dtypes = cls._get_dtypes(test)
dtypes = tuple(dtypes) if dtypes is not None else (None,)
for dtype in dtypes:
instantiate_test_helper(cls, name, test=test, dtype=dtype, op=None)
    def run(self, result=None):
        """Run the test via unittest, then abort the remaining suite when
        this test flagged a critical (suite-stopping) failure.

        Parameters
        ----------
        result:
            the unittest result object collecting outcomes; forwarded to
            ``super().run``
        """
        super().run(result=result)
        # Early terminate test if _stop_test_suite is set.
        # NOTE(review): assumes `result` is not None when the flag is set;
        # with result=None, super().run uses its own default result object
        # and result.stop() here would raise AttributeError -- confirm
        # callers always pass a result.
        if self._stop_test_suite:
            result.stop()
class CPUTestBase(DeviceTypeTestBase):
    """Device-type test base for the 'cpu' device."""
    device_type = 'cpu'
    # No critical error should stop CPU test suite
    def _should_stop_test_suite(self):
        # CPU failures never abort the remaining tests
        return False
# The meta device represents tensors that don't have any storage; they have
# all metadata (size, dtype, strides) but they don't actually do any compute
class MetaTestBase(DeviceTypeTestBase):
    """Device-type test base for the 'meta' (storage-less) device."""
    device_type = 'meta'
    # tolerate NotImplementedError from ops lacking meta support
    _ignore_not_implemented_error = True
    def _should_stop_test_suite(self):
        # meta-device failures never abort the remaining tests
        return False
class CUDATestBase(DeviceTypeTestBase):
    """Device-type test base for the 'cuda' device.

    Enables the CUDA memory-leak check and non-default-stream runs, and
    caches MAGMA/cuDNN availability plus the primary device in setUpClass.
    """
    device_type = 'cuda'
    _do_cuda_memory_leak_check = True
    _do_cuda_non_default_stream = True
    primary_device: ClassVar[str]
    cudnn_version: ClassVar[Any]
    no_magma: ClassVar[bool]
    no_cudnn: ClassVar[bool]

    def has_cudnn(self):
        # cuDNN availability was probed once in setUpClass
        return not self.no_cudnn

    @classmethod
    def get_primary_device(cls):
        return cls.primary_device

    @classmethod
    def get_all_devices(cls):
        # primary device first, then every other visible CUDA device
        primary = cls.get_primary_device()
        primary_idx = int(primary.split(':')[1])
        others = ['cuda:{0}'.format(idx)
                  for idx in range(torch.cuda.device_count())
                  if idx != primary_idx]
        return [primary] + others

    @classmethod
    def setUpClass(cls):
        # has_magma shows up after cuda is initialized
        probe = torch.ones(1).cuda()
        cls.no_magma = not torch.cuda.has_magma
        # determine whether cuDNN is available, and its version
        cls.no_cudnn = not torch.backends.cudnn.is_acceptable(probe)
        cls.cudnn_version = None if cls.no_cudnn else torch.backends.cudnn.version()
        # the current device acts as the primary (test) device
        cls.primary_device = 'cuda:{0}'.format(torch.cuda.current_device())
# Adds available device-type-specific test base classes
def get_device_type_test_bases():
    """Return the device-type test base classes usable in this environment.

    Inside sandcastle/fbcode, remote-GPU hosts get only the CUDA base
    (unless a sanitizer is active) and other hosts get CPU + meta;
    elsewhere CPU is always included, meta unless noarch tests are
    skipped, and CUDA when available.
    """
    # typed as List[Any] due to mypy list-of-union issue:
    # https://github.com/python/mypy/issues/3351
    bases: List[Any] = []
    if IS_SANDCASTLE or IS_FBCODE:
        if IS_REMOTE_GPU:
            # skip CUDA when a sanitizer is enabled
            if not (TEST_WITH_ASAN or TEST_WITH_TSAN or TEST_WITH_UBSAN):
                bases.append(CUDATestBase)
        else:
            bases.append(CPUTestBase)
            bases.append(MetaTestBase)
        return bases
    bases.append(CPUTestBase)
    if not TEST_SKIP_NOARCH:
        bases.append(MetaTestBase)
    if torch.cuda.is_available():
        bases.append(CUDATestBase)
    return bases
# The bases actually used for instantiation, possibly extended below.
device_type_test_bases = get_device_type_test_bases()
# Note [How to extend DeviceTypeTestBase to add new test device]
# The following logic optionally allows downstream projects like pytorch/xla to
# add more test devices.
# Instructions:
# - Add a python file (e.g. pytorch/xla/test/pytorch_test_base.py) in downstream project.
# - Inside the file, one should inherit from `DeviceTypeTestBase` class and define
#   a new DeviceTypeTest class (e.g. `XLATestBase`) with proper implementation of
#   `instantiate_test` method.
# - DO NOT import common_device_type inside the file.
#   `runpy.run_path` with `globals()` already properly setup the context so that
#   `DeviceTypeTestBase` is already available.
# - Set a top-level variable `TEST_CLASS` equal to your new class.
#   E.g. TEST_CLASS = XLATensorBase
# - To run tests with new device type, set `TORCH_TEST_DEVICES` env variable to path
#   to this file. Multiple paths can be separated by `:`.
# See pytorch/xla/test/pytorch_test_base.py for a more detailed example.
_TORCH_TEST_DEVICES = os.environ.get('TORCH_TEST_DEVICES', None)
if _TORCH_TEST_DEVICES:
    for path in _TORCH_TEST_DEVICES.split(':'):
        # execute the downstream file with our globals so it can subclass
        # DeviceTypeTestBase, then register its TEST_CLASS
        mod = runpy.run_path(path, init_globals=globals())
        device_type_test_bases.append(mod['TEST_CLASS'])
# opt-in flag for running under cuda-memcheck
PYTORCH_CUDA_MEMCHECK = os.getenv('PYTORCH_CUDA_MEMCHECK', '0') == '1'
# Adds 'instantiated' device-specific test cases to the given scope.
# The tests in these test cases are derived from the generic tests in
# generic_test_class.
# See note "Generic Device Type Testing."
def instantiate_device_type_tests(generic_test_class, scope, except_for=None, only_for=None):
    """Adds 'instantiated' device-specific test cases to the given scope.

    The tests in these test cases are derived from the generic tests in
    generic_test_class. See note "Generic Device Type Testing."

    Args:
        generic_test_class: class whose `test*` members are instantiated
            once per device type.
        scope: namespace (typically the caller's ``globals()``) to add the
            instantiated classes to; the generic class is removed from it.
        except_for: device types to skip. Defaults to the
            PYTORCH_TESTING_DEVICE_EXCEPT_FOR env var (comma-separated).
        only_for: device types to restrict to. Defaults to the
            PYTORCH_TESTING_DEVICE_ONLY_FOR env var (comma-separated).
    """
    # Removes the generic test class from its enclosing scope so its tests
    # are not discoverable.
    del scope[generic_test_class.__name__]

    # Creates an 'empty' version of the generic_test_class
    # Note: we don't inherit from the generic_test_class directly because
    #   that would add its tests to our test classes and they would be
    #   discovered (despite not being runnable). Inherited methods also
    #   can't be removed later, and we can't rely on load_tests because
    #   pytest doesn't support it (as of this writing).
    empty_name = generic_test_class.__name__ + "_base"
    empty_class = type(empty_name, generic_test_class.__bases__, {})

    # Acquires members names
    # See Note [Overriding methods in generic tests]
    generic_members = set(generic_test_class.__dict__.keys()) - set(empty_class.__dict__.keys())
    generic_tests = [x for x in generic_members if x.startswith('test')]

    # Derive defaults from environment variables if available, default is still none
    # Usage:
    # export PYTORCH_TESTING_DEVICE_ONLY_FOR=cuda,cpu
    # export PYTORCH_TESTING_DEVICE_EXCEPT_FOR=xla
    # BUG FIX: ''.split(',') is [''] (not []), so without filtering out empty
    # strings an unset env var made only_for == [''] truthy and every device
    # type was skipped by the `not in only_for` check below.
    if only_for is None:
        only_for = [d for d in os.getenv("PYTORCH_TESTING_DEVICE_ONLY_FOR", "").split(",") if d]
    if except_for is None:
        except_for = [d for d in os.getenv("PYTORCH_TESTING_DEVICE_EXCEPT_FOR", "").split(",") if d]

    # Creates device-specific test cases
    for base in device_type_test_bases:
        # A device listed in both filters is contradictory configuration.
        if except_for and only_for:
            assert base.device_type not in except_for or base.device_type not in only_for,\
                "same device cannot appear in except_for and only_for"
        # Skips bases listed in except_for
        if except_for and base.device_type in except_for:
            continue
        if only_for and base.device_type not in only_for:
            continue

        # Special-case for ROCm testing -- only test for 'cuda' i.e. ROCm device by default
        # The except_for and only_for cases were already checked above. At this point we only need to check 'cuda'.
        if TEST_WITH_ROCM and base.device_type != 'cuda':
            continue

        class_name = generic_test_class.__name__ + base.device_type.upper()

        # type set to Any and suppressed due to unsupport runtime class:
        # https://github.com/python/mypy/wiki/Unsupported-Python-Features
        device_type_test_class: Any = type(class_name, (base, empty_class), {})

        for name in generic_members:
            if name in generic_tests:  # Instantiates test member
                test = getattr(generic_test_class, name)
                # XLA-compat shim (XLA's instantiate_test doesn't take generic_cls)
                sig = inspect.signature(device_type_test_class.instantiate_test)
                if len(sig.parameters) == 3:
                    # Instantiates the device-specific tests
                    device_type_test_class.instantiate_test(name, copy.deepcopy(test), generic_cls=generic_test_class)
                else:
                    device_type_test_class.instantiate_test(name, copy.deepcopy(test))
            else:  # Ports non-test member
                assert name not in device_type_test_class.__dict__, "Redefinition of directly defined member {0}".format(name)
                nontest = getattr(generic_test_class, name)
                setattr(device_type_test_class, name, nontest)

        # Mimics defining the instantiated class in the caller's file
        # by setting its module to the given class's and adding
        # the module to the given scope.
        # This lets the instantiated class be discovered by unittest.
        device_type_test_class.__module__ = generic_test_class.__module__
        scope[class_name] = device_type_test_class
class OpDTypes(Enum):
    """Category of dtypes to run an OpInfo-based test for.

    Example use: @ops(dtype=OpDTypes.supported)

    There are 3 categories:
    - basic: the dtypes the operator wants to be tested on by default;
      a subset of the types supported by the operator.
    - supported: every dtype supported by the operator, for exhaustive
      testing of all dtypes.
    - unsupported: dtypes *not* supported by the operator, e.g. for testing
      that the operator raises an error and doesn't crash.
    """
    basic = 0        # Test the basic set of dtypes (default)
    supported = 1    # Test all supported dtypes
    unsupported = 2  # Test only unsupported dtypes
class ops(object):
    """Decorator that defines the ops a test should be run with.

    The test signature must be:
        <test_name>(self, device, dtype, op)

    For example:
        @ops(unary_ufuncs)
        def test_numerics(self, device, dtype, op):
            <test_code>
    """

    def __init__(self, op_list, *, dtypes: OpDTypes = OpDTypes.basic,
                 allowed_dtypes: Optional[Sequence[torch.dtype]] = None):
        self.op_list = op_list
        self.opinfo_dtypes = dtypes
        self.allowed_dtypes = None if allowed_dtypes is None else set(allowed_dtypes)

    def __call__(self, fn):
        # Stash the configuration on the test function itself; the device
        # test base reads these attributes when instantiating the test.
        for attr in ('op_list', 'allowed_dtypes', 'opinfo_dtypes'):
            setattr(fn, attr, getattr(self, attr))
        return fn
# Decorator that skips a test if the given condition is true.
# Notes:
# (1) Skip conditions stack.
# (2) Skip conditions can be bools or strings. If a string the
# test base must have defined the corresponding attribute to be False
# for the test to run. If you want to use a string argument you should
# probably define a new decorator instead | |
only. PEM-encoded certificate of the CA that signed the source database server's certificate.
"""
return pulumi.get(self, "ca_certificate")
@property
@pulumi.getter(name="caCertificateSet")
def ca_certificate_set(self) -> bool:
    """
    Whether the ca_certificate field has been set.
    """
    value = pulumi.get(self, "ca_certificate_set")
    return value
@property
@pulumi.getter(name="clientCertificate")
def client_certificate(self) -> str:
    """
    Input only. PEM-encoded certificate that the replica presents to
    authenticate against the source database server. When used, the
    'client_key' and 'ca_certificate' fields must also be provided.
    """
    value = pulumi.get(self, "client_certificate")
    return value
@property
@pulumi.getter(name="clientCertificateSet")
def client_certificate_set(self) -> bool:
    """
    Whether the client_certificate field has been set.
    """
    value = pulumi.get(self, "client_certificate_set")
    return value
@property
@pulumi.getter(name="clientKey")
def client_key(self) -> str:
    """
    Input only. PEM-encoded private key that pairs with the client
    certificate. When used, the 'client_certificate' and 'ca_certificate'
    fields must also be provided.
    """
    value = pulumi.get(self, "client_key")
    return value
@property
@pulumi.getter(name="clientKeySet")
def client_key_set(self) -> bool:
    """
    Whether the client_key field has been set.
    """
    value = pulumi.get(self, "client_key_set")
    return value
@pulumi.output_type
class MysqlTableResponse(dict):
    """
    MySQL table.
    """
    @staticmethod
    def __key_warning(key: str):
        # Translate camelCase wire keys to their snake_case property names.
        suggest = {
            "mysqlColumns": "mysql_columns",
            "tableName": "table_name",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in MysqlTableResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        MysqlTableResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        MysqlTableResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 mysql_columns: Sequence['outputs.MysqlColumnResponse'],
                 table_name: str):
        """
        MySQL table.

        :param Sequence['MysqlColumnResponse'] mysql_columns: MySQL columns in the database. When unspecified as part of include/exclude lists, includes/excludes everything.
        :param str table_name: Table name.
        """
        for prop, value in (("mysql_columns", mysql_columns),
                            ("table_name", table_name)):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="mysqlColumns")
    def mysql_columns(self) -> Sequence['outputs.MysqlColumnResponse']:
        """
        Columns of the table; when omitted from an include/exclude list,
        every column is included/excluded.
        """
        return pulumi.get(self, "mysql_columns")

    @property
    @pulumi.getter(name="tableName")
    def table_name(self) -> str:
        """
        Name of the table.
        """
        return pulumi.get(self, "table_name")
@pulumi.output_type
class NoConnectivitySettingsResponse(dict):
    """
    No connectivity settings.
    """
    def __init__(__self__):
        """
        No connectivity settings.
        """
        # Marker type: carries no fields, its presence alone selects the
        # "no connectivity" option.
        pass
@pulumi.output_type
class OracleColumnResponse(dict):
    """
    Oracle Column.
    """
    @staticmethod
    def __key_warning(key: str):
        # Translate camelCase wire keys to their snake_case property names.
        suggest = {
            "columnName": "column_name",
            "dataType": "data_type",
            "ordinalPosition": "ordinal_position",
            "primaryKey": "primary_key",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in OracleColumnResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        OracleColumnResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        OracleColumnResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 column_name: str,
                 data_type: str,
                 encoding: str,
                 length: int,
                 nullable: bool,
                 ordinal_position: int,
                 precision: int,
                 primary_key: bool,
                 scale: int):
        """
        Oracle Column.

        :param str column_name: Column name.
        :param str data_type: The Oracle data type.
        :param str encoding: Column encoding.
        :param int length: Column length.
        :param bool nullable: Whether or not the column can accept a null value.
        :param int ordinal_position: The ordinal position of the column in the table.
        :param int precision: Column precision.
        :param bool primary_key: Whether or not the column represents a primary key.
        :param int scale: Column scale.
        """
        for prop, value in (
                ("column_name", column_name),
                ("data_type", data_type),
                ("encoding", encoding),
                ("length", length),
                ("nullable", nullable),
                ("ordinal_position", ordinal_position),
                ("precision", precision),
                ("primary_key", primary_key),
                ("scale", scale)):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="columnName")
    def column_name(self) -> str:
        """
        Name of the column.
        """
        return pulumi.get(self, "column_name")

    @property
    @pulumi.getter(name="dataType")
    def data_type(self) -> str:
        """
        Oracle data type of the column.
        """
        return pulumi.get(self, "data_type")

    @property
    @pulumi.getter
    def encoding(self) -> str:
        """
        Encoding of the column.
        """
        return pulumi.get(self, "encoding")

    @property
    @pulumi.getter
    def length(self) -> int:
        """
        Length of the column.
        """
        return pulumi.get(self, "length")

    @property
    @pulumi.getter
    def nullable(self) -> bool:
        """
        Whether the column can accept a null value.
        """
        return pulumi.get(self, "nullable")

    @property
    @pulumi.getter(name="ordinalPosition")
    def ordinal_position(self) -> int:
        """
        Ordinal position of the column within its table.
        """
        return pulumi.get(self, "ordinal_position")

    @property
    @pulumi.getter
    def precision(self) -> int:
        """
        Precision of the column.
        """
        return pulumi.get(self, "precision")

    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> bool:
        """
        Whether the column represents a primary key.
        """
        return pulumi.get(self, "primary_key")

    @property
    @pulumi.getter
    def scale(self) -> int:
        """
        Scale of the column.
        """
        return pulumi.get(self, "scale")
@pulumi.output_type
class OracleProfileResponse(dict):
    """
    Oracle database profile.
    """
    @staticmethod
    def __key_warning(key: str):
        # Translate camelCase wire keys to their snake_case property names.
        suggest = {
            "connectionAttributes": "connection_attributes",
            "databaseService": "database_service",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in OracleProfileResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        OracleProfileResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        OracleProfileResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 connection_attributes: Mapping[str, str],
                 database_service: str,
                 hostname: str,
                 password: str,
                 port: int,
                 username: str):
        """
        Oracle database profile.

        :param Mapping[str, str] connection_attributes: Connection string attributes
        :param str database_service: Database for the Oracle connection.
        :param str hostname: Hostname for the Oracle connection.
        :param str password: Password for the Oracle connection.
        :param int port: Port for the Oracle connection, default value is 1521.
        :param str username: Username for the Oracle connection.
        """
        for prop, value in (
                ("connection_attributes", connection_attributes),
                ("database_service", database_service),
                ("hostname", hostname),
                ("password", password),
                ("port", port),
                ("username", username)):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="connectionAttributes")
    def connection_attributes(self) -> Mapping[str, str]:
        """
        Attributes appended to the connection string.
        """
        return pulumi.get(self, "connection_attributes")

    @property
    @pulumi.getter(name="databaseService")
    def database_service(self) -> str:
        """
        Database used for the Oracle connection.
        """
        return pulumi.get(self, "database_service")

    @property
    @pulumi.getter
    def hostname(self) -> str:
        """
        Hostname used for the Oracle connection.
        """
        return pulumi.get(self, "hostname")

    @property
    @pulumi.getter
    def password(self) -> str:
        """
        Password used for the Oracle connection.
        """
        return pulumi.get(self, "password")

    @property
    @pulumi.getter
    def port(self) -> int:
        """
        Port used for the Oracle connection; the default is 1521.
        """
        return pulumi.get(self, "port")

    @property
    @pulumi.getter
    def username(self) -> str:
        """
        Username used for the Oracle connection.
        """
        return pulumi.get(self, "username")
@pulumi.output_type
class OracleRdbmsResponse(dict):
    """
    Oracle database structure.
    """
    @staticmethod
    def __key_warning(key: str):
        # Translate camelCase wire keys to their snake_case property names.
        suggest = {"oracleSchemas": "oracle_schemas"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in OracleRdbmsResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        OracleRdbmsResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        OracleRdbmsResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 oracle_schemas: Sequence['outputs.OracleSchemaResponse']):
        """
        Oracle database structure.

        :param Sequence['OracleSchemaResponse'] oracle_schemas: Oracle schemas/databases in the database server.
        """
        pulumi.set(__self__, "oracle_schemas", oracle_schemas)

    @property
    @pulumi.getter(name="oracleSchemas")
    def oracle_schemas(self) -> Sequence['outputs.OracleSchemaResponse']:
        """
        Schemas/databases present in the database server.
        """
        return pulumi.get(self, "oracle_schemas")
@pulumi.output_type
class OracleSchemaResponse(dict):
    """
    Oracle schema.
    """
    @staticmethod
    def __key_warning(key: str):
        # Translate camelCase wire keys to their snake_case property names.
        suggest = {
            "oracleTables": "oracle_tables",
            "schemaName": "schema_name",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in OracleSchemaResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        OracleSchemaResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        OracleSchemaResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 oracle_tables: Sequence['outputs.OracleTableResponse'],
                 schema_name: str):
        """
        Oracle schema.

        :param Sequence['OracleTableResponse'] oracle_tables: Tables in the schema.
        :param str schema_name: Schema name.
        """
        for prop, value in (("oracle_tables", oracle_tables),
                            ("schema_name", schema_name)):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="oracleTables")
    def oracle_tables(self) -> Sequence['outputs.OracleTableResponse']:
        """
        Tables contained in the schema.
        """
        return pulumi.get(self, "oracle_tables")

    @property
    @pulumi.getter(name="schemaName")
    def schema_name(self) -> str:
        """
        Name of the schema.
        """
        return pulumi.get(self, "schema_name")
@pulumi.output_type
class OracleSourceConfigResponse(dict):
    """
    Oracle data source configuration
    """
    def __init__(__self__, *,
                 allowlist: 'outputs.OracleRdbmsResponse',
                 rejectlist: 'outputs.OracleRdbmsResponse'):
        """
        Oracle data source configuration

        :param 'OracleRdbmsResponse' allowlist: Oracle objects to include in the stream.
        :param 'OracleRdbmsResponse' rejectlist: Oracle objects to exclude from the stream.
        """
        for prop, value in (("allowlist", allowlist),
                            ("rejectlist", rejectlist)):
            pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter
    def allowlist(self) -> 'outputs.OracleRdbmsResponse':
        """
        Oracle objects included in the stream.
        """
        return pulumi.get(self, "allowlist")

    @property
    @pulumi.getter
    def rejectlist(self) -> 'outputs.OracleRdbmsResponse':
        """
        Oracle objects excluded from the stream.
        """
        return pulumi.get(self, "rejectlist")
@pulumi.output_type
class OracleTableResponse(dict):
"""
Oracle table.
"""
@staticmethod
def __key_warning(key: str):
    # Translate camelCase wire keys to their snake_case property names.
    suggest = {
        "oracleColumns": "oracle_columns",
        "tableName": "table_name",
    }.get(key)
    if suggest:
        pulumi.log.warn(f"Key '{key}' not found in OracleTableResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
    # Warn when a raw camelCase key is looked up instead of using the
    # snake_case property getter, then defer to dict's own lookup.
    OracleTableResponse.__key_warning(key)
    return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
    # Same camelCase-key warning as __getitem__, then defer to dict.get.
    OracleTableResponse.__key_warning(key)
    return super().get(key, default)
def __init__(__self__, *,
oracle_columns: Sequence['outputs.OracleColumnResponse'],
table_name: str):
"""
Oracle table.
:param Sequence['OracleColumnResponse'] | |
<filename>src/pydap/handlers/dap.py
"""A handler for remote datasets.
DAP handlers convert from different data formats (NetCDF, eg) to the internal
pydap model. The pydap client is just a handler that converts from a remote
dataset to the internal model.
"""
import io
import gzip
import sys
import pprint
import copy
import re
from itertools import chain
# handlers should be set by the application
# http://docs.python.org/2/howto/logging.html#configuring-logging-for-a-library
import logging
import numpy as np
from six.moves.urllib.parse import urlsplit, urlunsplit, quote
from six import text_type, string_types, BytesIO
from pydap.model import (BaseType,
SequenceType, StructureType,
GridType)
from ..net import GET, raise_for_status
from ..lib import (
encode, combine_slices, fix_slice, hyperslab,
START_OF_SEQUENCE, walk, StreamReader, BytesReader,
DEFAULT_TIMEOUT, DAP2_ARRAY_LENGTH_NUMPY_TYPE)
from .lib import ConstraintExpression, BaseHandler, IterData
from ..parsers.dds import build_dataset
from ..parsers.das import parse_das, add_attributes
from ..parsers import parse_ce
from ..responses.dods import DAP2_response_dtypemap
logger = logging.getLogger('pydap')
logger.addHandler(logging.NullHandler())
BLOCKSIZE = 512
class DAPHandler(BaseHandler):
    """Build a dataset from a DAP base URL."""

    def __init__(self, url, application=None, session=None, output_grid=True,
                 timeout=DEFAULT_TIMEOUT, verify=True, user_charset='ascii'):
        """Fetch the DDS and DAS for *url* and assemble ``self.dataset``.

        Data values are not downloaded here; each variable gets a lazy
        proxy (BaseProxy/SequenceProxy) that fetches on access.
        """
        # download DDS/DAS
        scheme, netloc, path, query, fragment = urlsplit(url)

        ddsurl = urlunsplit((scheme, netloc, path + '.dds', query, fragment))
        r = GET(ddsurl, application, session, timeout=timeout,
                verify=verify)
        raise_for_status(r)
        dds = safe_charset_text(r, user_charset)

        dasurl = urlunsplit((scheme, netloc, path + '.das', query, fragment))
        r = GET(dasurl, application, session, timeout=timeout,
                verify=verify)
        raise_for_status(r)
        das = safe_charset_text(r, user_charset)

        # build the dataset from the DDS and add attributes from the DAS
        self.dataset = build_dataset(dds)
        add_attributes(self.dataset, parse_das(das))

        # remove any projection from the url, leaving selections
        projection, selection = parse_ce(query)
        url = urlunsplit((scheme, netloc, path, '&'.join(selection), fragment))

        # now add data proxies
        for var in walk(self.dataset, BaseType):
            var.data = BaseProxy(url, var.id, var.dtype, var.shape,
                                 application=application,
                                 session=session)
        for var in walk(self.dataset, SequenceType):
            # Copy the variable so the proxy's template is not mutated by
            # later projections on the dataset itself.
            template = copy.copy(var)
            var.data = SequenceProxy(url, template, application=application,
                                     session=session)

        # apply projections
        for var in projection:
            target = self.dataset
            # Each projection is a list of (token, index) pairs; descend the
            # dataset tree one token at a time, applying the slice at each
            # level to the matching proxy.
            while var:
                token, index = var.pop(0)
                target = target[token]
                if isinstance(target, BaseType):
                    target.data.slice = fix_slice(index, target.shape)
                elif isinstance(target, GridType):
                    index = fix_slice(index, target.array.shape)
                    target.array.data.slice = index
                    # Keep each map aligned with the corresponding axis slice.
                    for s, child in zip(index, target.maps):
                        target[child].data.slice = (s,)
                elif isinstance(target, SequenceType):
                    target.data.slice = index

        # retrieve only main variable for grid types:
        for var in walk(self.dataset, GridType):
            var.set_output_grid(output_grid)
def get_charset(r, user_charset):
    """Return the response's charset, falling back to *user_charset*."""
    return r.charset or user_charset
def safe_charset_text(r, user_charset):
    """Return the decoded text body of *r*, inflating gzip responses."""
    if r.content_encoding != 'gzip':
        # Let the response object decode itself with the resolved charset.
        r.charset = get_charset(r, user_charset)
        return r.text
    raw = gzip.GzipFile(fileobj=BytesIO(r.body)).read()
    return raw.decode(get_charset(r, user_charset))
def safe_dds_and_data(r, user_charset):
    """Split a .dods response into its decoded DDS text and raw data bytes."""
    body = (gzip.GzipFile(fileobj=BytesIO(r.body)).read()
            if r.content_encoding == 'gzip' else r.body)
    # The DDS header is separated from the binary payload by a marker line.
    dds, data = body.split(b'\nData:\n', 1)
    return dds.decode(get_charset(r, user_charset)), data
class BaseProxy(object):
    """A proxy for remote base types.

    This class behaves like a Numpy array, proxying the data from a base type
    on a remote dataset.
    """

    def __init__(self, baseurl, id, dtype, shape, slice_=None,
                 application=None, session=None, timeout=DEFAULT_TIMEOUT,
                 verify=True, user_charset='ascii'):
        self.baseurl = baseurl
        self.id = id
        self.dtype = dtype
        self.shape = shape
        # Default to the full extent in every dimension.
        self.slice = slice_ or tuple(slice(None) for s in self.shape)
        self.application = application
        self.session = session
        self.timeout = timeout
        self.verify = verify
        self.user_charset = user_charset

    def __repr__(self):
        return 'BaseProxy(%s)' % ', '.join(
            map(repr, [
                self.baseurl, self.id, self.dtype, self.shape, self.slice]))

    def __getitem__(self, index):
        # BUG FIX: __getitem__ previously took an extra ``user_charset``
        # parameter.  The subscription protocol calls it with a single key
        # only, so every ``self[...]`` below (__iter__ and the comparison
        # operators) raised TypeError.  Use the charset stored on the
        # instance instead.

        # build download url
        index = combine_slices(self.slice, fix_slice(index, self.shape))
        scheme, netloc, path, query, fragment = urlsplit(self.baseurl)
        url = urlunsplit((
            scheme, netloc, path + '.dods',
            quote(self.id) + hyperslab(index) + '&' + query,
            fragment)).rstrip('&')

        # download and unpack data
        logger.info("Fetching URL: %s" % url)
        r = GET(url, self.application, self.session, timeout=self.timeout,
                verify=self.verify)
        raise_for_status(r)
        dds, data = safe_dds_and_data(r, self.user_charset)

        # Parse received dataset:
        dataset = build_dataset(dds)
        dataset.data = unpack_data(BytesReader(data), dataset)
        return dataset[self.id].data

    def __len__(self):
        return self.shape[0]

    def __iter__(self):
        return iter(self[:])

    # Comparisons return a boolean array
    def __eq__(self, other):
        return self[:] == other

    def __ne__(self, other):
        return self[:] != other

    def __ge__(self, other):
        return self[:] >= other

    def __le__(self, other):
        return self[:] <= other

    def __gt__(self, other):
        return self[:] > other

    def __lt__(self, other):
        return self[:] < other
class SequenceProxy(object):
    """A proxy for remote sequences.

    This class behaves like a Numpy structured array, proxying the data from a
    sequence on a remote dataset. The data is streamed from the dataset,
    meaning it can be treated one record at a time before the whole data is
    downloaded.
    """

    # Sequences stream an a-priori unknown number of records, so they have
    # no fixed shape.
    shape = ()

    def __init__(self, baseurl, template, selection=None, slice_=None,
                 application=None, session=None, timeout=DEFAULT_TIMEOUT,
                 verify=True):
        # baseurl: DAP base URL of the dataset (no .dods suffix).
        # template: variable object describing the sequence's structure.
        # selection: list of constraint-expression strings applied server-side.
        # slice_: tuple of slices restricting which records are retrieved.
        self.baseurl = baseurl
        self.template = template
        self.selection = selection or []
        self.slice = slice_ or (slice(None),)
        self.application = application
        self.session = session
        self.timeout = timeout
        self.verify = verify

        # this variable is true when only a subset of the children are selected
        self.sub_children = False

    @property
    def dtype(self):
        # The template knows the record structure, so delegate to it.
        return self.template.dtype

    def __repr__(self):
        return 'SequenceProxy(%s)' % ', '.join(
            map(repr, [
                self.baseurl, self.template, self.selection, self.slice]))

    def __copy__(self):
        """Return a lightweight copy of the object."""
        # NOTE(review): the 5th positional argument here is `application`;
        # session, timeout and verify are not propagated to the copy --
        # confirm whether copies are intended to drop the session.
        return self.__class__(self.baseurl, self.template, self.selection[:],
                              self.slice[:], self.application)

    def __getitem__(self, key):
        """Return a new object representing a subset of the data."""
        out = copy.copy(self)

        # return the data for a children
        if isinstance(key, string_types):
            out.template = out.template[key]

        # return a new object with requested columns
        elif isinstance(key, list):
            out.sub_children = True
            out.template._visible_keys = key

        # return a copy with the added constraints
        elif isinstance(key, ConstraintExpression):
            out.selection.extend(str(key).split('&'))

        # slice data
        else:
            if isinstance(key, int):
                key = slice(key, key+1)
            out.slice = combine_slices(self.slice, (key,))

        return out

    @property
    def url(self):
        """Return url from where data is fetched."""
        scheme, netloc, path, query, fragment = urlsplit(self.baseurl)
        url = urlunsplit((
            scheme, netloc, path + '.dods',
            self.id + hyperslab(self.slice) + '&' +
            '&'.join(self.selection), fragment)).rstrip('&')

        return url

    @property
    def id(self):
        """Return the id of this sequence."""
        # When only some children are projected, the id is the comma-joined
        # list of child ids instead of the sequence's own id.
        if self.sub_children:
            id_ = ','.join(
                quote(child.id) for child in self.template.children())
        else:
            id_ = quote(self.template.id)
        return id_

    def __iter__(self):
        # download and unpack data
        r = GET(self.url, self.application, self.session, timeout=self.timeout,
                verify=self.verify)
        raise_for_status(r)

        i = r.app_iter
        if not hasattr(i, '__next__'):
            i = iter(i)

        # Fast forward past the DDS header
        # the pattern could span chunk boundaries though so make sure to check
        pattern = b'Data:\n'
        last_chunk = find_pattern_in_string_iter(pattern, i)

        if last_chunk is None:
            raise ValueError("Could not find data segment in response from {}"
                             .format(self.url))

        # Then construct a stream consisting of everything from
        # 'Data:\n' to the end of the chunk + the rest of the stream
        def stream_start():
            yield last_chunk

        stream = StreamReader(chain(stream_start(), i))

        return unpack_sequence(stream, self.template)

    # Comparison operators build server-side constraint expressions rather
    # than comparing data locally.
    def __eq__(self, other):
        return ConstraintExpression('%s=%s' % (self.id, encode(other)))

    def __ne__(self, other):
        return ConstraintExpression('%s!=%s' % (self.id, encode(other)))

    def __ge__(self, other):
        return ConstraintExpression('%s>=%s' % (self.id, encode(other)))

    def __le__(self, other):
        return ConstraintExpression('%s<=%s' % (self.id, encode(other)))

    def __gt__(self, other):
        return ConstraintExpression('%s>%s' % (self.id, encode(other)))

    def __lt__(self, other):
        return ConstraintExpression('%s<%s' % (self.id, encode(other)))
def unpack_sequence(stream, template):
    """Unpack data from a sequence, yielding records.

    ``stream`` must be positioned at the start of the encoded sequence;
    ``template`` describes the structure of each record.
    """
    # is this a sequence or a base type?
    sequence = isinstance(template, SequenceType)

    # if there are no children, we use the template as the only column
    cols = list(template.children()) or [template]

    # if there are no strings and no nested sequences we can unpack record by
    # record easily
    simple = all(isinstance(c, BaseType) and c.dtype.char not in "SU"
                 for c in cols)

    if simple:
        # Fixed-size records: build a single structured dtype and decode each
        # record with one frombuffer call.
        dtype = np.dtype([("", c.dtype, c.shape) for c in cols])
        # Each record is preceded by a 4-byte marker; any value other than
        # START_OF_SEQUENCE terminates the loop.
        marker = stream.read(4)
        while marker == START_OF_SEQUENCE:
            rec = np.frombuffer(stream.read(dtype.itemsize), dtype=dtype)[0]
            if not sequence:
                # A bare base type yields scalars rather than 1-tuples.
                rec = rec[0]
            yield rec
            marker = stream.read(4)
    else:
        # Variable-size records (strings or nested sequences): decode field
        # by field via unpack_children.
        marker = stream.read(4)
        while marker == START_OF_SEQUENCE:
            rec = unpack_children(stream, template)
            if not sequence:
                rec = rec[0]
            else:
                rec = tuple(rec)
            yield rec
            marker = stream.read(4)
def unpack_children(stream, template):
    """Unpack children from a structure, returning their data.

    Returns a flat list with one entry per child of ``template`` (or for
    ``template`` itself when it has no children).
    """
    cols = list(template.children()) or [template]

    out = []
    for col in cols:
        # sequences and other structures
        if isinstance(col, SequenceType):
            # Nested sequences are fully materialized and wrapped in IterData.
            out.append(IterData(list(unpack_sequence(stream, col)), col))
        elif isinstance(col, StructureType):
            out.append(tuple(unpack_children(stream, col)))

        # unpack arrays
        else:
            out.extend(convert_stream_to_list(stream, col.dtype, col.shape,
                                              col.id))
    return out
def convert_stream_to_list(stream, parser_dtype, shape, id):
out = []
response_dtype = DAP2_response_dtypemap(parser_dtype)
if shape:
n = np.frombuffer(stream.read(4), DAP2_ARRAY_LENGTH_NUMPY_TYPE)[0]
count = response_dtype.itemsize * n
if response_dtype.char in 'S':
# Consider on 'S' and not 'SU' because
# response_dtype.char should never be
data = []
for _ in range(n):
k = np.frombuffer(stream.read(4),
DAP2_ARRAY_LENGTH_NUMPY_TYPE)[0]
data.append(stream.read(k))
stream.read(-k % 4)
out.append(np.array([text_type(x.decode('ascii'))
for x in data], 'S').reshape(shape))
else:
stream.read(4) # read additional length
try:
out.append(
np.frombuffer(
stream.read(count), response_dtype)
.astype(parser_dtype).reshape(shape))
except ValueError as e:
if str(e) == 'total size of new array must be unchanged':
# server-side failure.
# it is expected that the | |
np.argsort(cat['MAG_AUTO'][use])
if verbose:
print('# {0:6s} {1:12s} {2:12s} {3:7s} {4} {5}'.format('id', 'ra',
'dec', 'mag',
'nDQ', 'nSat'))
for line in cat[use][so]:
rd = line['X_WORLD'], line['Y_WORLD']
nset = []
nsat = []
for i in range(N):
xi, yi = wcs[i].all_world2pix([rd[0],], [rd[1],], 0)
r = np.sqrt((xp-xi[0])**2 + (yp-yi[0])**2)
unset = (r <= 3) & ((images[i]['DQ'].data & 4096) > 0)
nset.append(unset.sum())
if nset[i] > 0:
images[i]['DQ'].data[unset] -= 4096
# Fill saturated with EPSF fit
satpix = (r <= 10) & (((images[i]['DQ'].data & 256) > 0) | ((images[i]['DQ'].data & 2048) > 0))
nsat.append(satpix.sum())
if nsat[i] > 0:
xpi = int(np.round(xi[0]))
ypi = int(np.round(yi[0]))
slx = slice(xpi-cutout_size, xpi+cutout_size)
sly = slice(ypi-cutout_size, ypi+cutout_size)
sci = images[i]['SCI'].data[sly, slx]
dq = images[i]['DQ'].data[sly, slx]
dqm = dq - (dq & 2048)
err = images[i]['ERR'].data[sly, slx]
mask = satpix[sly, slx]
ivar = 1/err**2
ivar[(~np.isfinite(ivar)) | (dqm > 0)] = 0
# Fit the EPSF model
try:
psf_filter = images[0][0].header['FILTER']
Np = 15
guess = [cutout_size-1, cutout_size-1]
#guess = None
tol = 1.e-3
psf_params = EPSF.fit_ePSF(sci, ivar=ivar, center=None,
tol=tol, N=Np,
origin=(ypi-cutout_size, xpi-cutout_size),
filter=psf_filter, get_extended=True,
method='Powell', only_centering=True,
guess=guess, psf_params=None)
result = EPSF.fit_ePSF(sci, ivar=ivar, center=None,
tol=tol, N=Np,
origin=(ypi-cutout_size, xpi-cutout_size),
filter=psf_filter, get_extended=True,
method='Powell', only_centering=True,
guess=guess, psf_params=psf_params)
psf, psf_bkg, psfA, psf_coeffs = result
# psf = EPSF.get_ePSF(psf_params,
# origin=(ypi-cutout_size, xpi-cutout_size),
# shape=sci.shape, filter=psf_filter,
# get_extended=True)
# if i == 0:
# break
except:
continue
sci[mask] = psf[mask]
dq[mask] -= (dq[mask] & 2048)
#dq[mask] -= (dq[mask] & 256)
#dq[mask] |= 512
if verbose:
print('{0:6d} {1:12.6f} {2:12.6f} {3:7.2f} {4} {5}'.format(
line['NUMBER'], rd[0], rd[1], line['MAG_AUTO'], nset, nsat))
# Overwrite image
for i in range(N):
images[i].flush()
if drizzle:
files = [flt.filename() for flt in images]
bits = 576
if root.startswith('par'):
pixfrac=1.0
else:
pixfrac=0.8
# Fix Nans:
for flt_file in files:
utils.fix_flt_nan(flt_file, bad_bit=4096, verbose=True)
AstroDrizzle(files, output=root,
clean=True, final_pixfrac=pixfrac, context=False,
resetbits=0, final_bits=bits, driz_sep_bits=bits,
preserve=False, driz_separate=False,
driz_sep_wcs=False, median=False, blot=False,
driz_cr=False, driz_cr_corr=False, build=False,
final_wht_type='IVM')
clean_drizzle(root)
#cat = make_drz_catalog(root=root)
cat = make_SEP_catalog(root=root)
def find_single_image_CRs(visit, simple_mask=False, with_ctx_mask=True,
                          run_lacosmic=True):
    """Use LACosmic to find CRs in parts of an ACS mosaic where only one
    exposure was available

    Parameters
    ----------
    visit : dict
        List of visit information from `~grizli.utils.parse_flt_files`.

    simple_mask : bool
        If true, set 1024 CR bit for all parts of a given FLT where it does
        not overlap with any others in the visit.  If False, then run
        LACosmic to flag CRs in this area but keep the pixels.

    with_ctx_mask : bool
        Restrict CR flagging to pixels where the context image shows a
        single contributing exposure.  Forced to False when no context
        image is found.

    run_lacosmic : bool
        Run LA Cosmic.

    Requires context (CTX) image `visit['product']+'_drc_ctx.fits`.
    """
    # Kept for its import side effects even though the name is unused here
    from drizzlepac import astrodrizzle

    try:
        import lacosmicx
        has_lacosmicx = True
    except Exception:
        # Broad (but not bare) catch: a broken optional dependency can fail
        # with more than ImportError.  Only warn if LACosmic was requested.
        if run_lacosmic:
            print('Warning (find_single_image_CRs): couldn\'t import lacosmicx')
            utils.log_exception(utils.LOGFILE, traceback)
            utils.log_comment(utils.LOGFILE, "# ! LACosmicx requested but not found")

        has_lacosmicx = False

    ctx_files = glob.glob(visit['product']+'_dr?_ctx.fits')
    has_ctx = len(ctx_files) > 0
    if has_ctx:
        ctx = pyfits.open(ctx_files[0])
        # Context image: each pixel is a bitmask of contributing exposures.
        # log2 is integral exactly when a single bit (one exposure) is set.
        bits = np.log2(ctx[0].data)
        mask = ctx[0].data == 0
        # np.cast and the np.float alias were removed from modern numpy;
        # builtin float keeps the original float64 dtype
        single_image = ((bits.astype(int) == bits) & (~mask)).astype(float)

        ctx_wcs = pywcs.WCS(ctx[0].header)
        ctx_wcs.pscale = utils.get_wcs_pscale(ctx_wcs)
    else:
        # Without a context image neither masking mode is meaningful
        simple_mask = False
        with_ctx_mask = False

    for file in visit['files']:
        flt = pyfits.open(file, mode='update')

        # WFPC2 stores DQ in a separate _c1 file with SCI extensions
        if '_c0' in file:
            dq_hdu = pyfits.open(file.replace('_c0','_c1'), mode='update')
            dq_extname = 'SCI'
        else:
            dq_hdu = flt
            dq_extname = 'DQ'

        for ext in [1,2,3,4]:
            if ('SCI',ext) not in flt:
                continue

            flt_wcs = pywcs.WCS(flt['SCI',ext].header, fobj=flt, relax=True)
            flt_wcs.pscale = utils.get_wcs_pscale(flt_wcs)

            if has_ctx:
                blotted = utils.blot_nearest_exact(single_image, ctx_wcs,
                                                   flt_wcs)
                ctx_mask = blotted > 0
            else:
                ctx_mask = np.zeros(flt['SCI',ext].data.shape, dtype=bool)

            sci = flt['SCI',ext].data
            dq = dq_hdu[dq_extname,ext].data

            if simple_mask:
                print('{0}: Mask image without overlaps, extension {1:d}'.format(file, ext))
                dq[ctx_mask] |= 1024
            else:
                print('{0}: Clean CRs with LACosmic, extension {1:d}'.format(file, ext))

                if with_ctx_mask:
                    # `blotted` is always bound here: with_ctx_mask is
                    # forced False above when there is no context image
                    inmask = blotted == 0
                else:
                    inmask = dq > 0

                if run_lacosmic and has_lacosmicx:
                    crmask, clean = lacosmicx.lacosmicx(sci, inmask=inmask,
                             sigclip=4.5, sigfrac=0.3, objlim=5.0, gain=1.0,
                             readnoise=6.5, satlevel=65536.0, pssl=0.0,
                             niter=4, sepmed=True, cleantype='meanmask',
                             fsmode='median', psfmodel='gauss',
                             psffwhm=2.5, psfsize=7, psfk=None, psfbeta=4.765,
                             verbose=False)
                else:
                    crmask = ctx_mask

                # Only flag (bit 1024); pixel values are left untouched
                if with_ctx_mask:
                    dq[crmask & ctx_mask] |= 1024
                else:
                    dq[crmask] |= 1024

        # Flush the separate WFPC2 DQ file explicitly rather than relying
        # on garbage collection to close (and thereby save) it
        if dq_hdu is not flt:
            dq_hdu.flush()

        flt.flush()
def drizzle_overlaps(exposure_groups, parse_visits=False, check_overlaps=True, max_files=999, pixfrac=0.8, scale=0.06, skysub=True, skymethod='localmin', skyuser='MDRIZSKY', bits=None, build=False, final_wcs=True, final_rot=0, final_outnx=None, final_outny=None, final_ra=None, final_dec=None, final_wht_type='EXP', final_wt_scl='exptime', final_kernel='square', context=False, static=True, use_group_footprint=False, fetch_flats=True, fix_wcs_system=False, include_saturated=False, run_driz_cr=False, driz_cr_snr=None, driz_cr_scale=None, resetbits=0, log=False):
"""Combine overlapping visits into single output mosaics
Parameters
----------
exposure_groups : list
Output list of visit information from `~grizli.utils.parse_flt_files`.
parse_visits : bool
If set, parse the `exposure_groups` list for overlaps with
`~grizli.utils.parse_visit_overlaps`, otherwise assume that it has
already been parsed.
check_overlaps: bool
Only pass exposures that overlap with the desired output mosaic to
AstroDrizzle.
max_files : bool
Split output products if the number of exposures in a group is greater
than `max_files`. Default value of 999 appropriate for AstroDrizzle,
which crashes because it tries to create a header keyword with only
three digits (i.e., 0-999).
pixfrac : float
`~drizzlepac.astrodrizzle.AstroDrizzle` "pixfrac" value.
scale : type
`~drizzlepac.astrodrizzle.AstroDrizzle` "scale" value, output pixel
scale in `~astropy.units.arcsec`.
skysub : bool
Run `~drizzlepac.astrodrizzle.AstroDrizzle` sky subtraction.
bits : None or int
Data quality bits to treat as OK. If None, then default to 64+32 for
ACS and 512+64 for WFC3/IR.
final_* : Parameters passed through to AstroDrizzle to define output WCS
Note that these are overridden if an exposure group has a 'reference'
keyword pointing to a reference image / WCS.
Returns
-------
Produces drizzled images.
"""
if log:
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'prep.drizzle_overlaps')
from drizzlepac.astrodrizzle import AstroDrizzle
from shapely.geometry import Polygon
if parse_visits:
exposure_groups = utils.parse_visit_overlaps(exposure_groups, buffer=15.)
## Drizzle can only handle 999 files at a time
if check_overlaps:
for group in exposure_groups:
if 'reference' not in group:
continue
if 'footprints' in group:
footprints = group['footprints']
elif ('footprint' in group) & use_group_footprint:
footprints = [group['footprint']]*len(group['files'])
else:
footprints = []
files=group['files']
for i in range(len(files)):
print(i, files[i])
im = pyfits.open(files[i])
p_i = None
for ext in [1,2,3,4]:
if ('SCI',ext) in im:
wcs = pywcs.WCS(im['SCI',ext], fobj=im)
fp_x = wcs.calc_footprint()
if p_i is None:
p_i = Polygon(fp_x)
else:
p_i = p_i.union(fp_x)
footprints.append()
ref = pyfits.getheader(group['reference'])
wcs = pywcs.WCS(ref)
ref_fp = Polygon(wcs.calc_footprint())
files = []
out_fp = []
if 'awspath' in group:
aws = []
for j in range(len(group['files'])):
olap = ref_fp.intersection(footprints[j])
if olap.area > 0:
files.append(group['files'][j])
if 'awspath' in group:
aws.append(group['awspath'][j])
out_fp.append(footprints[j])
print(group['product'], len(files), len(group['files']))
group['files'] = files
group['footprints'] = out_fp
if 'awspath' in group:
group['awspath'] = aws
# Download the file from aws. The 'awspath' entry
# is a list with the same length of 'files', and starts with
# the bucket name.
if 'awspath' in group:
import boto3
session = boto3.Session()
s3 = boto3.resource('s3')
bkt = None
for awspath, file in zip(group['awspath'], group['files']):
if os.path.exists(file):
continue
spl = awspath.split('/')
bucket_name=spl[0]
path_to_file = '/'.join(spl[1:])
if bkt is None:
bkt = s3.Bucket(bucket_name)
else:
if bkt.name != bucket_name:
bkt = s3.Bucket(bucket_name)
s3_file = (path_to_file+'/'+file).replace('//','/')
print('Fetch from s3: s3://{0}/{1}'.format(bucket_name, s3_file))
bkt.download_file(s3_file, file,
ExtraArgs={"RequestPayer": "requester"})
if max_files > 0:
all_groups = []
for group in exposure_groups:
N = len(group['files']) // int(max_files) +1
if N == 1:
all_groups.append(group)
else:
for k in range(N):
sli = slice(k*max_files,(k+1)*max_files)
files_list = group['files'][sli]
root='{0}-{1:03d}'.format(group['product'], k)
g_k = OrderedDict(product=root,
files=files_list,
reference=group['reference'])
if 'footprints' in group:
g_k['footprints'] = group['footprints'][sli]
all_groups.append(g_k)
else:
all_groups = exposure_groups
for group in all_groups:
if len(group['files']) == 0:
continue
isACS = '_flc' in group['files'][0]
isWFPC2 = '_c0' in group['files'][0]
if (driz_cr_snr is None) | (driz_cr_scale is None):
if isACS:
driz_cr_snr = '3.5 3.0'
driz_cr_scale = '1.2 0.7'
elif | |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import statsmodels.api as sm
import datetime as dt
from statsmodels.stats.multitest import fdrcorrection
from pylab import savefig
# FUNCTIONS YOU CAN USE:
# analyses(filepath) spits out a nifty heatmap to let you check correlation between variables
#
# regress(option, df) churns out a saucy graph of the linear regression for the variables you provided, where
# option is 'snr_total' or 'tsnr', whichever you want to make the dependent variable of your model
# df is the pandas DataFrame containing your data. To modify which variables you want in your model, you'll
# have to directly modify the regress function
# NOTABLE FILENAMES
# ../data/extractions/p2_BOLD.csv - all dates for p2_BOLD
# ../data/extractions/p2Xs4X35mm_BOLD.csv - all dates for p2Xs4X35mm_BOLD
# ../data/extractions/anat.csv - all possible dates for anatomical data
def filter(option, df):
    """Return only the rows of *df* for the requested acquisition type.

    Parameters
    ----------
    option : str
        'p2' for the task-rest_acq-p2_bold scans, 'x' for the
        task-rest_acq-p2Xs4X35mm_bold scans.
    df : pandas.DataFrame
        Extraction table with a 'Filetype' column.

    Returns
    -------
    pandas.DataFrame
        The matching subset of rows.

    Raises
    ------
    ValueError
        If *option* is not 'x' or 'p2'.  (Previously the function silently
        returned None here, which hid typos at call sites.)
    """
    is_p2 = df['Filetype'] == "task-rest_acq-p2_bold.json"
    is_x = df['Filetype'] == "task-rest_acq-p2Xs4X35mm_bold.json"
    if option == 'x':
        return df[is_x]
    elif option == 'p2':
        return df[is_p2]
    raise ValueError("option must be 'x' or 'p2', got %r" % (option,))
def analyses(filepath):
    """Run basic sanity checks on an extraction CSV.

    Verifies that the conversion software field (column 7) is identical for
    every record, prints the result, then renders a correlation heatmap of
    the table and writes it to 'heatmap.svg'.
    """
    files = pd.read_csv(filepath)

    # FIRST CHECK: CONVERSION SOFTWARE VERSIONS
    # Compare every record's column 7 against the first record's
    reference = files.iloc[0, 7]
    valid = all(files.iloc[idx, 7] == reference for idx in files.index)
    print("All Conversion Softwares are the same: " + str(valid))

    # SECOND CHECK: HEATMAP of pairwise correlations
    palette = sns.diverging_palette(h_neg=240, h_pos=10, n=9, sep=1,
                                    center="dark")
    figure = sns.heatmap(files.corr(), cmap=palette, center=0)
    figure.get_figure().savefig('heatmap.svg', pad_inches = 0.1)
def add_seasonal_simple(df, col='Date', start='2017-01-01'):
    """Attach very simplistic yearly-seasonality regressors to *df* in place.

    The elapsed whole days since *start* are turned into an angle (one full
    circle per 365.25 days); its sine and cosine are stored as the new
    columns 'Seasonal (sin)' and 'Seasonal (cos)'.
    """
    elapsed = df[col] - np.datetime64(start)
    # .days truncates to whole days, matching the original resolution
    angle = elapsed.apply(lambda delta: delta.days) * 2 * np.pi / 365.25
    df['Seasonal (sin)'] = np.sin(angle)
    df['Seasonal (cos)'] = np.cos(angle)
def Ftest(model, var_prefix, queue, prints=False):
    """Jointly F-test all model terms whose name starts with *var_prefix*.

    When the joint test is significant (p < 0.05) for the known grouped
    regressors ("Shim" -> Shim1..Shim8, "IOPD" -> IOPD1..IOPD6), the
    component names are appended to *queue*.

    Returns the statsmodels F-test result, or None when the model contains
    no matching terms.
    """
    matching = [name for name in model.params.index
                if name.startswith(var_prefix)]
    if not matching:
        if prints:
            print("No %s variables in the model" % var_prefix)
        return None

    # Joint null hypothesis: every matching coefficient equals zero
    f_test = model.f_test(' = '.join(matching) + " = 0")
    if f_test.pvalue < 0.05:
        group_sizes = {"Shim": 8, "IOPD": 6}
        for k in range(group_sizes.get(var_prefix, 0)):
            queue.append(var_prefix + str(k + 1))
    if prints:
        print("%s F-test: %s" % (var_prefix, f_test))
    return f_test
# adapted from the nipy helper originally named _orthogonalize
def orthogonalize(X):
    """Orthogonalize every column of design `X` w.r.t. preceding columns.

    Parameters
    ----------
    X : array of shape (n, p)
        The data to be orthogonalized.

    Returns
    -------
    X : array
        The same array, modified in place; columns are not normalized.
    """
    # A single-column design has nothing to orthogonalize against
    if X.size == X.shape[0]:
        return X
    for col in range(1, X.shape[1]):
        previous = X[:, :col]
        # previous @ pinv(previous) is the orthogonal projector onto
        # span(previous); subtract that projection from the current column
        X[:, col] -= X[:, col] @ (previous @ np.linalg.pinv(previous))
    return X
def regress(target_variable, model_df, plot=True, print_summary=True, add_qa=True, add_seasonal=True, real_data=False):
    """
    creates a regression graph plotted against actual data from certain QA metrics

    Parameters
    ----------
    target_variable: takes str value of either snr_total or tsnr to model against
    model_df : takes pandas DataFrame with data to be used for predictive modeling
    plot : boolean to turn the plotted graph on/off
    print_summary : boolean to turn the printed summary of OLS regression on/off
    add_qa : boolean to add/not add snr_total_qa into list of variables to be modeled
    add_seasonal : boolean to add/not add seasonal variables into list of variables to be modeled
    real_data : boolean to indicate whether or not the pandas DataFrame being fed in is from real data or not

    Returns
    -------
    The fitted statsmodels OLS results object (also when plot=False), or an
    error string when model_df is not a pandas DataFrame.
    """
    if type(model_df) is not pd.core.frame.DataFrame:
        return "DataFrame must be of type pandas.core.frame.DataFrame"

    ########## adding seasonal curves to the model
    add_seasonal_simple(model_df)

    ########## Converting date to a format that can be parsed by statsmodels API
    model_df = model_df.copy()
    # keep the original Date column so the plot can use real dates later
    date_df = model_df['Date']
    model_df['Date'] = pd.to_datetime(model_df['Date'], format="%Y%m%d")
    model_df['Date'] = model_df['Date'].map(lambda x: x.toordinal())

    # Groups of regressors that get a joint F-test instead of per-term tests
    f_tests_todo = ['IOPD']
    excluded_cols = ['Date', 'IOPD1', 'IOPD2', 'IOPD3', 'IOPD4', 'IOPD5', 'IOPD6', 'Seasonal (sin)', 'Seasonal (cos)']
    seasonal_cols = ['Seasonal (sin)', 'Seasonal (cos)',]
    cols = ['Date']
    if not real_data:
        # preparing model_df for orthogonalization (simulated-data schema)
        cols += ['AcquisitionTime', 'SAR', 'TxRefAmp',
                 'IOPD1', 'IOPD2', 'IOPD3', 'IOPD4', 'IOPD5', 'IOPD6']
        if add_seasonal:
            cols += seasonal_cols
    else:
        # real-data schema uses subject covariates and "_real" IOPD columns
        cols += ['age', 'sex_male', 'PatientWeight',]
        if add_seasonal:
            cols += seasonal_cols
        if add_qa:
            cols += ['snr_total_qa']
        cols += ['IOPD1_real', 'IOPD2_real', 'IOPD3_real', 'IOPD4_real', 'IOPD5_real', 'IOPD6_real']
    if add_seasonal:
        f_tests_todo += ['Seasonal']
    cols.append(target_variable)
    model_df = model_df[cols]

    # There is apparently a sample date (20170626) with SAR being unknown None/NaN
    # For now we will just filter out those samples
    if 'SAR' in model_df.columns:
        finite_SAR = np.isfinite(model_df['SAR'])
        if not np.all(finite_SAR):
            print("Following dates didn't have SAR, excluding them: %s" % str(model_df['Date'][~finite_SAR]))
            model_df = model_df[finite_SAR]

    orthogonalized_df = model_df.drop(target_variable, axis=1) # avoid orthogonalizing target variable
    cols = cols[:-1] # remove target variable from column list

    # orthogonalize dataframe after its conversion to NumPy array, then
    # convert back and replace in original model_df
    model_array = orthogonalize(orthogonalized_df.to_numpy())
    orthogonalized_df = pd.DataFrame(model_array)
    # NOTE: assigning [cols] (a nested list) yields tuple-style column
    # labels; they are unpacked back to plain names a few lines below
    orthogonalized_df.columns = [cols]
    orthogonalized_df[target_variable] = pd.Series(model_df[target_variable])
    model_df = orthogonalized_df

    # add datetime64[ns] formatted date time
    model_df.columns=[x[0] for x in model_df.columns]
    model_df['Date'] = pd.to_datetime(model_df['Date'])
    model_df = model_df.drop('Date', axis=1)
    model_df['Date'] = date_df

    ########## Assigning independent and dependent variables
    model_vars = []
    # Series.iteritems() was removed in pandas 2.0; .items() is equivalent
    # and exists in older pandas versions as well
    for item in model_df.std().items():
        if item[0] != 'Date' and item[0] != target_variable:
            model_vars.append(item[0])
    X = model_df[model_vars]
    y = model_df[target_variable]
    # mean-center the regressors before adding the intercept
    X = X.sub(X.mean())
    X = sm.add_constant(X)
    model_df = sm.add_constant(model_df)

    ########## modeling predictions
    model = sm.OLS(y, X).fit()
    predictions = model.predict(X)

    ################ CODE FOR TESTING INDIVIDUAL VARIABLE EFFECTS ####################
    significant_variables = []
    # Joint F-tests for the grouped regressors; Ftest also appends the
    # group's component names to significant_variables when significant
    F_tests_pvals = {
        v: float(Ftest(model, v, significant_variables).pvalue)
        for v in f_tests_todo
    }
    # get p-values
    for key, value in dict(model.pvalues).items():
        if key not in significant_variables and value < 0.05 or key.lower() == 'const':
            # collect statistically significant variables (and the constant)
            significant_variables.append(key)

    ######## set all other variables to 0, then predict each partial fit
    partial_fits = {}
    for variable in significant_variables:
        X2 = X.copy(True) # prepare for mods
        for col in X2:
            if col != variable:
                X2[col] = 0
        partial_fits[str(variable)] = model.predict(X2)
    if print_summary:
        print("Statistically significant variables: " + str(significant_variables))
    ################ END CODE FOR TESTING INDIVIDUAL VARIABLE EFFECTS ####################

    # Functionality for carrying out FDR correction
    outvars = {} # dict containing all predictive variables and their p values from the model
    for var in cols:
        # grouped regressors are covered by their joint F-test instead
        is_f_test = False
        for f_test in f_tests_todo:
            if var.startswith(f_test):
                is_f_test = True
                break
        if is_f_test:
            continue
        if var not in excluded_cols:
            var_pvalue = getattr(model.pvalues, var)
            outvars[var] = var_pvalue
    outvars.update(F_tests_pvals) # add previously conducted F test p values to the outvars
    FDR_tuple = fdrcorrection(list(outvars.values())) # actual FDR test conduct
    t_f = list(FDR_tuple[0]) # split tuple into true/false array
    FDR_pvals = list(FDR_tuple[1]) # split tuple into p value array
    print("FDR-corrected p-values:")
    for (var, value), fdr_pval, is_sign in zip(outvars.items(), FDR_pvals, t_f):
        print("%15s | Original p-value: %8.3g" % (var, value) +
              " | FDR-corrected p-value: %8.3g%s" % (fdr_pval, '**' if is_sign else ''))
    print("\n")

    # giving additional data
    if print_summary:
        print(model.summary())
        print("AIC: " + str(model.aic))
        print("BIC: " + str(model.bic))
    if not plot:
        return model

    ######### converting the above predictions to a format that can be plotted
    plot_df = predictions.to_frame() # new DataFrame containing only data needed for the plot
    plot_df.columns = ['full fit']
    plot_df = plot_df.join(model_df['Date'])
    plot_df = plot_df.join(model_df[target_variable])

    # sum the per-variable partial fits into a single 'partial fit' column
    summation_df = None
    for key, value in partial_fits.items():
        column = value.to_frame()
        column.columns = ['partial fit']
        if summation_df is None:
            summation_df = column # used to add up the values
        else:
            summation_df = summation_df.add(column, axis=1)
    plot_df = pd.concat([plot_df, summation_df], axis=1)

    # plotting the graph
    plt.figure(figsize=(15, 6))
    ax = sns.lineplot(x="Date", y=target_variable, data=plot_df, color="#000000")
    # partial fit goes on a secondary y-axis
    ax_partial = plt.twinx()
    sns.lineplot(x="Date", y="full fit", data=plot_df, color="r", ax=ax)
    if partial_fits:
        sns.lineplot(x="Date", y="partial fit", data=plot_df, color="#ffcccc", ax=ax_partial)
    plt.ylim(145, 305)
    ax_partial.legend(['partial fit'])
    ax.legend(['actual', 'full fit'], loc='upper left')
    plt.savefig("test.svg")
    return model
def scrape_var_significance(targets, p_var, df):
dummy = [] # dud list for Seasonal f test comparison
columns = ['Variable', p_var + ' p value', 'R2 value']
result = pd.DataFrame(columns = columns)
raw_pvals = []
for target in targets:
input_df = pd.DataFrame(df,columns=['Date', 'sid', 'ses', | |
<reponame>antmicro/pyvidctrl
import curses
import string
from fcntl import ioctl
from v4l2 import *
from .widgets import *
KEY_ESCAPE = "\x1b"
class CtrlWidget(Row):
    """
    Base control widget class

    Each CtrlWidget is a label with a name of
    its control and another label with text
    'Not implemented!'. Child CtrlWidgets should
    replace that with their specific widget.
    """

    # Class-level toggle shared by every control so the statusline is
    # shown/hidden for all widgets at once (see toggle_statusline)
    show_statusline = False

    def __init__(self, device, ctrl):
        # device: open V4L2 device used for the ioctl calls below
        # ctrl: queried control descriptor (id, name, type, flags, ...)
        self.device = device
        self.ctrl = ctrl
        self.name = ctrl.name.decode("ascii")
        self.label = Label(self.name)
        self.widget = Label("Not implemented!", align="center")
        self._statusline = Label("Statusline")

        # Row layout: name | spacer | value widget, in a 4:1:4 ratio
        super().__init__(self.label, Label(""), self.widget, columns=(4, 1, 4))

    @staticmethod
    def create(device, ctrl):
        """
        Creates and returns CtrlWidget depending
        on type of the passed ctrl

        Raises KeyError for control types without a widget implementation.
        """
        return {
            V4L2_CTRL_TYPE_INTEGER: IntCtrl,
            V4L2_CTRL_TYPE_BOOLEAN: BoolCtrl,
            V4L2_CTRL_TYPE_MENU: MenuCtrl,
            V4L2_CTRL_TYPE_BUTTON: ButtonCtrl,
            V4L2_CTRL_TYPE_INTEGER64: Int64Ctrl,
            V4L2_CTRL_TYPE_CTRL_CLASS: CtrlClassCtrl,
            V4L2_CTRL_TYPE_STRING: StringCtrl,
            V4L2_CTRL_TYPE_BITMASK: BitmaskCtrl,
            V4L2_CTRL_TYPE_INTEGER_MENU: IntMenuCtrl,
        }[ctrl.type](device, ctrl)

    @property
    def value(self):
        # Read the current control value via VIDIOC_G_CTRL;
        # returns None when the ioctl fails (e.g. write-only controls)
        gctrl = v4l2_control()
        gctrl.id = self.ctrl.id

        try:
            ioctl(self.device, VIDIOC_G_CTRL, gctrl)
        except OSError:
            return None

        return gctrl.value

    @value.setter
    def value(self, value):
        # Write the control value via VIDIOC_S_CTRL
        sctrl = v4l2_control()
        sctrl.id = self.ctrl.id
        sctrl.value = value

        try:
            ioctl(self.device, VIDIOC_S_CTRL, sctrl)
        except OSError:
            # can fail as some controls can be read-only
            # both explicitly (by setting flag) or implicitly
            return

    def update(self):
        """
        Updates child widgets with its value
        Also re-query entire control to update flags
        """
        # Flags (e.g. inactive/grabbed) can change at runtime, so the
        # whole descriptor is re-queried, not just the value
        ctrl = v4l2_query_ext_ctrl()
        ctrl.id = self.ctrl.id
        ioctl(self.device, VIDIOC_QUERY_EXT_CTRL, ctrl)
        self.ctrl = ctrl

        v = self.value
        for w in self.widgets:
            w.value = v

    def get_flags_str(self):
        """Return a comma-separated, human-readable list of set control flags."""
        flags = self.ctrl.flags
        ret = []
        if flags & V4L2_CTRL_FLAG_DISABLED:
            ret.append("disabled")
        if flags & V4L2_CTRL_FLAG_GRABBED:
            ret.append("grabbed")
        if flags & V4L2_CTRL_FLAG_READ_ONLY:
            ret.append("read only")
        if flags & V4L2_CTRL_FLAG_UPDATE:
            ret.append("update")
        if flags & V4L2_CTRL_FLAG_INACTIVE:
            ret.append("inactive")
        if flags & V4L2_CTRL_FLAG_SLIDER:
            ret.append("slider")
        if flags & V4L2_CTRL_FLAG_WRITE_ONLY:
            ret.append("write only")
        if flags & V4L2_CTRL_FLAG_VOLATILE:
            ret.append("volatile")
        if flags & V4L2_CTRL_FLAG_HAS_PAYLOAD:
            ret.append("has payload")
        if flags & V4L2_CTRL_FLAG_EXECUTE_ON_WRITE:
            ret.append("execute on write")
        if flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT:
            ret.append("modify layout")
        return ", ".join(ret)

    @property
    def statusline(self):
        # Subclasses override this with a type-specific summary Label
        return self._statusline

    def toggle_statusline(self):
        # Assign on the class so the toggle affects every control widget
        CtrlWidget.show_statusline = not CtrlWidget.show_statusline

    def draw_statusline(self, window):
        # One full-width row at the window origin, reverse video in
        # color pair 3
        _, w = window.getmaxyx()
        self.statusline.draw(window, w, 1, 0, 0,
                             curses.color_pair(3) | curses.A_REVERSE)

    def draw(self, window, w, h, x, y, color):
        """Updates itself and then draws"""
        self.update()
        super().draw(window, w, h, x, y, color)
class IntCtrl(CtrlWidget):
    """
    Integer type control widget

    Shows its value with a labeled bar and supports stepping either by
    the control's native step size or by whole percents of its range.
    """

    def __init__(self, device, ctrl):
        super().__init__(device, ctrl)

        self.bar = BarLabeled(ctrl.minimum, ctrl.maximum, self.value)
        self.widgets[2] = self.bar

    def change_step(self, x):
        """Move the value by x native steps of the control."""
        self.set_value(self.value + x * self.ctrl.step)

    def change_percent(self, x):
        """Move the value by x percent of the control's range."""
        span = self.ctrl.maximum - self.ctrl.minimum
        # For very narrow ranges one percent rounds to 0; fall back to
        # the native step so the control still moves
        one_percent = round(span / 100) or self.ctrl.step
        self.set_value(self.value + x * one_percent)

    def set_value(self, value):
        """Write value to the control, clamped to [minimum, maximum]."""
        low, high = self.ctrl.minimum, self.ctrl.maximum
        if value < low:
            self.value = low
        elif value > high:
            self.value = high
        else:
            self.value = int(value)

    @property
    def statusline(self):
        # The f"{name=}" forms embed these exact local names in the output
        minimum = self.ctrl.minimum
        maximum = self.ctrl.maximum
        step = self.ctrl.step
        default = self.ctrl.default
        value = self.value
        flags = self.get_flags_str()
        return Label(", ".join((
            "type=Integer",
            f"{minimum=}",
            f"{maximum=}",
            f"{step=}",
            f"{default=}",
            f"{value=}",
            f"{flags=}",
        )))
class BoolCtrl(CtrlWidget):
    """
    Boolean type control widget

    Uses TrueFalse to display its value
    """

    def __init__(self, device, ctrl):
        super().__init__(device, ctrl)

        self.widgets[2] = TrueFalse(self.value)

    def true(self):
        """Set the control on."""
        self.value = True

    def false(self):
        """Set the control off."""
        self.value = False

    def neg(self):
        """Toggle the control."""
        self.value = not self.value

    @property
    def statusline(self):
        # The f"{name=}" forms embed these exact local names in the output
        default = self.ctrl.default
        value = self.value
        flags = self.get_flags_str()
        return Label(", ".join((
            "type=Boolean",
            f"{default=}",
            f"{value=}",
            f"{flags=}",
        )))
class MenuCtrl(CtrlWidget):
    """
    Menu type control widget

    Uses Menu to display its value
    """

    def __init__(self, device, ctrl):
        super().__init__(device, ctrl)

        querymenu = v4l2_querymenu()
        querymenu.id = ctrl.id

        # Collect valid menu entries; indexes run inclusively from
        # ctrl.minimum to ctrl.maximum, but some may be unsupported
        options = {}
        for i in range(ctrl.minimum, ctrl.maximum + 1):
            querymenu.index = i
            try:
                ioctl(device, VIDIOC_QUERYMENU, querymenu)
                options[i] = querymenu.name.decode("ascii")
            except OSError:
                # querymenu can fail for given index, but there can
                # still be more valid indexes
                pass

        self.menu = Menu(options)
        self.widgets[2] = self.menu

    def next(self):
        """Selects next option"""
        self.menu.next()
        self.value = self.menu.value

    def prev(self):
        """Selects previous option"""
        self.menu.prev()
        self.value = self.menu.value

    @property
    def statusline(self):
        # The f"{name=}" forms embed these exact local names in the output
        minimum = self.ctrl.minimum
        maximum = self.ctrl.maximum
        default = self.ctrl.default
        value = self.value
        flags = self.get_flags_str()
        return Label(", ".join((
            "type=Menu",
            f"{minimum=}",
            f"{maximum=}",
            f"{default=}",
            f"{value=}",
            f"{flags=}",
        )))
class ButtonCtrl(CtrlWidget):
    """
    Button type control widget

    Uses Button with 'Click me' text
    """

    def __init__(self, device, ctrl):
        super().__init__(device, ctrl)
        self.widgets[2] = Button("Click me")

    def click(self):
        """
        Button type controls need to set its
        value to 1, and after a while they reset
        themselves to 0
        """
        self.value = 1

    @property
    def value(self):
        # Buttons hold no persistent state, so reading always reports 0
        return 0

    @value.setter
    def value(self, value):
        """
        Same as default, but needs to be here, as
        property method is reimplemented
        """
        sctrl = v4l2_control()
        sctrl.id = self.ctrl.id
        sctrl.value = value

        try:
            ioctl(self.device, VIDIOC_S_CTRL, sctrl)
        except OSError:
            # can fail as some controls can be read-only
            return

    @property
    def statusline(self):
        flags = self.get_flags_str()
        return Label(f"type=Button, {flags=}")
class Int64Ctrl(IntCtrl):
    """
    Integer64 type control widget

    Same as Integer one, except for statusline and that 64-bit values
    must travel through the extended-controls ioctl API instead of the
    plain VIDIOC_G_CTRL/VIDIOC_S_CTRL pair.
    """

    @property
    def value(self):
        # NOTE(review): `ctypes` is assumed to be re-exported by the
        # `from v4l2 import *` at the top of the file — confirm it is
        # actually in scope here
        ectrl = v4l2_ext_control()
        ectrl.id = self.ctrl.id

        ectrls = v4l2_ext_controls()
        ectrls.controls = ctypes.pointer(ectrl)
        ectrls.count = 1

        try:
            ioctl(self.device, VIDIOC_G_EXT_CTRLS, ectrls)
        except OSError:
            return None

        return ectrl.value64

    @value.setter
    def value(self, value):
        ectrl = v4l2_ext_control()
        ectrl.id = self.ctrl.id
        ectrl.value64 = value

        ectrls = v4l2_ext_controls()
        ectrls.controls = ctypes.pointer(ectrl)
        ectrls.count = 1

        try:
            ioctl(self.device, VIDIOC_S_EXT_CTRLS, ectrls)
        except OSError:
            # can fail as some controls can be read-only
            # both explicitly (by setting flag) or implicitly
            return

    @property
    def statusline(self):
        # The f"{name=}" forms embed these exact local names in the output
        minimum = self.ctrl.minimum
        maximum = self.ctrl.maximum
        step = self.ctrl.step
        default = self.ctrl.default
        value = self.value
        flags = self.get_flags_str()
        return Label(", ".join((
            "type=Integer64",
            f"{minimum=}",
            f"{maximum=}",
            f"{step=}",
            f"{default=}",
            f"{value=}",
            f"{flags=}",
        )))
class CtrlClassCtrl(CtrlWidget):
    """
    Control Class control widget

    A control class entry is only a category heading, so the usual
    name/value row is collapsed into a single centered label.
    """

    def __init__(self, device, ctrl):
        super().__init__(device, ctrl)
        heading = Label(self.name, align="center")
        self.columns = (1, )
        self.widgets = [heading]
class StringCtrl(CtrlWidget):
    """
    String type control widget

    Uses TextField to display its value.
    Enter key toggles edit mode and Escape aborts edit mode and
    restores previous text.
    String type controls use minimum and maximum fields to limit
    number of characters stored.
    When upper limit is reached, then further keys are ignored
    (except Enter and Escape).
    When minimum number of characters is not present, then spaces
    are appended at the end.
    """

    def __init__(self, device, ctrl):
        super().__init__(device, ctrl)

        self.text_field = TextField(self.value)
        self.widgets[2] = self.text_field

    @property
    def value(self):
        ectrl = v4l2_ext_control()
        ectrl.id = self.ctrl.id
        ectrl.size = self.ctrl.elem_size
        # allocate a receive buffer large enough for maximum chars + NUL
        ectrl.string = bytes(self.ctrl.maximum + 1)

        ectrls = v4l2_ext_controls()
        ectrls.controls = ctypes.pointer(ectrl)
        ectrls.count = 1

        try:
            ioctl(self.device, VIDIOC_G_EXT_CTRLS, ectrls)
        except OSError:
            return None

        return ectrl.string.decode("ascii")

    @value.setter
    def value(self, value):
        value = str(value)

        # pad with spaces up to the control's minimum length
        if len(value) < self.ctrl.minimum:
            value = " " * self.ctrl.minimum

        ectrl = v4l2_ext_control()
        ectrl.id = self.ctrl.id
        ectrl.string = value.encode("ascii")
        ectrl.size = self.ctrl.elem_size

        ectrls = v4l2_ext_controls()
        ectrls.controls = ctypes.pointer(ectrl)
        ectrls.count = 1

        try:
            ioctl(self.device, VIDIOC_S_EXT_CTRLS, ectrls)
        except OSError:
            # can fail as some controls can be read-only
            # both explicitly (by setting flag) or implicitly
            return

    def on_keypress(self, key):
        in_edit = self.text_field.in_edit

        if in_edit and key == "\n":
            # Enter while editing: leave edit mode and commit the buffer
            self.text_field.edit()
            self.value = self.text_field.buffer
        elif in_edit and ord(key) == curses.KEY_BACKSPACE:
            # NOTE(review): ord(key) of a single character can never equal
            # curses.KEY_BACKSPACE (263); terminals usually deliver
            # "\x7f" or "\x08" — verify backspace handling actually fires
            self.text_field.buffer = self.text_field.buffer[:-1]
        elif in_edit and key == KEY_ESCAPE:
            # Escape: abandon the edit and restore the previous text
            self.text_field.abort()
        elif in_edit:
            # Append typed characters until the maximum length is reached
            if len(self.text_field.buffer) < self.ctrl.maximum:
                self.text_field.buffer += key
        elif key == "\n":
            # Enter outside edit mode: start editing
            self.text_field.edit()
        else:
            return super().on_keypress(key)

    @property
    def statusline(self):
        # The f"{name=}" forms embed these exact local names in the output
        minimum = self.ctrl.minimum
        maximum = self.ctrl.maximum
        default = self.ctrl.default
        value = self.value
        flags = self.get_flags_str()
        return Label(", ".join((
            "type=String",
            f"{minimum=}",
            f"{maximum=}",
            f"{default=}",
            f"{value=}",
            f"{flags=}",
        )))
class BitmaskCtrl(CtrlWidget):
"""
Bitmask type control widget
Uses TextField to display its value.
Limits possible characters to valid hex digits.
"""
class BitmaskEditWidget(Widget):
def __init__(self, value=0):
self.value = value
self.in_edit = False
self.selected = 0
def draw(self, window, w, h, x, y, color):
render = (self.buffer if self.in_edit else self.value.to_bytes(
4, "big").hex())
if self.in_edit:
left_w = (w - len(render) + 1) // 2
safe_addstr(window, y, x, " " * left_w, color)
x += left_w
sel = self.selected
safe_addstr(window, y, x, render[:sel], color)
x += sel
safe_addstr(window, y, x, render[sel],
color | curses.A_REVERSE)
x += 1
safe_addstr(window, y, x, render[sel + 1:], color)
x += len(render) - sel - 1
right_w = w - len(render) - left_w
safe_addstr(window, y, x, " " * right_w, color)
else:
safe_addstr(window, y, x, render.center(w), color)
def set(self, char):
sel = self.selected
self.buffer = self.buffer[:sel] + char + self.buffer[sel + 1:]
def next(self):
if self.in_edit:
self.selected = min(self.selected + 1, 7)
def prev(self):
if | |
<filename>stentseg/stentdirect/tests/test_stentgraph.py<gh_stars>1-10
from __future__ import print_function, division, absolute_import
import numpy as np
import networkx as nx
from visvis import ssdf
from stentseg.utils.new_pointset import PointSet
from stentseg.stentdirect.stentgraph import (StentGraph, check_path_integrity,
_get_pairs_of_neighbours, add_nodes_at_crossings,
_detect_corners, _add_corner_to_edge,
_pop_node, pop_nodes,
prune_very_weak, prune_weak,
prune_clusters, prune_redundant, prune_tails,)
class TestStentGraph:
def test_prune_redundant1(self):
""" Test removing redundant edges on a graph with two triangles
that are connected by a single edge.
"""
# Create two triangles that are connected with a single edge
graph = StentGraph()
graph.add_edge(11, 12, cost=1, ctvalue=50)
graph.add_edge(12, 13, cost=3, ctvalue=50)
graph.add_edge(13, 11, cost=2, ctvalue=50)
#
graph.add_edge(21, 22, cost=2, ctvalue=60)
graph.add_edge(22, 23, cost=3, ctvalue=60)
graph.add_edge(23, 21, cost=1, ctvalue=60)
#
graph.add_edge(21, 11, cost=4, ctvalue=10)
assert graph.number_of_nodes() == 6
assert graph.number_of_edges() == 7
prune_redundant(graph, 55)
assert graph.number_of_nodes() == 6
assert graph.number_of_edges() == 6
prune_redundant(graph, 55)
assert graph.number_of_nodes() == 6
assert graph.number_of_edges() == 6
prune_redundant(graph, 65)
assert graph.number_of_nodes() == 6
assert graph.number_of_edges() == 5
prune_tails(graph, 2)
assert graph.number_of_nodes() == 2
assert graph.number_of_edges() == 1
def test_prune_redundant2(self):
""" Test removing redundant edges on a graph with two triangles
that are connected by a two edges, twice.
"""
# Create two triangles that are connected with a single edge
graph = StentGraph()
graph.add_edge(11, 12, cost=1, ctvalue=50)
graph.add_edge(12, 13, cost=3, ctvalue=50)
graph.add_edge(13, 11, cost=2, ctvalue=50)
#
graph.add_edge(21, 22, cost=2, ctvalue=60)
graph.add_edge(22, 23, cost=3, ctvalue=60)
graph.add_edge(23, 21, cost=1, ctvalue=60)
#
graph.add_edge(21, 1, cost=4, ctvalue=10)
graph.add_edge(1, 11, cost=4, ctvalue=10)
#
graph.add_edge(22, 2, cost=4, ctvalue=10)
graph.add_edge(2, 12, cost=4, ctvalue=10)
assert graph.number_of_nodes() == 8
assert graph.number_of_edges() == 10
prune_redundant(graph, 55)
assert graph.number_of_nodes() == 8
assert graph.number_of_edges() == 10-1
prune_redundant(graph, 55)
assert graph.number_of_nodes() == 8
assert graph.number_of_edges() == 10-1
prune_redundant(graph, 65)
assert graph.number_of_nodes() == 8
assert graph.number_of_edges() == 10-2
prune_tails(graph, 2)
assert graph.number_of_nodes() == 8-2
assert graph.number_of_edges() == 10-2-2
def test_prune_tails(self):
graph = StentGraph()
graph.add_edge(1, 2, cost=2, ctvalue=50)
graph.add_edge(2, 3, cost=2, ctvalue=50)
graph.add_edge(3, 1, cost=2, ctvalue=50)
# Tail from 1
graph.add_edge(1, 11, cost=3, ctvalue=50)
graph.add_edge(11, 12, cost=3, ctvalue=50)
graph.add_edge(12, 13, cost=3, ctvalue=50)
graph.add_edge(13, 14, cost=3, ctvalue=50)
# Tail from 2
graph.add_edge(2, 21, cost=3, ctvalue=50)
graph.add_edge(21, 22, cost=3, ctvalue=50)
graph.add_edge(22, 23, cost=3, ctvalue=50)
assert graph.number_of_nodes() == 3+4+3
assert graph.number_of_edges() == 3+4+3
prune_tails(graph, 3)
assert graph.number_of_nodes() == 3+4
assert graph.number_of_edges() == 3+4
prune_tails(graph, 9)
assert graph.number_of_nodes() == 3
assert graph.number_of_edges() == 3
def test_prune_clusters(self):
# Create two small cliques
graph = StentGraph()
graph.add_edge(1, 2, cost=2, ctvalue=50)
graph.add_edge(2, 3, cost=2, ctvalue=50)
graph.add_edge(3, 1, cost=2, ctvalue=50)
#
graph.add_edge(4, 5, cost=2, ctvalue=50)
graph.add_edge(5, 6, cost=2, ctvalue=50)
graph.add_edge(6, 7, cost=2, ctvalue=50)
graph.add_edge(7, 4, cost=2, ctvalue=50)
# Connect them
graph.add_edge(1, 4, cost=3, ctvalue=50)
# Also add loose node
graph.add_nodes_from([101, 102])
# Remove cliques and check that nothing happened
prune_clusters(graph, 4)
assert graph.number_of_edges() == 8
assert graph.number_of_nodes() == 7
# Remove connection
graph.remove_edge(1, 4)
# Remove cliques and check that one clique is removed
prune_clusters(graph, 4)
assert graph.number_of_edges() == 4
assert graph.number_of_nodes() == 4
# Remove cliques and check that one clique is removed
prune_clusters(graph, 5)
assert graph.number_of_edges() == 0
assert graph.number_of_nodes() == 0
def test_very_weak(self):
    """prune_very_weak drops every edge whose ctvalue does not exceed
    the threshold, keeping the rest untouched."""
    g = StentGraph()
    # Star around node 1 with descending CT values.
    for neighbor, ct in [(4, 50), (5, 40), (2, 30), (3, 20)]:
        g.add_edge(1, neighbor, ctvalue=ct)
    threshold = 35
    prune_very_weak(g, threshold)
    # Only the ct=40 and ct=50 edges remain.
    assert g.number_of_edges() == 2
    for (n1, n2) in g.edges_iter():
        assert g[n1][n2]['ctvalue'] > threshold
def test_weak1(self):
    r""" 2
        / | \
       5 - 1 - 3
        \ | /
         4
    Wheel graph: only the two costliest spokes (1-3, 1-4) are pruned.
    """
    g = StentGraph()
    # Spokes from hub 1, with per-edge cost; cost-3 and cost-4 go.
    for neighbor, spoke_cost in [(2, 2), (3, 3), (4, 4), (5, 1)]:
        g.add_edge(1, neighbor, cost=spoke_cost, ctvalue=50)
    # The strong outer ring.
    for n1, n2 in [(2, 3), (3, 4), (4, 5), (5, 2)]:
        g.add_edge(n1, n2, cost=1, ctvalue=50)
    prune_weak(g, 2, 80)
    # Two of the eight edges were removed, and they are the right ones.
    assert g.number_of_edges() == 6
    for e in g.edges_iter():
        assert e not in [(1, 3), (1, 4)]
def test_weak2(self):
    """ 2 5
       / | | \
      3 - 1 - 4 - 6

    Two triangles joined by a bridge edge 1-4. The bridge is only
    pruned when it is both costlier than the threshold cost and has a
    ctvalue at or below the CT threshold.
    """
    # Test that indeed only weakest are removed
    graph = StentGraph()
    # First triangle (1, 2, 3)
    graph.add_edge(1, 2, cost=2, ctvalue=50)
    graph.add_edge(2, 3, cost=2, ctvalue=50)
    graph.add_edge(3, 1, cost=2, ctvalue=50)
    # Second triangle (4, 5, 6)
    graph.add_edge(4, 5, cost=2, ctvalue=50)
    graph.add_edge(5, 6, cost=2, ctvalue=50)
    graph.add_edge(6, 4, cost=2, ctvalue=50)
    # Connect two subgraphs with weaker connection
    graph.add_edge(1, 4, cost=3, ctvalue=50)
    # Prune
    prune_weak(graph, 2, 80)
    # Check result: weak bridge removed, both triangles intact
    assert graph.number_of_edges() == 6
    for e in graph.edges_iter():
        assert e not in [(1, 4)]
    # Again, now with lower cost (stronger connection): bridge is kept
    graph.add_edge(1, 4, cost=1, ctvalue=50)
    # Prune
    prune_weak(graph, 2, 80)
    # Check result
    assert graph.number_of_edges() == 7
    # Again, now with high ct value (90 > the 80 given to prune_weak):
    # the costly bridge is kept as well
    graph.add_edge(1, 4, cost=3, ctvalue=90)
    # Prune
    prune_weak(graph, 2, 80)
    # Check result
    assert graph.number_of_edges() == 7
def test_weak3(self):
    """ 2 456
       / | |
      3 - 1 - 0 - 789

    Hub node 0 connects three triangles via three spokes; each prune
    pass removes at most the costliest spoke, unless its ctvalue is
    above the CT threshold.
    """
    # Test that indeed only weakest are removed
    graph = StentGraph()
    # Triangle (1, 2, 3)
    graph.add_edge(1, 2, cost=2, ctvalue=50)
    graph.add_edge(2, 3, cost=2, ctvalue=50)
    graph.add_edge(3, 1, cost=2, ctvalue=50)
    # Triangle (4, 5, 6)
    graph.add_edge(4, 5, cost=2, ctvalue=50)
    graph.add_edge(5, 6, cost=2, ctvalue=50)
    graph.add_edge(6, 4, cost=2, ctvalue=50)
    # Triangle (7, 8, 9)
    graph.add_edge(7, 8, cost=2, ctvalue=50)
    graph.add_edge(8, 9, cost=2, ctvalue=50)
    graph.add_edge(9, 7, cost=2, ctvalue=50)
    # Connect three subgraphs; 0-4 is the costliest spoke
    graph.add_edge(0, 1, cost=2, ctvalue=50)
    graph.add_edge(0, 4, cost=3, ctvalue=50) # gets removed
    graph.add_edge(0, 7, cost=2, ctvalue=50)
    # Prune
    prune_weak(graph, 2, 80)
    # Check result
    assert graph.number_of_edges() == 9+2
    for e in graph.edges_iter():
        assert e not in [(0, 4)]
    # Connect three subgraphs again; now 0-7 is the costliest spoke
    graph.add_edge(0, 1, cost=1, ctvalue=50)
    graph.add_edge(0, 4, cost=1, ctvalue=50)
    graph.add_edge(0, 7, cost=2, ctvalue=50) # gets removed
    # Prune
    prune_weak(graph, 2, 80)
    # Check result
    assert graph.number_of_edges() == 9+2
    for e in graph.edges_iter():
        assert e not in [(0, 7)]
    # Connect three subgraphs; the ctvalue=90 spoke is protected even
    # though it is the costliest (90 > the 80 CT threshold)
    graph.add_edge(0, 1, cost=3, ctvalue=50)
    graph.add_edge(0, 4, cost=4, ctvalue=90) # None gets removed
    graph.add_edge(0, 7, cost=3, ctvalue=50)
    # Prune
    prune_weak(graph, 2, 80)
    # Check result: all three spokes survive
    assert graph.number_of_edges() == 9+3
def test_pack1(self):
    """Round-trip a small custom graph through ssdf pack/save/load/unpack
    and verify the result is isomorphic to the original.

    Fixed: the test wrote to the hard-coded user path
    '/home/almar/test.ssdf'; it now uses a temporary file so it runs
    (and cleans up) on any machine.
    """
    import os
    import tempfile
    # Custom stent with graph-, node- and edge-attributes
    g = StentGraph(summary='dit is een stent!', lala=3)
    g.add_node((10,20), foo=3)
    g.add_node((30,40), foo=5)
    g.add_edge((1,1), (2,2), bar=10)
    g.add_edge((10,20),(1,1), bar=20)
    fd, fname = tempfile.mkstemp(suffix='.ssdf')
    os.close(fd)  # ssdf.save reopens the path itself
    try:
        ssdf.save(fname, g.pack())
        g2 = StentGraph()
        g2.unpack(ssdf.load(fname))
        assert nx.is_isomorphic(g, g2)
    finally:
        os.remove(fname)
def test_pack2(self):
    """Round-trip a larger auto-generated graph through ssdf pack/unpack
    and verify isomorphism.

    Fixed: the test wrote to the hard-coded user path
    '/home/almar/test.ssdf'; it now uses a temporary file so it runs
    (and cleans up) on any machine.
    """
    import os
    import random
    import tempfile
    # Auto generate a random geometric graph, copy it into a StentGraph
    n = 500
    p = dict((i, (random.gauss(0, 2), random.gauss(0, 2))) for i in range(n))
    g_ = nx.random_geometric_graph(n, 0.1, dim=3, pos=p)
    g = StentGraph(summary='dit is een stent!', lala=3)
    g.add_nodes_from(g_.nodes_iter())
    g.add_edges_from(g_.edges_iter())
    fd, fname = tempfile.mkstemp(suffix='.ssdf')
    os.close(fd)  # ssdf.save reopens the path itself
    try:
        ssdf.save(fname, g.pack())
        g2 = StentGraph()
        g2.unpack(ssdf.load(fname))
        assert nx.is_isomorphic(g, g2)
    finally:
        os.remove(fname)
def test_pop_node(self):
    """Popping a degree-2 node merges its two edges into one edge:
    the costs are summed, the smaller ctvalue is kept, and the edge
    paths are concatenated in node order regardless of the stored
    orientation of each input path.
    """
    # Create paths: path1 is 11->12, path2 is 12->13, and path12 is
    # their concatenation 11->12->13 (the expected merged path).
    path1 = PointSet(2)
    path1.append(1, 11)
    path1.append(1, 12)
    path2 = PointSet(2)
    path2.append(1, 12)
    path2.append(1, 13)
    #
    path12 = PointSet(2)
    path12.append(1, 11)
    path12.append(1, 12)
    path12.append(1, 13)
    # create 4 nodes (6-7-8-9), remove 8
    graph = StentGraph()
    graph.add_edge(6, 7, cost=4, ctvalue=70)
    graph.add_edge(7, 8, cost=2, ctvalue=50, path=path1)
    graph.add_edge(8, 9, cost=3, ctvalue=60, path=path2)
    # Pop
    _pop_node(graph, 8)
    # Check: 7-9 now direct, cost 2+3, ctvalue min(50, 60), joined path
    assert graph.number_of_nodes() == 3
    assert 8 not in graph.nodes()
    assert graph.edge[7][9]['ctvalue'] == 50
    assert graph.edge[7][9]['cost'] == 5
    assert np.all(graph.edge[7][9]['path'] == path12)
    # create 4 nodes (6-8-7-9), remove 7 -- first path stored reversed
    graph = StentGraph()
    graph.add_edge(6, 8, cost=4, ctvalue=70)
    graph.add_edge(8, 7, cost=2, ctvalue=50, path=np.flipud(path1))
    graph.add_edge(7, 9, cost=3, ctvalue=60, path=path2)
    # Pop
    _pop_node(graph, 7)
    # Check: orientation of the stored path must not matter
    assert graph.number_of_nodes() == 3
    assert 7 not in graph.nodes()
    assert graph.edge[8][9]['ctvalue'] == 50
    assert graph.edge[8][9]['cost'] == 5
    assert np.all(graph.edge[8][9]['path'] == path12)
    # create 4 nodes (7-8-6-9), remove 8 -- other path stored reversed
    graph = StentGraph()
    graph.add_edge(7, 8, cost=4, ctvalue=70, path=np.flipud(path2))
    graph.add_edge(8, 6, cost=2, ctvalue=50, path=path1)
    graph.add_edge(6, 9, cost=3, ctvalue=60)
    # Pop
    _pop_node(graph, 8)
    # Check: cost 4+2, ctvalue min(70, 50)
    assert graph.number_of_nodes() == 3
    assert 8 not in graph.nodes()
    assert graph.edge[6][7]['ctvalue'] == 50
    assert graph.edge[6][7]['cost'] == 6
    assert np.all(graph.edge[6][7]['path'] == path12)
# | |
<filename>Lib/site-packages/filebrowser/views.py
# coding: utf-8
# general imports
import os, re
from time import gmtime, strftime
# django imports
from django.shortcuts import render_to_response, HttpResponse
from django.template import RequestContext as Context
from django.http import HttpResponseRedirect
from django.contrib.admin.views.decorators import staff_member_required
from django.views.decorators.cache import never_cache
from django.utils.translation import ugettext as _
from django.conf import settings
from django import forms
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured
from django.dispatch import Signal
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.utils.encoding import smart_str
try:
# django SVN
from django.views.decorators.csrf import csrf_exempt
except:
# django 1.1
from django.contrib.csrf.middleware import csrf_exempt
# filebrowser imports
from filebrowser.settings import *
from filebrowser.functions import path_to_url, sort_by_attr, get_path, get_file, get_version_path, get_breadcrumbs, get_filterdate, get_settings_var, handle_file_upload, convert_filename
from filebrowser.templatetags.fb_tags import query_helper
from filebrowser.base import FileObject
from filebrowser.decorators import flash_login_required
# Precompile regular expressions used to hide files from the browser:
# one pattern per EXCLUDE entry from the settings, plus one pattern per
# VERSIONS key (e.g. '_thumbnail.(jpg|png|...)') so that generated
# image versions are not listed next to their originals.
filter_re = []
for exp in EXCLUDE:
    filter_re.append(re.compile(exp))
for k,v in VERSIONS.iteritems():
    exp = (r'_%s.(%s)') % (k, '|'.join(EXTENSION_LIST))
    filter_re.append(re.compile(exp))
def browse(request):
    """
    Browse Files/Directories.

    Lists the folder given by the ``dir`` query parameter (relative to
    the filebrowser DIRECTORY). Applies the precompiled EXCLUDE/VERSIONS
    filters, then the ``filter_type``/``filter_date``/``q`` query
    filters, sorting (``o``/``ot``) and pagination (``p``), and renders
    filebrowser/index.html.
    """
    # QUERY / PATH CHECK
    query = request.GET.copy()
    path = get_path(query.get('dir', ''))
    directory = get_path('')
    if path is None:
        msg = _('The requested Folder does not exist.')
        request.user.message_set.create(message=msg)
        if directory is None:
            # The DIRECTORY does not exist, raise an error to prevent eternal redirecting.
            raise ImproperlyConfigured, _("Error finding Upload-Folder. Maybe it does not exist?")
        # Fall back to the root folder, dropping the bad 'dir' parameter.
        redirect_url = reverse("fb_browse") + query_helper(query, "", "dir")
        return HttpResponseRedirect(redirect_url)
    abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, path)

    # INITIAL VARIABLES
    results_var = {'results_total': 0, 'results_current': 0, 'delete_total': 0, 'images_total': 0, 'select_total': 0 }
    counter = {}
    for k,v in EXTENSIONS.iteritems():
        counter[k] = 0

    dir_list = os.listdir(abs_path)
    files = []
    for file in dir_list:
        # EXCLUDE FILES MATCHING VERSIONS_PREFIX OR ANY OF THE EXCLUDE PATTERNS
        # (hidden dot-files are always excluded)
        filtered = file.startswith('.')
        for re_prefix in filter_re:
            if re_prefix.search(file):
                filtered = True
        if filtered:
            continue
        results_var['results_total'] += 1
        # CREATE FILEOBJECT
        fileobject = FileObject(os.path.join(DIRECTORY, path, file))
        # FILTER / SEARCH
        append = False
        if fileobject.filetype == request.GET.get('filter_type', fileobject.filetype) and get_filterdate(request.GET.get('filter_date', ''), fileobject.date):
            append = True
        if request.GET.get('q') and not re.compile(request.GET.get('q').lower(), re.M).search(file.lower()):
            append = False
        # APPEND FILE_LIST
        if append:
            try:
                # COUNTER/RESULTS
                if fileobject.filetype == 'Image':
                    results_var['images_total'] += 1
                if fileobject.filetype != 'Folder':
                    results_var['delete_total'] += 1
                elif fileobject.filetype == 'Folder' and fileobject.is_empty:
                    # only empty folders are deletable
                    results_var['delete_total'] += 1
                if query.get('type') and query.get('type') in SELECT_FORMATS and fileobject.filetype in SELECT_FORMATS[query.get('type')]:
                    results_var['select_total'] += 1
                elif not query.get('type'):
                    results_var['select_total'] += 1
            except OSError:
                # Ignore items that have problems
                continue
            else:
                # Only reached when the counting above raised no OSError.
                files.append(fileobject)
                results_var['results_current'] += 1
        # COUNTER/RESULTS (per-filetype totals over all listed files)
        if fileobject.filetype:
            counter[fileobject.filetype] += 1

    # SORTING
    query['o'] = request.GET.get('o', DEFAULT_SORTING_BY)
    query['ot'] = request.GET.get('ot', DEFAULT_SORTING_ORDER)
    files = sort_by_attr(files, request.GET.get('o', DEFAULT_SORTING_BY))
    if not request.GET.get('ot') and DEFAULT_SORTING_ORDER == "desc" or request.GET.get('ot') == "desc":
        files.reverse()

    p = Paginator(files, LIST_PER_PAGE)
    try:
        page_nr = request.GET.get('p', '1')
    except:
        page_nr = 1
    try:
        page = p.page(page_nr)
    except (EmptyPage, InvalidPage):
        # Out-of-range or invalid page: fall back to the last page.
        page = p.page(p.num_pages)

    return render_to_response('filebrowser/index.html', {
        'dir': path,
        'p': p,
        'page': page,
        'results_var': results_var,
        'counter': counter,
        'query': query,
        'title': _(u'FileBrowser'),
        'settings_var': get_settings_var(),
        'breadcrumbs': get_breadcrumbs(query, path),
        'breadcrumbs_title': ""
    }, context_instance=Context(request))
browse = staff_member_required(never_cache(browse))
# mkdir signals, fired immediately before/after the folder is created
filebrowser_pre_createdir = Signal(providing_args=["path", "dirname"])
filebrowser_post_createdir = Signal(providing_args=["path", "dirname"])

def mkdir(request):
    """
    Make Directory.

    Renders/handles a MakeDirForm for the folder given by the ``dir``
    query parameter. On success the folder is created with mode 0775
    and the view redirects back to browse, sorted by date descending
    (filters/pagination removed) so the new folder is visible on top.
    """
    from filebrowser.forms import MakeDirForm

    # QUERY / PATH CHECK
    query = request.GET
    path = get_path(query.get('dir', ''))
    if path is None:
        msg = _('The requested Folder does not exist.')
        request.user.message_set.create(message=msg)
        return HttpResponseRedirect(reverse("fb_browse"))
    abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, path)

    if request.method == 'POST':
        form = MakeDirForm(abs_path, request.POST)
        if form.is_valid():
            server_path = os.path.join(abs_path, form.cleaned_data['dir_name'])
            try:
                # PRE CREATE SIGNAL
                filebrowser_pre_createdir.send(sender=request, path=path, dirname=form.cleaned_data['dir_name'])
                # CREATE FOLDER
                os.mkdir(server_path)
                os.chmod(server_path, 0775)
                # POST CREATE SIGNAL
                filebrowser_post_createdir.send(sender=request, path=path, dirname=form.cleaned_data['dir_name'])
                # MESSAGE & REDIRECT
                msg = _('The Folder %s was successfully created.') % (form.cleaned_data['dir_name'])
                request.user.message_set.create(message=msg)
                # on redirect, sort by date desc to see the new directory on top of the list
                # remove filter in order to actually _see_ the new folder
                # remove pagination
                redirect_url = reverse("fb_browse") + query_helper(query, "ot=desc,o=date", "ot,o,filter_type,filter_date,q,p")
                return HttpResponseRedirect(redirect_url)
            except OSError, (errno, strerror):
                # errno 13 == EACCES: report permission problems explicitly
                if errno == 13:
                    form.errors['dir_name'] = forms.util.ErrorList([_('Permission denied.')])
                else:
                    form.errors['dir_name'] = forms.util.ErrorList([_('Error creating folder.')])
    else:
        form = MakeDirForm(abs_path)

    return render_to_response('filebrowser/makedir.html', {
        'form': form,
        'query': query,
        'title': _(u'New Folder'),
        'settings_var': get_settings_var(),
        'breadcrumbs': get_breadcrumbs(query, path),
        'breadcrumbs_title': _(u'New Folder')
    }, context_instance=Context(request))
mkdir = staff_member_required(never_cache(mkdir))
# upload signals, fired around handle_file_upload for each file
filebrowser_pre_upload = Signal(providing_args=["path", "file"])
filebrowser_post_upload = Signal(providing_args=["path", "file"])

def upload(request):
    """
    Multiple File Upload.

    Renders/handles a formset of up to 5 UploadForms for the folder
    given by the ``dir`` query parameter. Each uploaded filename is
    normalized via convert_filename() before handle_file_upload()
    writes it; on success the view redirects back to browse, sorted by
    date descending so the new files show up on top.
    """
    from django.forms.formsets import formset_factory

    # QUERY / PATH CHECK
    query = request.GET
    path = get_path(query.get('dir', ''))
    if path is None:
        msg = _('The requested Folder does not exist.')
        request.user.message_set.create(message=msg)
        return HttpResponseRedirect(reverse("fb_browse"))
    abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, path)

    # Import PIL's ImageFile, honoring STRICT_PIL; fall back to the
    # standalone ImageFile module for old PIL installs.
    if STRICT_PIL:
        from PIL import ImageFile
    else:
        try:
            from PIL import ImageFile
        except ImportError:
            import ImageFile
    ImageFile.MAXBLOCK = IMAGE_MAXBLOCK # default is 64k

    from filebrowser.forms import UploadForm, BaseUploadFormSet
    UploadFormSet = formset_factory(UploadForm, formset=BaseUploadFormSet, extra=5)

    if request.method == 'POST':
        formset = UploadFormSet(data=request.POST, files=request.FILES, path=abs_path)
        if formset.is_valid():
            for cleaned_data in formset.cleaned_data:
                if cleaned_data:
                    f = cleaned_data['file']
                    f.name = convert_filename(f.name)
                    # PRE UPLOAD SIGNAL
                    filebrowser_pre_upload.send(sender=request, path=abs_path, file=f)
                    # HANDLE UPLOAD
                    uploadedfile = handle_file_upload(abs_path, f)
                    # POST UPLOAD SIGNAL
                    filebrowser_post_upload.send(sender=request, path=abs_path, file=uploadedfile)
            # MESSAGE & REDIRECT
            msg = _('Upload successful.')
            request.user.message_set.create(message=msg)
            # on redirect, sort by date desc to see the uploaded files on top of the list
            redirect_url = reverse("fb_browse") + query_helper(query, "ot=desc,o=date", "ot,o")
            return HttpResponseRedirect(redirect_url)
    else:
        formset = UploadFormSet(path=abs_path)

    return render_to_response('filebrowser/upload.html', {
        'formset': formset,
        'dir': path,
        'query': query,
        'settings_var': get_settings_var(),
        'breadcrumbs_title': _(u'Upload'),
        'title': _(u'Select files to upload'),
    }, context_instance=Context(request))
upload = staff_member_required(never_cache(upload))
# delete signals, fired immediately before/after the item is removed
filebrowser_pre_delete = Signal(providing_args=["path", "filename"])
filebrowser_post_delete = Signal(providing_args=["path", "filename"])

def delete(request):
    """
    Delete existing File/Directory.

    When trying to delete a Directory, the Directory has to be empty.
    Reads ``dir``, ``filename`` and ``filetype`` from the query
    string. For files, generated image versions/thumbnails are removed
    first. On success, redirects back to the browse view; on failure,
    re-renders the index with an error message.

    Fixes: the error-rendering path referenced the undefined name
    ``dir_name`` (NameError), and ``msg = OSError`` assigned the
    exception *class* as the user-facing message.
    """
    # QUERY / PATH CHECK
    query = request.GET
    path = get_path(query.get('dir', ''))
    filename = get_file(query.get('dir', ''), query.get('filename', ''))
    if path is None or filename is None:
        if path is None:
            msg = _('The requested Folder does not exist.')
        else:
            msg = _('The requested File does not exist.')
        request.user.message_set.create(message=msg)
        return HttpResponseRedirect(reverse("fb_browse"))
    abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, path)

    msg = ""
    if request.GET:
        if request.GET.get('filetype') != "Folder":
            relative_server_path = os.path.join(DIRECTORY, path, filename)
            try:
                # PRE DELETE SIGNAL
                filebrowser_pre_delete.send(sender=request, path=path, filename=filename)
                # DELETE IMAGE VERSIONS/THUMBNAILS
                # (best effort: a version may never have been generated)
                for version in VERSIONS:
                    try:
                        os.unlink(os.path.join(MEDIA_ROOT, get_version_path(relative_server_path, version)))
                    except:
                        pass
                # DELETE FILE
                os.unlink(smart_str(os.path.join(abs_path, filename)))
                # POST DELETE SIGNAL
                filebrowser_post_delete.send(sender=request, path=path, filename=filename)
                # MESSAGE & REDIRECT
                msg = _('The file %s was successfully deleted.') % (filename.lower())
                request.user.message_set.create(message=msg)
                redirect_url = reverse("fb_browse") + query_helper(query, "", "filename,filetype")
                return HttpResponseRedirect(redirect_url)
            except OSError:
                # BUGFIX: was `msg = OSError` (the exception class itself)
                msg = _('An error occurred while deleting the file %s.') % (filename.lower())
        else:
            try:
                # PRE DELETE SIGNAL
                filebrowser_pre_delete.send(sender=request, path=path, filename=filename)
                # DELETE FOLDER
                os.rmdir(os.path.join(abs_path, filename))
                # POST DELETE SIGNAL
                filebrowser_post_delete.send(sender=request, path=path, filename=filename)
                # MESSAGE & REDIRECT
                msg = _('The folder %s was successfully deleted.') % (filename.lower())
                request.user.message_set.create(message=msg)
                redirect_url = reverse("fb_browse") + query_helper(query, "", "filename,filetype")
                return HttpResponseRedirect(redirect_url)
            except OSError:
                # BUGFIX: was `msg = OSError`; rmdir fails e.g. for a
                # non-empty folder
                msg = _('An error occurred while deleting the folder %s.') % (filename.lower())
    if msg:
        request.user.message_set.create(message=msg)

    return render_to_response('filebrowser/index.html', {
        'dir': path,  # BUGFIX: was the undefined name `dir_name`
        'file': request.GET.get('filename', ''),
        'query': query,
        'settings_var': get_settings_var(),
        'breadcrumbs': get_breadcrumbs(query, path),  # BUGFIX: `dir_name` -> `path`
        'breadcrumbs_title': ""
    }, context_instance=Context(request))
delete = staff_member_required(never_cache(delete))
# rename signals, fired immediately before/after the file/folder is
# renamed on disk, carrying both the old and the new filename
filebrowser_pre_rename = Signal(providing_args=["path", "filename", "new_filename"])
filebrowser_post_rename = Signal(providing_args=["path", "filename", "new_filename"])
def rename(request):
"""
Rename existing File/Directory.
Includes renaming existing Image Versions/Thumbnails.
"""
from filebrowser.forms import RenameForm
# QUERY / PATH CHECK
query = request.GET
path = get_path(query.get('dir', ''))
filename = get_file(query.get('dir', ''), query.get('filename', ''))
if path is None or filename is None:
if path is None:
msg = _('The requested Folder does not exist.')
else:
msg = _('The requested File does not exist.')
request.user.message_set.create(message=msg)
return HttpResponseRedirect(reverse("fb_browse"))
abs_path = os.path.join(MEDIA_ROOT, DIRECTORY, path)
file_extension = os.path.splitext(filename)[1].lower()
if request.method == 'POST':
form = RenameForm(abs_path, file_extension, request.POST)
if form.is_valid():
relative_server_path = os.path.join(DIRECTORY, path, filename)
new_filename = form.cleaned_data['name'] + file_extension
new_relative_server_path = os.path.join(DIRECTORY, path, new_filename)
try:
# PRE RENAME SIGNAL
filebrowser_pre_rename.send(sender=request, path=path, filename=filename, new_filename=new_filename)
# DELETE IMAGE VERSIONS/THUMBNAILS
# regenerating versions/thumbs will be done automatically
for version in VERSIONS:
try:
os.unlink(os.path.join(MEDIA_ROOT, get_version_path(relative_server_path, version)))
except:
pass
# RENAME ORIGINAL
os.rename(os.path.join(MEDIA_ROOT, relative_server_path), os.path.join(MEDIA_ROOT, new_relative_server_path))
# POST RENAME SIGNAL
filebrowser_post_rename.send(sender=request, path=path, filename=filename, new_filename=new_filename)
# MESSAGE & REDIRECT
msg = _('Renaming was successful.')
request.user.message_set.create(message=msg)
redirect_url = reverse("fb_browse") + query_helper(query, "", "filename")
return HttpResponseRedirect(redirect_url)
except OSError, (errno, strerror):
form.errors['name'] = forms.util.ErrorList([_('Error.')])
else:
form = RenameForm(abs_path, file_extension)
return render_to_response('filebrowser/rename.html', {
'form': form,
'query': query,
'file_extension': file_extension,
'title': _(u'Rename | |
= contcar_dict['len_vec_b']
len_c_contcar = contcar_dict['len_vec_c']
angle_alpha_degree_poscar = poscar_dict['angle_alpha_degree']
angle_beta_degree_poscar = poscar_dict['angle_beta_degree']
angle_gamma_degree_poscar = poscar_dict['angle_gamma_degree']
angle_alpha_degree_contcar = contcar_dict['angle_alpha_degree']
angle_beta_degree_contcar = contcar_dict['angle_beta_degree']
angle_gamma_degree_contcar = contcar_dict['angle_gamma_degree']
volume_poscar = poscar_dict['volume']
volume_contcar = contcar_dict['volume']
continue
if job_status == 'finished':
temp_str = temp_str + (folder_path_str_list[i_folder_indx] + ' ' * (max_length_folder_path_str - len(folder_path_str_list[i_folder_indx])) + ' ' +
'{:.4f}'.format(len_a_poscar) + ' ' * (13 - len('{:.4f}'.format(len_a_poscar))) +
'{:.4f}'.format(len_b_poscar) + ' ' * (13 - len('{:.4f}'.format(len_b_poscar))) +
'{:.4f}'.format(len_c_poscar) + ' ' * (13 - len('{:.4f}'.format(len_c_poscar))) +
'{:.4f}'.format(len_a_contcar) + ' ' * (13 - len('{:.4f}'.format(len_a_contcar))) +
'{:.4f}'.format(len_b_contcar) + ' ' * (13 - len('{:.4f}'.format(len_b_contcar))) +
'{:.4f}'.format(len_c_contcar) + ' ' * (13 - len('{:.4f}'.format(len_c_contcar))) +
'{:.2f}'.format(angle_alpha_degree_poscar) + ' ' * (11 - len('{:.2f}'.format(angle_alpha_degree_poscar))) +
'{:.2f}'.format(angle_beta_degree_poscar) + ' ' * (11 - len('{:.2f}'.format(angle_beta_degree_poscar))) +
'{:.2f}'.format(angle_gamma_degree_poscar) + ' ' * (11 - len('{:.2f}'.format(angle_gamma_degree_poscar))) +
'{:.2f}'.format(angle_alpha_degree_contcar) + ' ' * (11 - len('{:.2f}'.format(angle_alpha_degree_contcar))) +
'{:.2f}'.format(angle_beta_degree_contcar) + ' ' * (11 - len('{:.2f}'.format(angle_beta_degree_contcar))) +
'{:.2f}'.format(angle_gamma_degree_contcar) + ' ' * (11 - len('{:.2f}'.format(angle_gamma_degree_contcar))) +
'{:.4f}'.format(volume_poscar) + ' ' * (13 - len('{:.4f}'.format(volume_poscar))) +
'{:.4f}'.format(volume_contcar) + ' ' * (13 - len('{:.4f}'.format(volume_contcar))) +
' \n')
else:
temp_str = temp_str + (folder_path_str_list[i_folder_indx] + ' ' * (max_length_folder_path_str - len(folder_path_str_list[i_folder_indx])) + ' ' +
'--' + ' ' * (13 - len('--')) +
'--' + ' ' * (13 - len('--')) +
'--' + ' ' * (13 - len('--')) +
'--' + ' ' * (13 - len('--')) +
'--' + ' ' * (13 - len('--')) +
'--' + ' ' * (13 - len('--')) +
'--' + ' ' * (11 - len('--')) +
'--' + ' ' * (11 - len('--')) +
'--' + ' ' * (11 - len('--')) +
'--' + ' ' * (11 - len('--')) +
'--' + ' ' * (11 - len('--')) +
'--' + ' ' * (11 - len('--')) +
'--' + ' ' * (13 - len('--')) +
'--' + ' ' * (13 - len('--')) +
' \n')
with open(job_status_file_path, 'w') as f:
f.write(temp_str)
##################################
# Determine the args string
##################################
log_str = ''
func_name = 'vasp_tools.get_latt_param'
args_str = func_name + '(' + '\n'
for i_arg in args_dict.keys():
arg_value = args_dict[i_arg]
if isinstance(arg_value,str):
arg_value_str = '\'' + arg_value + '\''
else:
arg_value_str = str(arg_value)
if i_arg == 'job_parent_dir':
arg_value_str = 'r\'' + job_parent_dir + '\''
args_str += ' ' + i_arg + ' = ' + arg_value_str + ',\n'
args_str += ' )\n'
args_str += '################################################\n'
log_str += args_str
funcs.write_log(logfile, log_str)
return temp_str
def convert_coord_system(poscar_file_path, mode = 'direct2cartesian'):
    '''
    Convert a POSCAR file between Direct and Cartesian coordinates.

    poscar_file_path (str): path to the POSCAR file to convert.
    mode (str): 'direct2cartesian' or 'cartesian2direct'. Any other
        value raises ValueError (previously this fell through and
        crashed later with a NameError).

    Writes the converted POSCAR next to the input file, suffixed with
    '_car' (Cartesian) or '_dir' (Direct). Returns 0.
    '''
    args_dict = locals()
    import os
    from . import vasp_read
    from . import vasp_write
    from .. import default_params
    defaults_dict = default_params.default_params()
    # Validate the mode before doing any file I/O.
    if mode == 'direct2cartesian':
        coord_system = 'Cartesian'
        poscar_suffix = '_car'
    elif mode == 'cartesian2direct':
        coord_system = 'Direct'
        poscar_suffix = '_dir'
    else:
        raise ValueError("mode must be 'direct2cartesian' or 'cartesian2direct', got %r" % (mode,))
    poscar_file_path = os.path.abspath(poscar_file_path)
    poscar_dict = vasp_read.read_poscar(poscar_file_path)
    fpath, fname = os.path.split(poscar_file_path)
    output_poscar_file_path = os.path.join(fpath, fname + poscar_suffix)
    vasp_write.write_poscar(output_poscar_file_path = output_poscar_file_path, poscar_dict = poscar_dict, coord_system = coord_system)
    return 0
def rm_outputs(dst_dir = None, clean_subdir = True):
    '''
    Remove the output files of VASP jobs from a directory.

    dst_dir (str): directory to clean.
    clean_subdir (bool): if True, also clean every subdirectory of
        dst_dir (as returned by funcs.get_dirs); if False, only
        dst_dir itself.

    Missing files are ignored (best-effort cleanup). Returns 0.
    '''
    args_dict = locals()
    import os
    from .. import default_params
    from .. import funcs
    defaults_dict = default_params.default_params()
    vasp_output_list = ['CHGCAR', 'CHG', 'CONTCAR', 'DOSCAR', 'EIGENVAL', 'IBZKPT', 'OSZICAR', 'OUTCAR', 'PCDAT', 'PROCAR', 'WAVECAR', 'XDATCAR', 'vasprun.xml']
    # NOTE(review): a list of auxiliary scheduler files ('e.*', 'o.*',
    # 'error.*', 'output.*', 'vaspjob.*', 'REPORT') was declared here
    # but never used; wildcard patterns would need glob matching.
    # Removed the dead variable -- implement with glob if those files
    # should be cleaned too.
    dst_dir = os.path.abspath(dst_dir)
    if clean_subdir:
        dir_list = funcs.get_dirs(dst_dir)
    else:
        dir_list = [dst_dir]
    for i_dir in dir_list:
        for item in vasp_output_list:
            try:
                os.remove(os.path.join(i_dir, item))
            except OSError:
                # File absent or not removable: keep going.
                pass
    return 0
def poscar_layer_dist_tolerance(poscar_dict, radius_style = 'csd', radius_scale_factor = 1.15):
    '''
    Get the tolerance value of the layer distance for a specific POSCAR.

    The tolerance is the smallest plausible bond length in the system:
    twice the smallest CSD covalent radius of the elements present,
    scaled by radius_scale_factor.

    poscar_dict (dict): must contain 'elmt_species_arr'.
    radius_style (str): only 'csd'/'CSD' is supported; any other value
        raises ValueError (previously this failed later with a
        NameError on an undefined variable).
    radius_scale_factor (float): multiplier applied to the bond length.
    '''
    args_dict = locals()
    ###############################
    # Set layer_dist_tolerance
    ###############################
    from .. import periodic_table
    periodic_table_dict = periodic_table.periodic_tab()
    if radius_style not in ['csd', 'CSD']:
        raise ValueError("unsupported radius_style %r; only 'csd' is supported" % (radius_style,))
    # CSD covalent radii of the species in the structure. The original
    # len==1 special case produced the same smallest/largest values and
    # the unused second_smallest/second_largest locals were dropped.
    csd_covalent_radius_list = [periodic_table_dict['csd_covalent_radius'][i_elmt] for i_elmt in poscar_dict['elmt_species_arr']]
    smallest_atom_radius = min(csd_covalent_radius_list)
    # The /100 presumably converts tabulated pm to Angstrom -- TODO confirm
    # against the periodic_table module.
    min_atom_bonding_len = smallest_atom_radius * 2 / 100 * radius_scale_factor
    layer_dist_tolerance = min_atom_bonding_len
    return layer_dist_tolerance
def poscar_direction_params(poscar_dict, direction = 'z'):
    '''
    Get parameters related with a specified direction in a POSCAR file.

    poscar_dict (dict): must provide 'vec_a'/'vec_b'/'vec_c' and
        'len_vec_a'/'len_vec_b'/'len_vec_c'.
    direction (str): 'x'/'X', 'y'/'Y' or 'z'/'Z'. Any other value now
        raises ValueError (previously the function crashed with a
        NameError on an undefined variable).

    Returns a dict with: the lattice vector along the direction and its
    length, the two remaining lattice vectors, the unit vector of the
    side vector, the Cartesian basis vectors (chosen axis first), the
    orthogonal box extent along the axis, the cosine between the
    lattice vector and the Cartesian axis, and the column indices of
    the chosen/other axes in the position arrays.
    '''
    args_dict = locals()
    import numpy as np
    from .. import funcs

    direction_key = direction.lower()
    if direction_key not in ('x', 'y', 'z'):
        raise ValueError("direction must be one of 'x', 'y', 'z', got %r" % (direction,))

    # All per-direction values are derived from the axis index of the
    # chosen direction and the two remaining axes (ascending order);
    # this replaces three near-identical if-blocks.
    axis_indx = {'x': 0, 'y': 1, 'z': 2}[direction_key]
    other_indx_1, other_indx_2 = {0: (1, 2), 1: (0, 2), 2: (0, 1)}[axis_indx]
    vec_keys = ('vec_a', 'vec_b', 'vec_c')
    len_keys = ('len_vec_a', 'len_vec_b', 'len_vec_c')
    basis = (np.array([1, 0, 0]), np.array([0, 1, 0]), np.array([0, 0, 1]))

    side_vector = poscar_dict[vec_keys[axis_indx]]
    unit_vector = funcs.unit_vec(side_vector)
    ortho_vector = basis[axis_indx]
    # Orthogonal extent of the (possibly skewed) box along this axis.
    box_len_ortho = abs(poscar_dict['vec_a'] + poscar_dict['vec_b'] + poscar_dict['vec_c']).dot(ortho_vector)

    poscar_direction_dict = {}
    poscar_direction_dict['direction'] = direction
    poscar_direction_dict['number_order_text'] = direction_key
    poscar_direction_dict['l_arr_row'] = axis_indx
    poscar_direction_dict['l_arr_column_1'] = other_indx_1
    poscar_direction_dict['l_arr_column_2'] = other_indx_2
    poscar_direction_dict['side_vector'] = side_vector
    poscar_direction_dict['side_vector_len'] = poscar_dict[len_keys[axis_indx]]
    poscar_direction_dict['vec_1'] = poscar_dict[vec_keys[other_indx_1]]
    poscar_direction_dict['vec_2'] = poscar_dict[vec_keys[other_indx_2]]
    poscar_direction_dict['unit_vector'] = unit_vector
    poscar_direction_dict['ortho_vector'] = ortho_vector
    poscar_direction_dict['ortho_vector_1'] = basis[other_indx_1]
    poscar_direction_dict['ortho_vector_2'] = basis[other_indx_2]
    poscar_direction_dict['box_len_ortho'] = box_len_ortho
    # Cosine between the lattice vector and the Cartesian axis.
    poscar_direction_dict['cos_angle'] = unit_vector.dot(ortho_vector)
    # pos_arr columns 3..5 map to x/y/z (direct columns 0..2), matching
    # the original hard-coded per-direction indices.
    poscar_direction_dict['pos_arr_column'] = axis_indx + 3
    poscar_direction_dict['pos_arr_column_1'] = other_indx_1 + 3
    poscar_direction_dict['pos_arr_column_2'] = other_indx_2 + 3
    poscar_direction_dict['pos_arr_direct_column'] = axis_indx
    poscar_direction_dict['pos_arr_direct_column_1'] = other_indx_1
    poscar_direction_dict['pos_arr_direct_column_2'] = other_indx_2
    return poscar_direction_dict
def poscar_layer_params(poscar_dict, poscar_direction_dict, criteria = 'auto', delta = 0.05, layer_dist_tolerance = 'auto', radius_style = 'csd', radius_scale_factor = 2.00, write_layer_info = False, suppress_warning = True):
'''
Get layer parameters from POSCAR
delta: This is the spacial resolution (in unit of Angstrom) for the atoms, for example z=0.24 and z=0.25 are considered to be in the same position if delta=0.01. Default: delta = 0.05
'''
args_dict = locals()
import numpy as np
from .. import funcs
from . import vasp_read
from .. import periodic_table
from copy import copy, deepcopy
import math
import os
from . import vasp_build
import itertools
periodic_table_dict = periodic_table.periodic_tab()
original_criteria = criteria
original_layer_dist_tolerance = layer_dist_tolerance
#recommended_layer_dist_tolerance = 0.664
#######################################################
# Creating layer property dictionary: poscar_layer_dict
#######################################################
poscar_layer_dict = {}
poscar_layer_dict['delta'] = delta
##import pprint
##pprint.pprint(poscar_direction_dict)
poscar_dict['atom_number_ortho_' + poscar_direction_dict['number_order_text']] = funcs.atom_number_ortho(
atom_key_arr = | |
import datetime
from sklearn.metrics import mean_squared_error, mean_absolute_error, f1_score
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from .dataset import DataSet
from .metergroup import MeterGroup
from .disaggregate import (
CombinatorialOptimisation,
Mean,
FHMM,
Zero,
DAE,
Seq2Point,
Seq2Seq,
DSC,
Disaggregator,
) # , AFHMM,AFHMM_SAC
class API:
"""
The API ia designed for rapid experimentation with NILM Algorithms.
"""
def __init__(self, params):
"""
Initializes the API with default parameters
"""
self.power = {}
self.sample_period = 1
self.appliances = []
self.methods = {}
self.chunk_size = None
self.method_dict = {
"CO": {},
"FHMM": {},
"Hart85": {},
"DAE": {},
"Mean": {},
"Zero": {},
"WindowGRU": {},
"Seq2Point": {},
"RNN": {},
"Seq2Seq": {},
"DSC": {},
"AFHMM": {},
"AFHMM_SAC": {},
}
self.pre_trained = False
self.metrics = []
self.train_datasets_dict = {}
self.test_datasets_dict = {}
self.artificial_aggregate = False
self.train_submeters = []
self.train_mains = pd.DataFrame()
self.test_submeters = []
self.test_mains = pd.DataFrame()
self.gt_overall = {}
self.pred_overall = {}
self.classifiers = []
self.DROP_ALL_NANS = True
self.mae = pd.DataFrame()
self.rmse = pd.DataFrame()
self.experiment(params)
def initialise(self, params):
"""
Instantiates the API with the specified Parameters
"""
for elems in params["params"]["power"]:
self.power = params["params"]["power"]
self.sample_period = params["sample_rate"]
for elems in params["appliances"]:
self.appliances.append(elems)
self.pre_trained = ["pre_trained"]
self.train_datasets_dict = params["train"]["datasets"]
self.test_datasets_dict = params["test"]["datasets"]
self.metrics = params["test"]["metrics"]
self.methods = params["methods"]
self.artificial_aggregate = params.get(
"artificial_aggregate", self.artificial_aggregate
)
self.chunk_size = params.get("chunk_size", self.chunk_size)
def experiment(self, params):
"""
Calls the Experiments with the specified parameters
"""
self.params = params
self.initialise(params)
if params["chunk_size"]:
# This is for training and Testing in Chunks
self.load_datasets_chunks()
else:
# This is to load all the data from all buildings and use it for training and testing. This might not be possible to execute on computers with low specs
self.load_datasets()
def load_datasets_chunks(self):
"""
This function loads the data from buildings and datasets with the specified chunk size and trains on each of them.
After the training process is over, it tests on the specified testing set whilst loading it in chunks.
"""
# First, we initialize all the models
self.store_classifier_instances()
d = self.train_datasets_dict
for model_name, clf in self.classifiers:
# If the model is a neural net, it has an attribute n_epochs, Ex: DAE, Seq2Point
if hasattr(clf, "n_epochs"):
epochs = clf.n_epochs
# If it doesn't have the attribute n_epochs, this is executed. Ex: Mean, Zero
else:
epochs = 1
# If the model has the filename specified for loading the pretrained model, then we don't need to load training data
if clf.load_model_path:
print(clf.MODEL_NAME, " is loading the pretrained model")
continue
for q in range(epochs):
for dataset in d:
print("Loading data for ", dataset, " dataset")
for building in d[dataset]["buildings"]:
train = DataSet(d[dataset]["path"])
print("Loading building ... ", building)
train.set_window(
start=d[dataset]["buildings"][building]["start_time"],
end=d[dataset]["buildings"][building]["end_time"],
)
mains_iterator = (
train.buildings[building]
.elec.mains()
.load(
chunksize=self.chunk_size,
physical_quantity="power",
ac_type=self.power["mains"],
sample_period=self.sample_period,
)
)
print(self.appliances)
appliance_iterators = [
train.buildings[building]
.elec.select_using_appliances(type=app_name)
.load(
chunksize=self.chunk_size,
physical_quantity="power",
ac_type=self.power["appliance"],
sample_period=self.sample_period,
)
for app_name in self.appliances
]
print(train.buildings[building].elec.mains())
for chunk_num, chunk in enumerate(
train.buildings[building]
.elec.mains()
.load(
chunksize=self.chunk_size,
physical_quantity="power",
ac_type=self.power["mains"],
sample_period=self.sample_period,
)
):
# Dummry loop for executing on outer level. Just for looping till end of a chunk
print("starting enumeration..........")
train_df = next(mains_iterator)
appliance_readings = []
for i in appliance_iterators:
try:
appliance_df = next(i)
except StopIteration:
pass
appliance_readings.append(appliance_df)
if self.DROP_ALL_NANS:
train_df, appliance_readings = self.dropna(
train_df, appliance_readings
)
if self.artificial_aggregate:
print("Creating an Artificial Aggregate")
train_df = pd.DataFrame(
np.zeros(appliance_readings[0].shape),
index=appliance_readings[0].index,
columns=appliance_readings[0].columns,
)
for app_reading in appliance_readings:
train_df += app_reading
train_appliances = []
for cnt, i in enumerate(appliance_readings):
train_appliances.append((self.appliances[cnt], [i]))
self.train_mains = [train_df]
self.train_submeters = train_appliances
clf.partial_fit(self.train_mains, self.train_submeters)
print("...............Finished the Training Process ...................")
print("...............Started the Testing Process ...................")
d = self.test_datasets_dict
for dataset in d:
print("Loading data for ", dataset, " dataset")
for building in d[dataset]["buildings"]:
test = DataSet(d[dataset]["path"])
test.set_window(
start=d[dataset]["buildings"][building]["start_time"],
end=d[dataset]["buildings"][building]["end_time"],
)
mains_iterator = (
test.buildings[building]
.elec.mains()
.load(
chunksize=self.chunk_size,
physical_quantity="power",
ac_type=self.power["mains"],
sample_period=self.sample_period,
)
)
appliance_iterators = [
test.buildings[building]
.elec.select_using_appliances(type=app_name)
.load(
chunksize=self.chunk_size,
physical_quantity="power",
ac_type=self.power["appliance"],
sample_period=self.sample_period,
)
for app_name in self.appliances
]
for chunk_num, chunk in enumerate(
test.buildings[building]
.elec.mains()
.load(
chunksize=self.chunk_size,
physical_quantity="power",
ac_type=self.power["mains"],
sample_period=self.sample_period,
)
):
test_df = next(mains_iterator)
appliance_readings = []
for i in appliance_iterators:
try:
appliance_df = next(i)
except StopIteration:
appliance_df = pd.DataFrame()
appliance_readings.append(appliance_df)
if self.DROP_ALL_NANS:
test_df, appliance_readings = self.dropna(
test_df, appliance_readings
)
if self.artificial_aggregate:
print("Creating an Artificial Aggregate")
test_df = pd.DataFrame(
np.zeros(appliance_readings[0].shape),
index=appliance_readings[0].index,
columns=appliance_readings[0].columns,
)
for app_reading in appliance_readings:
test_df += app_reading
test_appliances = []
for cnt, i in enumerate(appliance_readings):
test_appliances.append((self.appliances[cnt], [i]))
self.test_mains = [test_df]
self.test_submeters = test_appliances
print(
"Results for Dataset {dataset} Building {building} Chunk {chunk_num}".format(
dataset=dataset, building=building, chunk_num=chunk_num
)
)
self.call_predict(self.classifiers)
def dropna(self, mains_df, appliance_dfs):
"""
Drops the missing values in the Mains reading and appliance readings and returns consistent data by copmuting the intersection
"""
print("Dropping missing values")
# The below steps are for making sure that data is consistent by doing intersection across appliances
mains_df = mains_df.dropna()
for i in range(len(appliance_dfs)):
appliance_dfs[i] = appliance_dfs[i].dropna()
ix = mains_df.index
for app_df in appliance_dfs:
ix = ix.intersection(app_df.index)
mains_df = mains_df.loc[ix]
new_appliances_list = []
for app_df in appliance_dfs:
new_appliances_list.append(app_df.loc[ix])
return mains_df, new_appliances_list
    def load_datasets(self):
        """Load ALL training data into memory, train every model, then load
        the test data and score predictions.

        NOTE(review): the testing section below overwrites ``test_mains``
        on every building, so only the LAST building of the last test
        dataset is actually evaluated -- confirm whether multi-building
        testing was intended.
        """
        # This function has a few issues, which should be addressed soon
        self.store_classifier_instances()
        d = self.train_datasets_dict
        print("............... Loading Data for training ...................")
        # store the train_main readings for all buildings
        for dataset in d:
            print("Loading data for ", dataset, " dataset")
            train = DataSet(d[dataset]["path"])
            for building in d[dataset]["buildings"]:
                print("Loading building ... ", building)
                train.set_window(
                    start=d[dataset]["buildings"][building]["start_time"],
                    end=d[dataset]["buildings"][building]["end_time"],
                )
                # NOTE(review): DataFrame.append is deprecated in recent
                # pandas releases; pd.concat is the modern equivalent.
                self.train_mains = self.train_mains.append(
                    next(
                        train.buildings[building]
                        .elec.mains()
                        .load(
                            physical_quantity="power",
                            ac_type=self.power["mains"],
                            sample_period=self.sample_period,
                        )
                    )
                )
        # store train submeters reading
        train_buildings = pd.DataFrame()
        for appliance in self.appliances:
            train_df = pd.DataFrame()
            print("For appliance .. ", appliance)
            for dataset in d:
                print("Loading data for ", dataset, " dataset")
                train = DataSet(d[dataset]["path"])
                for building in d[dataset]["buildings"]:
                    print("Loading building ... ", building)
                    # store data for submeters
                    train.set_window(
                        start=d[dataset]["buildings"][building]["start_time"],
                        end=d[dataset]["buildings"][building]["end_time"],
                    )
                    train_df = train_df.append(
                        next(
                            train.buildings[building]
                            .elec.submeters()
                            .select_using_appliances(type=appliance)
                            .load(
                                physical_quantity="power",
                                ac_type=self.power["appliance"],
                                sample_period=self.sample_period,
                            )
                        )
                    )
            self.train_submeters.append((appliance, [train_df]))
        # create instance of the training methods
        # train models
        # store data for mains
        self.train_mains = [self.train_mains]
        self.call_partial_fit()
        d = self.test_datasets_dict
        # store the test_main readings for all buildings
        for dataset in d:
            print("Loading data for ", dataset, " dataset")
            test = DataSet(d[dataset]["path"])
            for building in d[dataset]["buildings"]:
                test.set_window(
                    start=d[dataset]["buildings"][building]["start_time"],
                    end=d[dataset]["buildings"][building]["end_time"],
                )
                # NOTE(review): overwritten per building; only the last
                # building's mains survive this loop.
                self.test_mains = next(
                    test.buildings[building]
                    .elec.mains()
                    .load(
                        physical_quantity="power",
                        ac_type=self.power["mains"],
                        sample_period=self.sample_period,
                    )
                )
        self.test_submeters = []
        for appliance in self.appliances:
            # NOTE(review): `test` and `building` are the leftover loop
            # variables from above, i.e. the last building only.
            test_df = next(
                (
                    test.buildings[building]
                    .elec.submeters()
                    .select_using_appliances(type=appliance)
                    .load(
                        physical_quantity="power",
                        ac_type=self.power["appliance"],
                        sample_period=self.sample_period,
                    )
                )
            )
            self.test_submeters.append((appliance, [test_df]))
        self.test_mains = [self.test_mains]
        self.call_predict(self.classifiers)
def store_classifier_instances(self):
"""
This function is reponsible for initializing the models with the specified model parameters
"""
method_dict = {}
for i in self.method_dict:
if i in self.methods:
self.method_dict[i].update(self.methods[i])
method_dict = {
"CO": CombinatorialOptimisation(self.method_dict["CO"]),
"FHMM": FHMM(self.method_dict["FHMM"]),
"DAE": DAE(self.method_dict["DAE"]),
"Mean": Mean(self.method_dict["Mean"]),
"Zero": Zero(self.method_dict["Zero"]),
"Seq2Seq": Seq2Seq(self.method_dict["Seq2Seq"]),
"Seq2Point": Seq2Point(self.method_dict["Seq2Point"]),
"DSC": DSC(self.method_dict["DSC"]),
# 'AFHMM':AFHMM(self.method_dict['AFHMM']),
# 'AFHMM_SAC':AFHMM_SAC(self.method_dict['AFHMM_SAC'])
#'RNN':RNN(self.method_dict['RNN'])
}
for name in self.methods:
if name in method_dict:
clf = method_dict[name]
self.classifiers.append((name, clf))
else:
print(
"\n\nThe method {model_name} specied does not exist. \n\n".format(
model_name=i
)
)
def call_predict(self, classifiers):
"""
This functions computers the predictions on the self.test_mains using all the trained models and then compares different learn't models using the metrics specified
"""
pred_overall = {}
gt_overall = {}
for name, clf in classifiers:
gt_overall, pred_overall[name] = self.predict(
clf,
self.test_mains,
self.test_submeters,
self.sample_period,
"Europe/London",
)
self.gt_overall = gt_overall
self.pred_overall = pred_overall
for i in gt_overall.columns:
plt.figure()
plt.plot(gt_overall[i], label="truth")
for clf in pred_overall:
plt.plot(pred_overall[clf][i], label=clf)
plt.title(i)
plt.legend()
if gt_overall.size == 0:
print("No samples found in ground truth")
return None
for metric in self.metrics:
if metric == "f1-score":
f1_score = {}
for clf_name, clf in classifiers:
f1_score[clf_name] = self.compute_f1_score(
gt_overall, pred_overall[clf_name]
)
f1_score = pd.DataFrame(f1_score)
print("............ ", metric, " ..............")
print(f1_score)
elif metric == "rmse":
rmse = {}
for clf_name, clf in classifiers:
rmse[clf_name] = self.compute_rmse(
gt_overall, pred_overall[clf_name]
)
rmse = pd.DataFrame(rmse)
self.rmse = rmse
print("............ ", metric, " ..............")
print(rmse)
elif metric == "mae":
mae = {}
for clf_name, clf in classifiers:
mae[clf_name] = self.compute_mae(gt_overall, pred_overall[clf_name])
mae = pd.DataFrame(mae)
self.mae = mae
print("............ ", metric, " ..............")
print(mae)
elif metric == "rel_error":
rel_error = {}
for clf_name, clf | |
<filename>tools/RNN/rnn_compiler/lstm_compiler/parser.py<gh_stars>1-10
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from functools import reduce
import numpy as np
from xir import Graph
from xir import Op
from utils.processing import *
from utils.data import *
from utils.tools import *
from .utils import *
class XirParser():
def __init__(self, *args, **kwargs):
self.__nodes_list = kwargs['ordered_nodes']
self.__graph = kwargs['graph']
self.__fix_configs = kwargs['fix_configs']
self.__fix_data = kwargs['fix_data']
self.__op_edge = []
self.__op_info = []
self.__invalid_nodes = []
self.__cycle_ops = []
#self.DIM_UNCHANGE_OPS = ['eltwise', 'mul', 'relu', 'relu6', 'sigmoid', 'tanh', 'sub']
#self.MATMUL_OPS = ['matmul', 'linear']
self.__DATA_NODES = ['group', 'data', 'vector']
self.__MULTI_NODES = ['matmul', 'matmul_relu']
self.__EMUL_NODES = ['mul']
self.__ACTI_NODES = ['sigmoid', 'tanh']
self.__ADD_NODES = ['eltwise', 'sub', 'add', 'eltwise_relu']
self.__CONCAT_NODES = ['concat']
    def reorder_add_and_matmul(self):
        """Move a 'matmul' producer directly in front of the 2-input 'add'
        that consumes it, keeping matmul/add pairs adjacent in the node list.

        Only 'add' nodes whose two inputs are exactly one 'add' and one
        'matmul' are touched.
        """
        for i in range(len(self.__nodes_list)):
            node = self.__nodes_list[i]
            if node.get_type() == 'add' and node.get_input_num() == 2:
                input_ops = get_input_ops_list(node)
                input_ops_set = set([input_op.get_type() for input_op in input_ops])
                if input_ops_set == set(["add", "matmul"]):
                    # Remember the matmul input; the set equality above
                    # guarantees exactly one exists.
                    for input_op in input_ops:
                        if input_op.get_type() == "matmul":
                            op_temp = input_op
                    # NOTE(review): this pops `input_op` -- the LAST input of
                    # the loop above -- not `op_temp`.  If the matmul is not
                    # the last input, the wrong op is removed while the
                    # matmul is still inserted; confirm this is intended.
                    # pop+insert keeps the list length constant, so the
                    # range(len(...)) loop stays valid.
                    self.__nodes_list.pop(self.__nodes_list.index(input_op))
                    node_index = self.__nodes_list.index(node)
                    self.__nodes_list.insert(node_index, op_temp)
#nodes_list, graph, fix_configs, fix_data
    def make_op_connection(self):
        """Build the op-info list and edge list from the ordered nodes, then
        reorder them and apply per-op quantization (fix) configuration.
        """
        # Translate every xir node into a plain info dict.
        for node in self.__nodes_list:
            node_info = self.__get_node_info(node)
            self.__op_info.append(node_info)
        # NOTE(review): op_names is only referenced by the dead code kept in
        # the string literals below.
        op_names = [node['name'] for node in self.__op_info]
        '''
        def index(name):
            return op_names.index(name)
        '''
        # Record one [producer, consumer] edge per input of every node.
        for node in self.__nodes_list:
            if node.get_input_num() > 0:
                '''
                input_names = [input_op.get_name() for input_op in get_input_ops_list(node)]
                input_names.sort(key=index)
                for name in input_names:
                    self.__op_edge.append([name, node.get_name()])
                '''
                for op in get_input_ops_list(node):
                    self.__op_edge.append([op.get_name(), node.get_name()])
        self.__reorder_info_by_edge()
        self.__reorder_cross_in_graph()
        #self.__add_data_to_concat()
        # Copy bit width / fix point / signedness from the fix configuration
        # onto the matching op-info entries.
        for key in self.__fix_configs:
            for node in self.__op_info:
                if node['name'] == key:
                    node['bn'] = self.__fix_configs[key]['bit_width']
                    node['fp'] = self.__fix_configs[key]['fix_point']
                    node['signed'] = self.__fix_configs[key]['if_signed']
    def add_data_to_concat(self):
        """Insert a buffering 'data' node between every non-data producer
        and its consuming 'concat' node.

        The inserted node copies the producer's shape/quantization
        attributes and is also recorded in ``self.__invalid_nodes``.
        """
        concat_nodes_list = XirParser.get_nodes_from_type(self.__op_info, 'concat')
        if len(concat_nodes_list) == 0:
            return
        for concat_node in concat_nodes_list:
            input_nodes_name = XirParser.get_input_nodes(self.__op_edge, concat_node)
            for node_name in input_nodes_name:
                node = XirParser.get_node_from_name(self.__op_info, node_name)
                # Producers that are already data-like need no buffer node.
                if node['type'] not in self.__DATA_NODES:
                    data_node = {}
                    data_node['name'] = node['name'] + '_data'
                    data_node['type'] = 'data'
                    # Inherit whatever attributes the producer carries.
                    if 'shape' in node.keys(): data_node['shape'] = node['shape']
                    if 'fp' in node.keys(): data_node['fp'] = node['fp']
                    if 'bn' in node.keys(): data_node['bn'] = node['bn']
                    if 'signed' in node.keys(): data_node['signed'] = node['signed']
                    if 'shift' in node.keys(): data_node['shift'] = node['shift']
                    XirParser.insert_node(self.__op_info, node, data_node)
                    concat_edge = [node['name'], concat_node['name']]
                    XirParser.insert_edge(self.__op_edge, concat_edge, data_node)
                    self.__invalid_nodes.append(data_node)
def __reorder_info_by_edge(self):
nodes_rev = self.__op_edge[::-1]
nodes_1 = []
nodes_2 = []
for node in nodes_rev:
node_temp = copy.deepcopy(node)
if node_temp[1] in nodes_2:
node_temp.pop(1)
else:
nodes_2.append(node_temp[1])
nodes_1.append(node_temp)
nodes_3 = nodes_1[::-1]
nodes_reorder = [node for nodes_4 in nodes_3 for node in nodes_4]
self.__op_info.sort(key=lambda x:nodes_reorder.index(x['name']))
# This function should rewrite later
    def __reorder_cross_in_graph(self):
        """Swap op-info entries so that edges between the same pair of
        hardware engines do not cross, then re-sort edges and infos.

        NOTE(review): the author marked this for a rewrite ("This function
        should rewrite later"); the pairwise swap below mutates
        ``self.__op_info`` in place and its correctness for more than two
        conflicting edges has not been verified.
        """
        # Bucket edges by "<producer engine>2<consumer engine>".
        engine_connection_dict = {}
        for edge in self.__op_edge:
            edge_node0 = XirParser.get_node_from_name(self.__op_info, edge[0])
            edge_node1 = XirParser.get_node_from_name(self.__op_info, edge[1])
            edge_node0_engine = GLOBAL_VARIABLE.TYPE_ENGINE_DICT[edge_node0['type']]
            edge_node1_engine = GLOBAL_VARIABLE.TYPE_ENGINE_DICT[edge_node1['type']]
            engine_connection_name = edge_node0_engine + '2' + edge_node1_engine
            if engine_connection_name not in engine_connection_dict.keys():
                engine_connection_dict[engine_connection_name] = [[edge_node0, edge_node1]]
            else:
                engine_connection_dict[engine_connection_name].append([edge_node0, edge_node1])
        for (name, edge_list) in engine_connection_dict.items():
            # NOTE(review): engine0_name/engine1_name are computed but unused.
            engine0_name, engine1_name = name.split('2')
            edge_len = len(edge_list)
            for i in range(edge_len):
                node_edge = edge_list[i]
                for item in edge_list[i+1::]:
                    cur_edge_node0_index = self.__op_info.index(node_edge[0])
                    next_edge_node0_index = self.__op_info.index(item[0])
                    # Swap the two producers when their current order
                    # contradicts the order implied by the consumers' edge.
                    if (cur_edge_node0_index < next_edge_node0_index) and ([item[1]['name'], node_edge[1]['name']] in self.__op_edge):
                        temp = node_edge[0]
                        self.__op_info[cur_edge_node0_index] = item[0]
                        self.__op_info[next_edge_node0_index] = temp
                    elif (cur_edge_node0_index > next_edge_node0_index) and ([node_edge[1]['name'], item[1]['name']] in self.__op_edge):
                        temp = node_edge[0]
                        self.__op_info[cur_edge_node0_index] = item[0]
                        self.__op_info[next_edge_node0_index] = temp
        # Re-sort the edges by consumer position, then re-derive info order.
        info_name_list = [node['name'] for node in self.__op_info]
        self.__op_edge.sort(key=lambda x:info_name_list.index(x[1]))
        self.__reorder_info_by_edge()
#node, info_list, graph, fix_data
    def __get_node_info(self, node):
        """Translate one xir op into a plain info dict.

        Every entry gets name plus default quantization attributes
        (fp=0, bn=16, signed=True); the 'type', 'shape' and (for constants /
        data) 'data' fields depend on the op type.  Raises AttributeError
        when an input op has not been processed yet, KeyError for
        unsupported op types.
        """
        node_type = node.get_type()
        #print(self.__nodes_list)
        # Names of all ops processed so far (inputs must already be known).
        info_name = [temp['name'] for temp in self.__op_info]
        node_info = {'name':node.get_name(), 'fp':0, 'bn':16, 'signed':True}
        if node_type == 'const':
            #node_info['shape'] = node.get_output_tensor().dims
            node_ndim = node.get_output_tensor().ndim
            down_ops = get_down_ops(node, self.__graph)
            # Prefer quantized data from the fix file when available.
            if node.get_name() in self.__fix_data:
                const_data = self.__fix_data[node.get_name()]
            else:
                #const_data = node.get_attrs()['data']
                #const_data = node.get_attr('data')
                const_data = const_op_data(node)
            if node_ndim == 2:
                # 2-D constants feeding a matmul are weight groups.
                if (len(down_ops) > 0) and (any(op.get_type() in GLOBAL_VARIABLE.MATMUL_OPS for op in down_ops)):
                    node_info['type'] = 'group'
                    #node_info['shape'] = node.get_output_tensor().dims
                    #node_info['data'] = const_data.tolist()
                else:
                    node_info['type'] = 'data'
                    #node_info['shape'] = node.get_output_tensor().dims[::-1]
                    #node_info['data'] = const_data.transpose().tolist()
            elif node_ndim == 1:
                node_info['type'] = 'data'
                '''
                node_info['shape'] = [node.get_output_tensor().dims[0], 1]
                node_info['data'] = const_data[:,np.newaxis].tolist()
                '''
                node_info['shape'] = node.get_output_tensor().dims
                node_info['data'] = const_data.tolist()
        elif node_type == 'data':
            #node_info['shape'] = node.get_output_tensor().dims[::-1]
            #node_info['shape'] = node.get_output_tensor().dims
            node_info['shape'] = node.get_output_tensor().dims
            # Placeholder contents; real data arrives at runtime.
            node_info['data'] = np.zeros(node_info['shape'], dtype=np.int64).tolist()
            down_ops = get_down_ops(node, self.__graph)
            # Data feeding a matmul is treated as the activation vector.
            if (len(down_ops) > 0) and (any(op.get_type() in GLOBAL_VARIABLE.MATMUL_OPS for op in down_ops)):
                node_info['type'] = 'vector'
            else:
                node_info['type'] = 'data'
        elif node_type in GLOBAL_VARIABLE.MATMUL_OPS:
            attr_dict = get_matmul_weights_vector(node)
            weights_name = attr_dict['weights']
            vector_name = attr_dict['vector']
            if (weights_name not in info_name) or (vector_name not in info_name):
                raise AttributeError('Have not this op yet')
            weight_dim = XirParser.get_shape_from_name(self.__op_info, weights_name)
            vector_dim = XirParser.get_shape_from_name(self.__op_info, vector_name)
            # 'matmul' carries explicit transpose attrs; the fused variant
            # implies transpose_b only.
            if node.get_type() == 'matmul':
                node_info['transpose_a'] = node.get_attr('transpose_a')
                node_info['transpose_b'] = node.get_attr('transpose_b')
            else:
                node_info['transpose_a'] = False
                node_info['transpose_b'] = True
            if node_info['transpose_a']:
                vector_dim = vector_dim[::-1]
            if node_info['transpose_b']:
                weight_dim = weight_dim[::-1]
            if node.has_attr('fuse_relu'):
                node_info['type'] = 'matmul_relu'
            else:
                node_info['type'] = 'matmul'
            # Standard matmul result shape: rows of A x columns of B.
            node_info['shape'] = [vector_dim[0], weight_dim[1]]
        elif node_type in GLOBAL_VARIABLE.DIM_UNCHANGE_OPS:
            # Elementwise ops keep the shape of (any) input.
            any_op_name = get_any_input_op(node)
            if any_op_name not in info_name:
                raise AttributeError('Have not this op yet')
            node_info['type'] = node.get_type()
            #print('self.__op_info', self.__op_info)
            #print('any_op_name', any_op_name)
            node_info['shape'] = XirParser.get_shape_from_name(self.__op_info, any_op_name)
            #node_info['data'] = np.zeros(node_info['shape'], dtype=np.int64).tolist()
        elif node_type in GLOBAL_VARIABLE.ADD_OPS:
            input_op_num = node.get_input_num()
            node_info['type'] = 'eltwise' if node_type == 'add' else node_type
            if node.has_attr('fuse_relu'):
                node_info['type'] = node_info['type'] + '_relu'
            if input_op_num > 0:
                input_ops = get_input_ops_list(node)
                ops_shape = []
                for op in input_ops:
                    op_shape = XirParser.get_shape_from_name(self.__op_info, op.get_name())
                    ops_shape.append(op_shape)
                # Broadcast the input shapes through an actual numpy add to
                # obtain the result shape.
                ops_array = [np.ones(shape) for shape in ops_shape]
                node_info['shape'] = reduce(lambda x,y: np.add(x,y), ops_array).shape
        elif node_type == 'mul':
            input_op_num = node.get_input_num()
            node_info['type'] = node.get_type()
            if input_op_num > 0:
                input_ops = get_input_ops_list(node)
                ops_shape = []
                for op in input_ops:
                    op_shape = XirParser.get_shape_from_name(self.__op_info, op.get_name())
                    ops_shape.append(op_shape)
                # Broadcast via elementwise multiply to get the result shape.
                ops_array = [np.ones(shape) for shape in ops_shape]
                node_info['shape'] = reduce(lambda x,y: x*y, ops_array).shape
        elif node_type == 'concat':
            concat_axis = node.get_attrs()['axis']
            input_op_num = node.get_input_num()
            node_info['type'] = node.get_type()
            node_info['axis'] = concat_axis
            if input_op_num > 0:
                input_ops = get_input_ops_list(node)
                ops_shape = []
                for op in input_ops:
                    op_shape = XirParser.get_shape_from_name(self.__op_info, op.get_name())
                    ops_shape.append(op_shape)
                # Concatenate dummy arrays along the axis for the shape.
                ops_array = [np.ones(shape) for shape in ops_shape]
                node_info['shape'] = reduce(lambda x,y: np.concatenate((x,y), axis=concat_axis), ops_array).shape
        else:
            raise KeyError('Node type is not supported')
        return node_info
#node_edge, node_info_list, actv_configs
def extend_op_connection(self):
result_dict = {'name':self.__graph.get_name()+'__result', 'type':'data', 'fp':0, 'bn':16, 'signed':True}
#result_dict['shape'] = self.__op_info[-1]['shape']
result_dict['data'] = np.zeros(self.__op_info[-1]['shape'], dtype=np.int64).tolist()
result_dict['shape'] = np.array(result_dict['data']).squeeze().shape
if len(result_dict['shape'])==0: result_dict['shape'] = [1]
result_dict['data'] = np.zeros(result_dict['shape'], dtype=np.int64).tolist()
self.__op_info.append(result_dict)
last_node_name = self.__op_info[-2]['name']
self.__op_edge.append([last_node_name, result_dict['name']])
sgmd_dict = {'name':'actv_sgmd', 'type':'data', 'fp':12, 'bn':16, 'signed':False}
sgmd = np.array(GLOBAL_VARIABLE.SIGMOID_ACTV).reshape(2048)
sgmd_dict['shape'] = sgmd.shape
sgmd_dict['data'] = sgmd.tolist()
self.__op_info.append(sgmd_dict)
tanh_dict = {'name':'actv_tanh', 'type':'data', 'fp':13, 'bn':16, 'signed':True}
tanh = np.array(GLOBAL_VARIABLE.TANH_ACTV).reshape(2048)
tanh_dict['shape'] = tanh.shape
tanh_dict['data'] = tanh.tolist()
self.__op_info.append(tanh_dict)
def make_shift_attrs(self):
output_fp = self.__op_info[-4]['fp']
for info_node in self.__op_info:
if info_node['type'] in self.__DATA_NODES:
info_node['shift'] = info_node['fp']
elif info_node['type'] in self.__MULTI_NODES:
info_node['shift'] = 0
cur_op = self.__graph.get_op(info_node['name'])
input_ops = get_input_ops_list(cur_op)
for input_op in input_ops:
info_node['shift'] += XirParser.get_fp_from_name(self.__op_info, input_op.get_name())
info_node['shift'] -= output_fp
elif info_node['type'] in self.__EMUL_NODES:
info_node['shift'] = 0
cur_op = self.__graph.get_op(info_node['name'])
input_ops = get_input_ops_list(cur_op)
for input_op in input_ops:
if input_op.get_type() == 'sigmoid':
info_node['shift'] += GLOBAL_VARIABLE.SIGMOID_EXPO
elif input_op.get_type() == 'tanh':
info_node['shift'] += GLOBAL_VARIABLE.TANH_EXPO
else:
info_node['shift'] += XirParser.get_fp_from_name(self.__op_info, input_op.get_name())
info_node['shift'] -= output_fp
elif info_node['type'] in self.__ACTI_NODES:
info_node['shift'] = output_fp
elif info_node['type'] in self.__ADD_NODES:
info_node['shift'] = 0
elif info_node['type'] in self.__CONCAT_NODES:
info_node['shift'] = 0
'''
def get_cycle_ops(self):
h_prev_name = XirParser.get_op_from_name_and_type(self.__op_info, 'h_prev', 'vector')
if h_prev_name is not None:
h_next_name = XirParser.get_op_from_name_and_type(self.__op_info, 'h_next', None)
if h_next_name is not None:
self.__cycle_ops.append([h_prev_name, h_next_name])
c_prev_name = XirParser.get_op_from_name_and_type(self.__op_info, 'c_prev', 'data')
if c_prev_name is not None:
c_next_name = XirParser.get_op_from_name_and_type(self.__op_info, 'c_next', 'eltwise')
if c_next_name is not None:
self.__cycle_ops.append([c_prev_name, c_next_name])
return self.__cycle_ops
'''
def get_cycle_ops(self):
if not self._XirParser__graph.has_attr('return_ops'):
return
#return_ops = getattr(self.__graph.metadata.get_attrs(), "get_attr_vstr")("return_ops")
return_ops = self._XirParser__graph.get_attr('return_ops')
h_prev_name = XirParser.get_op_from_name_and_type(self.__op_info, 'input_1', 'vector')
if h_prev_name is not None:
h_next_name = return_ops[0][:-4] if return_ops[0].endswith('_fix') else return_ops[0]
self.__cycle_ops.append([h_prev_name, h_next_name])
c_prev_name = XirParser.get_op_from_name_and_type(self.__op_info, 'input_2', 'data')
| |
VelocityBodyYawspeed object
return \
(self.forward_m_s == to_compare.forward_m_s) and \
(self.right_m_s == to_compare.right_m_s) and \
(self.down_m_s == to_compare.down_m_s) and \
(self.yawspeed_deg_s == to_compare.yawspeed_deg_s)
except AttributeError:
return False
    def __str__(self):
        """ VelocityBodyYawspeed in string representation """
        # Render each field as "name: value", joined into one bracketed line.
        struct_repr = ", ".join([
            "forward_m_s: " + str(self.forward_m_s),
            "right_m_s: " + str(self.right_m_s),
            "down_m_s: " + str(self.down_m_s),
            "yawspeed_deg_s: " + str(self.yawspeed_deg_s)
            ])
        return f"VelocityBodyYawspeed: [{struct_repr}]"
    @staticmethod
    def translate_from_rpc(rpcVelocityBodyYawspeed):
        """ Translates a gRPC struct to the SDK equivalent """
        # Field-by-field copy out of the protobuf message.
        return VelocityBodyYawspeed(
            rpcVelocityBodyYawspeed.forward_m_s,
            rpcVelocityBodyYawspeed.right_m_s,
            rpcVelocityBodyYawspeed.down_m_s,
            rpcVelocityBodyYawspeed.yawspeed_deg_s
            )
    def translate_to_rpc(self, rpcVelocityBodyYawspeed):
        """ Translates this SDK object into its gRPC equivalent """
        # Field-by-field copy into the caller-supplied protobuf message.
        rpcVelocityBodyYawspeed.forward_m_s = self.forward_m_s
        rpcVelocityBodyYawspeed.right_m_s = self.right_m_s
        rpcVelocityBodyYawspeed.down_m_s = self.down_m_s
        rpcVelocityBodyYawspeed.yawspeed_deg_s = self.yawspeed_deg_s
class VelocityNedYaw:
    """
    Type for velocity commands in NED (North East Down) coordinates and yaw.

    Parameters
    ----------
    north_m_s : float
         Velocity North (in metres/second)

    east_m_s : float
         Velocity East (in metres/second)

    down_m_s : float
         Velocity Down (in metres/second)

    yaw_deg : float
         Yaw in degrees (0 North, positive is clock-wise looking from above)
    """

    def __init__(self, north_m_s, east_m_s, down_m_s, yaw_deg):
        """ Initializes the VelocityNedYaw object """
        self.north_m_s = north_m_s
        self.east_m_s = east_m_s
        self.down_m_s = down_m_s
        self.yaw_deg = yaw_deg

    def __equals__(self, to_compare):
        """ Checks if two VelocityNedYaw are the same """
        try:
            # Comparing against an object without these attributes raises
            # AttributeError, which we translate into "not equal".
            return (self.north_m_s, self.east_m_s, self.down_m_s, self.yaw_deg) == \
                   (to_compare.north_m_s, to_compare.east_m_s,
                    to_compare.down_m_s, to_compare.yaw_deg)
        except AttributeError:
            return False

    def __str__(self):
        """ VelocityNedYaw in string representation """
        fields = (
            f"north_m_s: {self.north_m_s}",
            f"east_m_s: {self.east_m_s}",
            f"down_m_s: {self.down_m_s}",
            f"yaw_deg: {self.yaw_deg}",
        )
        return "VelocityNedYaw: [" + ", ".join(fields) + "]"

    @staticmethod
    def translate_from_rpc(rpcVelocityNedYaw):
        """ Translates a gRPC struct to the SDK equivalent """
        return VelocityNedYaw(
            rpcVelocityNedYaw.north_m_s,
            rpcVelocityNedYaw.east_m_s,
            rpcVelocityNedYaw.down_m_s,
            rpcVelocityNedYaw.yaw_deg)

    def translate_to_rpc(self, rpcVelocityNedYaw):
        """ Translates this SDK object into its gRPC equivalent """
        for field in ("north_m_s", "east_m_s", "down_m_s", "yaw_deg"):
            setattr(rpcVelocityNedYaw, field, getattr(self, field))
class OffboardResult:
"""
Result type.
Parameters
----------
result : Result
Result enum value
result_str : std::string
Human-readable English string describing the result
"""
class Result(Enum):
"""
Possible results returned for offboard requests
Values
------
UNKNOWN
Unknown result
SUCCESS
Request succeeded
NO_SYSTEM
No system is connected
CONNECTION_ERROR
Connection error
BUSY
Vehicle is busy
COMMAND_DENIED
Command denied
TIMEOUT
Request timed out
NO_SETPOINT_SET
Cannot start without setpoint set
"""
UNKNOWN = 0
SUCCESS = 1
NO_SYSTEM = 2
CONNECTION_ERROR = 3
BUSY = 4
COMMAND_DENIED = 5
TIMEOUT = 6
NO_SETPOINT_SET = 7
def translate_to_rpc(self):
if self == OffboardResult.Result.UNKNOWN:
return offboard_pb2.OffboardResult.RESULT_UNKNOWN
if self == OffboardResult.Result.SUCCESS:
return offboard_pb2.OffboardResult.RESULT_SUCCESS
if self == OffboardResult.Result.NO_SYSTEM:
return offboard_pb2.OffboardResult.RESULT_NO_SYSTEM
if self == OffboardResult.Result.CONNECTION_ERROR:
return offboard_pb2.OffboardResult.RESULT_CONNECTION_ERROR
if self == OffboardResult.Result.BUSY:
return offboard_pb2.OffboardResult.RESULT_BUSY
if self == OffboardResult.Result.COMMAND_DENIED:
return offboard_pb2.OffboardResult.RESULT_COMMAND_DENIED
if self == OffboardResult.Result.TIMEOUT:
return offboard_pb2.OffboardResult.RESULT_TIMEOUT
if self == OffboardResult.Result.NO_SETPOINT_SET:
return offboard_pb2.OffboardResult.RESULT_NO_SETPOINT_SET
@staticmethod
def translate_from_rpc(rpc_enum_value):
""" Parses a gRPC response """
if rpc_enum_value == offboard_pb2.OffboardResult.RESULT_UNKNOWN:
return OffboardResult.Result.UNKNOWN
if rpc_enum_value == offboard_pb2.OffboardResult.RESULT_SUCCESS:
return OffboardResult.Result.SUCCESS
if rpc_enum_value == offboard_pb2.OffboardResult.RESULT_NO_SYSTEM:
return OffboardResult.Result.NO_SYSTEM
if rpc_enum_value == offboard_pb2.OffboardResult.RESULT_CONNECTION_ERROR:
return OffboardResult.Result.CONNECTION_ERROR
if rpc_enum_value == offboard_pb2.OffboardResult.RESULT_BUSY:
return OffboardResult.Result.BUSY
if rpc_enum_value == offboard_pb2.OffboardResult.RESULT_COMMAND_DENIED:
return OffboardResult.Result.COMMAND_DENIED
if rpc_enum_value == offboard_pb2.OffboardResult.RESULT_TIMEOUT:
return OffboardResult.Result.TIMEOUT
if rpc_enum_value == offboard_pb2.OffboardResult.RESULT_NO_SETPOINT_SET:
return OffboardResult.Result.NO_SETPOINT_SET
def __str__(self):
return self.name
def __init__(
        self,
        result,
        result_str):
    """ Initializes the OffboardResult object

    :param result: an OffboardResult.Result enum value (see
        translate_from_rpc below)
    :param result_str: human-readable result description from the server
    """
    self.result = result
    self.result_str = result_str
def __eq__(self, to_compare):
    """ Checks if two OffboardResult are the same.

    Note: the generated code named this ``__equals__``, which is not a
    Python special method, so ``==`` silently compared identities.
    Defining ``__eq__`` makes ``==`` compare by value as intended.
    """
    try:
        # Try to compare - this likely fails when it is compared to a non
        # OffboardResult object
        return \
            (self.result == to_compare.result) and \
            (self.result_str == to_compare.result_str)
    except AttributeError:
        return False

# Backward-compatible alias: existing callers of __equals__ keep working.
__equals__ = __eq__

# Defining __eq__ would otherwise set __hash__ to None; keep the previous
# identity-based hashing so instances stay usable in sets/dicts.
__hash__ = object.__hash__
def __str__(self):
    """ OffboardResult in string representation """
    fields = [
        f"result: {self.result}",
        f"result_str: {self.result_str}",
    ]
    return "OffboardResult: [" + ", ".join(fields) + "]"
@staticmethod
def translate_from_rpc(rpcOffboardResult):
    """ Translates a gRPC struct to the SDK equivalent """
    # Convert the nested enum first, then wrap both fields in the SDK type.
    sdk_result = OffboardResult.Result.translate_from_rpc(
        rpcOffboardResult.result)
    return OffboardResult(sdk_result, rpcOffboardResult.result_str)
def translate_to_rpc(self, rpcOffboardResult):
    """ Translates this SDK object into its gRPC equivalent.

    Mutates the passed-in protobuf message in place (protobuf fields
    cannot be replaced wholesale, only assigned).
    """
    rpcOffboardResult.result = self.result.translate_to_rpc()
    rpcOffboardResult.result_str = self.result_str
class OffboardError(Exception):
    """ Raised when a OffboardResult is a fail code """

    def __init__(self, result, origin, *params):
        """ Remember the failing result, the API call of origin, and the
        call's parameters for the error message. """
        self._result = result
        self._origin = origin
        self._params = params

    def __str__(self):
        return (f"{self._result.result}: '{self._result.result_str}'; "
                f"origin: {self._origin}; params: {self._params}")
class Offboard(AsyncBase):
"""
*
Control a drone with position, velocity, attitude or motor commands.
The module is called offboard because the commands can be sent from external sources
as opposed to onboard control right inside the autopilot "board".
Client code must specify a setpoint before starting offboard mode.
Mavsdk automatically sends setpoints at 20Hz (PX4 Offboard mode requires that setpoints
are minimally sent at 2Hz).
Generated by dcsdkgen - MAVSDK Offboard API
"""
# Plugin name
name = "Offboard"
def _setup_stub(self, channel):
    """ Setups the api stub

    ``channel`` is an open gRPC channel to the MAVSDK server.
    """
    self._stub = offboard_pb2_grpc.OffboardServiceStub(channel)
def _extract_result(self, response):
    """ Returns the response status and description """
    # Every Offboard RPC reply carries an offboard_result struct; convert
    # it to the SDK-level OffboardResult.
    return OffboardResult.translate_from_rpc(response.offboard_result)
async def start(self):
    """
    Start offboard control.

    Raises
    ------
    OffboardError
        If the request fails. The error contains the reason for the failure.
    """
    response = await self._stub.Start(offboard_pb2.StartRequest())
    result = self._extract_result(response)
    if result.result is not OffboardResult.Result.SUCCESS:
        raise OffboardError(result, "start()")
async def stop(self):
    """
    Stop offboard control.

    The vehicle will be put into Hold mode: https://docs.px4.io/en/flight_modes/hold.html

    Raises
    ------
    OffboardError
        If the request fails. The error contains the reason for the failure.
    """
    response = await self._stub.Stop(offboard_pb2.StopRequest())
    result = self._extract_result(response)
    if result.result is not OffboardResult.Result.SUCCESS:
        raise OffboardError(result, "stop()")
async def is_active(self):
    """
    Check if offboard control is active.

    True means that the vehicle is in offboard mode and we are actively sending
    setpoints.

    Returns
    -------
    is_active : bool
        True if offboard is active
    """
    response = await self._stub.IsActive(offboard_pb2.IsActiveRequest())
    return response.is_active
async def set_attitude(self, attitude):
    """
    Set the attitude in terms of roll, pitch and yaw in degrees with thrust.

    Parameters
    ----------
    attitude : Attitude
        Attitude roll, pitch and yaw along with thrust

    Raises
    ------
    OffboardError
        If the request fails. The error contains the reason for the failure.
    """
    request = offboard_pb2.SetAttitudeRequest()
    # Serialize the SDK object into the request message before sending.
    attitude.translate_to_rpc(request.attitude)
    result = self._extract_result(await self._stub.SetAttitude(request))
    if result.result is not OffboardResult.Result.SUCCESS:
        raise OffboardError(result, "set_attitude()", attitude)
async def set_actuator_control(self, actuator_control):
    """
    Set direct actuator control values to groups #0 and #1.

    First 8 controls will go to control group 0, the following 8 controls to control group 1 (if
    actuator_control.num_controls more than 8).

    Parameters
    ----------
    actuator_control : ActuatorControl
        Actuator control values

    Raises
    ------
    OffboardError
        If the request fails. The error contains the reason for the failure.
    """
    request = offboard_pb2.SetActuatorControlRequest()
    # Serialize the SDK object into the request message before sending.
    actuator_control.translate_to_rpc(request.actuator_control)
    result = self._extract_result(
        await self._stub.SetActuatorControl(request))
    if result.result is not OffboardResult.Result.SUCCESS:
        raise OffboardError(result, "set_actuator_control()", actuator_control)
async def set_attitude_rate(self, attitude_rate):
    """
    Set the attitude rate in terms of pitch, roll and yaw angular rate along with thrust.

    Parameters
    ----------
    attitude_rate : AttitudeRate
        Attitude rate roll, pitch and yaw angular rate along with thrust

    Raises
    ------
    OffboardError
        If the request fails. The error contains the reason for the failure.
    """
    request = offboard_pb2.SetAttitudeRateRequest()
    # Serialize the SDK object into the request message before sending.
    attitude_rate.translate_to_rpc(request.attitude_rate)
    result = self._extract_result(await self._stub.SetAttitudeRate(request))
    if result.result is not OffboardResult.Result.SUCCESS:
        raise OffboardError(result, "set_attitude_rate()", attitude_rate)
async def set_position_ned(self, position_ned_yaw):
"""
Set the position in NED coordinates and yaw.
Parameters
----------
position_ned_yaw : PositionNedYaw
Position and yaw
Raises
------
OffboardError
If the request fails. The error contains the reason for the failure.
"""
request = offboard_pb2.SetPositionNedRequest()
position_ned_yaw.translate_to_rpc(request.position_ned_yaw)
response = await self._stub.SetPositionNed(request)
result = | |
BotInfo entity exists and has Machine Provider-related fields.
Args:
machine_lease: MachineLease instance.
"""
if machine_lease.bot_id == machine_lease.hostname:
return
bot_info = bot_management.get_info_key(machine_lease.hostname).get()
if not (
bot_info
and bot_info.lease_id
and bot_info.lease_expiration_ts
and bot_info.machine_type
):
logging.info(
'Creating BotEvent\nKey: %s\nHostname: %s\nBotInfo: %s',
machine_lease.key,
machine_lease.hostname,
bot_info,
)
bot_management.bot_event(
event_type='bot_leased',
bot_id=machine_lease.hostname,
external_ip=None,
authenticated_as=None,
dimensions=None,
state=None,
version=None,
quarantined=False,
task_id='',
task_name=None,
lease_id=machine_lease.lease_id,
lease_expiration_ts=machine_lease.lease_expiration_ts,
machine_type=machine_lease.machine_type.id(),
)
# Occasionally bot_management.bot_event fails to store the BotInfo so
# verify presence of Machine Provider fields. See https://crbug.com/681224.
bot_info = bot_management.get_info_key(machine_lease.hostname).get()
if not (
bot_info
and bot_info.lease_id
and bot_info.lease_expiration_ts
and bot_info.machine_type
):
# If associate_bot_id isn't called, cron will try again later.
logging.error(
'Failed to put BotInfo\nKey: %s\nHostname: %s\nBotInfo: %s',
machine_lease.key,
machine_lease.hostname,
bot_info,
)
return
logging.info(
'Put BotInfo\nKey: %s\nHostname: %s\nBotInfo: %s',
machine_lease.key,
machine_lease.hostname,
bot_info,
)
associate_bot_id(machine_lease.key, machine_lease.hostname)
@ndb.transactional
def associate_instruction_ts(key, instruction_ts):
    """Associates an instruction time with the given machine lease.

    Only the first instruction time is recorded; later calls are no-ops.

    Args:
      key: ndb.Key for a MachineLease entity.
      instruction_ts: DateTime indicating when the leased machine was
        instructed.
    """
    machine_lease = key.get()
    if machine_lease is None:
        logging.error('MachineLease does not exist\nKey: %s', key)
    elif not machine_lease.instruction_ts:
        machine_lease.instruction_ts = instruction_ts
        machine_lease.put()
def send_connection_instruction(machine_lease):
    """Sends an instruction to the given machine to connect to the server.

    Args:
      machine_lease: MachineLease instance.
    """
    now = utils.utcnow()
    response = machine_provider.instruct_machine(
        machine_lease.client_request_id,
        'https://%s' % app_identity.get_default_version_hostname(),
    )
    if not response:
        logging.error(
            'MachineLease instruction got empty response:\nKey: %s\nHostname: %s',
            machine_lease.key,
            machine_lease.hostname,
        )
        return
    error = response.get('error')
    if not error:
        associate_instruction_ts(machine_lease.key, now)
    elif error == 'ALREADY_RECLAIMED':
        # Can happen if lease duration is very short or there is a significant delay
        # in creating the BotInfo or instructing the machine. Consider it an error.
        logging.error(
            'MachineLease expired before machine connected:\nKey: %s\nHostname: %s',
            machine_lease.key,
            machine_lease.hostname,
        )
        clear_lease_request(machine_lease.key, machine_lease.client_request_id)
    else:
        logging.warning(
            'MachineLease instruction error:\nKey: %s\nHostname: %s\nError: %s',
            machine_lease.key,
            machine_lease.hostname,
            error,
        )
@ndb.transactional
def associate_connection_ts(key, connection_ts):
    """Associates a connection time with the given machine lease.

    Only the first connection time is recorded; later calls are no-ops.

    Args:
      key: ndb.Key for a MachineLease entity.
      connection_ts: DateTime indicating when the bot first connected.
    """
    machine_lease = key.get()
    if machine_lease is None:
        logging.error('MachineLease does not exist\nKey: %s', key)
    elif not machine_lease.connection_ts:
        machine_lease.connection_ts = connection_ts
        machine_lease.put()
def check_for_connection(machine_lease):
    """Checks for a bot_connected event.

    Records the connection time if the bot has connected; otherwise, if the
    bot is missing or dead, terminates and releases it.

    Args:
      machine_lease: MachineLease instance.
    """
    assert machine_lease.instruction_ts
    # Technically this query is wrong because it looks at events in reverse
    # chronological order. The connection time we find here is actually the
    # most recent connection when we want the earliest. However, this function
    # is only called for new bots and stops being called once the connection
    # time is recorded, so the connection time we record should end up being the
    # first connection anyways. Iterating in the correct order would require
    # building a new, large index.
    for event in bot_management.get_events_query(machine_lease.bot_id, True):
        # We don't want to find a bot_connected event from before we sent the
        # connection instruction (e.g. in the event of hostname reuse), so do not
        # look at events from before the connection instruction was sent.
        if event.ts < machine_lease.instruction_ts:
            break
        if event.event_type == 'bot_connected':
            logging.info(
                'Bot connected:\nKey: %s\nHostname: %s\nTime: %s',
                machine_lease.key,
                machine_lease.hostname,
                event.ts,
            )
            associate_connection_ts(machine_lease.key, event.ts)
            # Export how long the machine took to connect after instruction.
            ts_mon_metrics.machine_types_connection_time.add(
                (event.ts - machine_lease.instruction_ts).total_seconds(),
                fields={
                    'machine_type': machine_lease.machine_type.id(),
                },
            )
            return
    # The bot hasn't connected yet. If it's dead or missing, release the lease.
    # At this point we have sent the connection instruction so the bot could still
    # connect after we release the lease but before Machine Provider actually
    # deletes the bot. Therefore we also schedule a termination task if releasing
    # the bot. That way, if the bot connects, it will just shut itself down.
    bot_info = bot_management.get_info_key(machine_lease.hostname).get()
    if not bot_info:
        logging.error(
            'BotInfo missing:\nKey: %s\nHostname: %s',
            machine_lease.key,
            machine_lease.hostname,
        )
        task_scheduler.schedule_request(
            task_request.create_termination_task(machine_lease.hostname, True),
            None,
            check_acls=False,
        )
        if release(machine_lease):
            # No BotInfo to clean up; just forget the lease request.
            clear_lease_request(machine_lease.key, machine_lease.client_request_id)
        return
    if bot_info.is_dead(utils.utcnow()):
        logging.warning(
            'Bot failed to connect in time:\nKey: %s\nHostname: %s',
            machine_lease.key,
            machine_lease.hostname,
        )
        task_scheduler.schedule_request(
            task_request.create_termination_task(machine_lease.hostname, True),
            None,
            check_acls=False,
        )
        if release(machine_lease):
            cleanup_bot(machine_lease)
def cleanup_bot(machine_lease):
    """Cleans up entities after a bot is removed.

    Removes the bot's task-queue state and BotInfo entity, then forgets the
    lease request on the MachineLease.

    Args:
      machine_lease: MachineLease instance.
    """
    task_queues.cleanup_after_bot(machine_lease.hostname)
    bot_management.get_info_key(machine_lease.hostname).delete()
    clear_lease_request(machine_lease.key, machine_lease.client_request_id)
def last_shutdown_ts(hostname):
    """Returns the time the given bot posted a final bot_shutdown event.

    The bot_shutdown event is only considered if it is the last recorded event.

    Args:
      hostname: Hostname of the machine.

    Returns:
      datetime.datetime or None if the last recorded event is not bot_shutdown.
    """
    # Events are queried newest-first; .get() returns the latest one.
    event = bot_management.get_events_query(hostname, True).get()
    if not event or event.event_type != 'bot_shutdown':
        return None
    return event.ts
def release(machine_lease):
    """Releases the given lease.

    Args:
      machine_lease: MachineLease instance.

    Returns:
      True if the lease was released, False otherwise.
    """
    response = machine_provider.release_machine(machine_lease.client_request_id)
    # Robustness fix: an empty response (RPC failure) used to crash on
    # .get(); treat it as a failed release like the other RPC helpers do.
    if not response:
        logging.error(
            'Lease release got empty response\nKey: %s\nRequest ID: %s',
            machine_lease.key,
            machine_lease.client_request_id,
        )
        return False
    if response.get('error'):
        error = machine_provider.LeaseReleaseRequestError.lookup_by_name(
            response['error'])
        # ALREADY_RECLAIMED and NOT_FOUND mean there is nothing left to
        # release, so they are treated as success.
        if error not in (
            machine_provider.LeaseReleaseRequestError.ALREADY_RECLAIMED,
            machine_provider.LeaseReleaseRequestError.NOT_FOUND,
        ):
            logging.error(
                'Lease release failed\nKey: %s\nRequest ID: %s\nError: %s',
                machine_lease.key,
                response['client_request_id'],
                response['error'],
            )
            return False
    # Log format fixed: was 'Key%s' (missing ': ').
    logging.info(
        'MachineLease released:\nKey: %s\nHostname: %s',
        machine_lease.key,
        machine_lease.hostname,
    )
    return True
def handle_termination_task(machine_lease):
    """Checks the state of the termination task, releasing the lease if completed.

    Args:
      machine_lease: MachineLease instance.
    """
    assert machine_lease.termination_task
    task_result_summary = task_pack.unpack_result_summary_key(
        machine_lease.termination_task).get()
    if task_result_summary.state in task_result.State.STATES_EXCEPTIONAL:
        logging.info(
            'Termination failed:\nKey: %s\nHostname: %s\nTask ID: %s\nState: %s',
            machine_lease.key,
            machine_lease.hostname,
            machine_lease.termination_task,
            task_result.State.to_string(task_result_summary.state),
        )
        # Clearing the failed task allows a new termination attempt to be
        # scheduled later.
        clear_termination_task(machine_lease.key, machine_lease.termination_task)
        return
    if task_result_summary.state == task_result.State.COMPLETED:
        # There is a race condition where the bot reports the termination task as
        # completed but hasn't exited yet. The last thing it does before exiting
        # is post a bot_shutdown event. Check for the presence of a bot_shutdown
        # event which occurred after the termination task was completed.
        shutdown_ts = last_shutdown_ts(machine_lease.bot_id)
        if not shutdown_ts or shutdown_ts < task_result_summary.completed_ts:
            logging.info(
                'Machine terminated but not yet shut down:\nKey: %s\nHostname: %s',
                machine_lease.key,
                machine_lease.hostname,
            )
            return
        if release(machine_lease):
            cleanup_bot(machine_lease)
def handle_early_release(machine_lease):
    """Handles the early release of a leased machine.

    Schedules a termination task for the machine once it is drained or
    within early_release_secs of its lease expiration.

    Args:
      machine_lease: MachineLease instance.
    """
    assert not machine_lease.termination_task, machine_lease.termination_task
    early_expiration_ts = machine_lease.lease_expiration_ts - datetime.timedelta(
        seconds=machine_lease.early_release_secs)
    # Not drained and not yet inside the early-release window: nothing to do.
    if not machine_lease.drained and early_expiration_ts > utils.utcnow():
        return
    logging.info(
        'MachineLease ready to be released:\nKey: %s\nHostname: %s',
        machine_lease.key,
        machine_lease.hostname,
    )
    task_result_summary = task_scheduler.schedule_request(
        task_request.create_termination_task(machine_lease.hostname, True),
        None,
        check_acls=False,
    )
    associate_termination_task(
        machine_lease.key, machine_lease.hostname, task_result_summary.task_id)
def manage_leased_machine(machine_lease):
    """Manages a leased machine.

    Walks the lease's lifecycle state machine in order: BotInfo creation,
    connection instruction, connection check, then expiration / pending
    termination / early release. Each step only runs once the previous
    state has been recorded on the lease, so the order of checks matters.

    Args:
      machine_lease: MachineLease instance with client_request_id, hostname,
        lease_expiration_ts set.
    """
    assert machine_lease.client_request_id, machine_lease.key
    assert machine_lease.hostname, machine_lease.key
    assert machine_lease.lease_expiration_ts, machine_lease.key
    # Handle a newly leased machine.
    if not machine_lease.bot_id:
        ensure_bot_info_exists(machine_lease)
    # Once BotInfo is created, send the instruction to join the server.
    if not machine_lease.instruction_ts:
        send_connection_instruction(machine_lease)
        return
    # Once the instruction is sent, check for connection.
    if not machine_lease.connection_ts:
        check_for_connection(machine_lease)
    # Handle an expired lease.
    if machine_lease.lease_expiration_ts <= utils.utcnow():
        logging.info(
            'MachineLease expired:\nKey: %s\nHostname: %s',
            machine_lease.key,
            machine_lease.hostname,
        )
        cleanup_bot(machine_lease)
        return
    # Handle an active lease with a termination task scheduled.
    # TODO(smut): Check if the bot got terminated by some other termination task.
    if machine_lease.termination_task:
        logging.info(
            'MachineLease pending termination:\nKey: %s\nHostname: %s\nTask ID: %s',
            machine_lease.key,
            machine_lease.hostname,
            machine_lease.termination_task,
        )
        handle_termination_task(machine_lease)
        return
    # Handle a lease ready for early release.
    if machine_lease.early_release_secs or machine_lease.drained:
        handle_early_release(machine_lease)
        return
def handle_lease_request_error(machine_lease, response):
    """Handles an error in the lease request response from Machine Provider.

    Transient errors are only logged (the request will be retried); any
    other error abandons the lease request.

    Args:
      machine_lease: MachineLease instance.
      response: Response returned by components.machine_provider.lease_machine.
    """
    error = machine_provider.LeaseRequestError.lookup_by_name(response['error'])
    transient = error in (
        machine_provider.LeaseRequestError.DEADLINE_EXCEEDED,
        machine_provider.LeaseRequestError.TRANSIENT_ERROR,
    )
    if transient:
        logging.warning(
            'Transient failure: %s\nRequest ID: %s\nError: %s',
            machine_lease.key,
            response['client_request_id'],
            response['error'],
        )
        return
    logging.error(
        'Lease request failed\nKey: %s\nRequest ID: %s\nError: %s',
        machine_lease.key,
        response['client_request_id'],
        response['error'],
    )
    clear_lease_request(machine_lease.key, machine_lease.client_request_id)
def handle_lease_request_response(machine_lease, response):
    """Handles a successful lease request response from Machine Provider.

    FULFILLED responses record the leased machine on the MachineLease;
    DENIED responses abandon the request. Other states are left pending.

    Args:
      machine_lease: MachineLease instance.
      response: Response returned by components.machine_provider.lease_machine.
    """
    assert not response.get('error')
    state = machine_provider.LeaseRequestState.lookup_by_name(response['state'])
    if state == machine_provider.LeaseRequestState.FULFILLED:
        if not response.get('hostname'):
            # Lease has already expired. This shouldn't happen, but it indicates the
            # lease expired faster than we could tell it even got fulfilled.
            logging.error(
                'Request expired\nKey: %s\nRequest ID:%s\nExpired: %s',
                machine_lease.key,
                machine_lease.client_request_id,
                response['lease_expiration_ts'],
            )
            clear_lease_request(machine_lease.key, machine_lease.client_request_id)
        else:
            logging.info(
                'Request fulfilled: %s\nRequest ID: %s\nHostname: %s\nExpires: %s',
                machine_lease.key,
                machine_lease.client_request_id,
                response['hostname'],
                response['lease_expiration_ts'],
            )
            log_lease_fulfillment(
                machine_lease.key,
                machine_lease.client_request_id,
                response['hostname'],
                int(response['lease_expiration_ts']),
                response['request_hash'],
            )
    elif state == machine_provider.LeaseRequestState.DENIED:
        logging.warning(
            'Request denied: %s\nRequest ID: %s',
            machine_lease.key,
            machine_lease.client_request_id,
        )
        clear_lease_request(machine_lease.key, machine_lease.client_request_id)
def manage_pending_lease_request(machine_lease):
    """Manages a pending lease request.

    Sends the lease request to Machine Provider and dispatches the response
    to the error or success handler.

    Args:
      machine_lease: MachineLease instance with client_request_id set.
    """
    assert machine_lease.client_request_id, machine_lease.key
    logging.info(
        'Sending lease request: %s\nRequest ID: %s',
        machine_lease.key,
        machine_lease.client_request_id,
    )
    lease_request = machine_provider.LeaseRequest(
        dimensions=machine_lease.mp_dimensions,
        # TODO(smut): Vary duration so machines don't expire all at once.
        duration=machine_lease.lease_duration_secs,
        request_id=machine_lease.client_request_id,
    )
    response = machine_provider.lease_machine(lease_request)
    if response.get('error'):
        handle_lease_request_error(machine_lease, response)
    else:
        handle_lease_request_response(machine_lease, response)
def manage_lease(key):
"""Manages a MachineLease.
Args:
key: ndb.Key for | |
'physical-router']:
return True
kvps = self.bindings.get('key_value_pair') or []
kvp_dict = dict((kvp['key'], kvp['value']) for kvp in kvps)
vnic_type = kvp_dict.get('vnic_type') or ''
if vnic_type == 'baremetal':
return True
return False
# end
def is_last_vpg_vmi(self):
    """Returns True when this VMI is the only interface left (or none are
    left) on its virtual port group; False otherwise."""
    if not self.virtual_port_group:
        return False
    vpg_obj = VirtualPortGroupDM.get(self.virtual_port_group)
    if not vpg_obj:
        return False
    vmi_list = vpg_obj.virtual_machine_interfaces
    if not vmi_list:
        return True
    return len(vmi_list) < 2 and self.uuid in vmi_list
@classmethod
def delete(cls, uuid):
    """Removes the VMI from the cache.

    If the VMI belongs to a virtual port group, a delete (last VMI) or
    update (other VMIs remain) job transaction is scheduled first, then
    every ref the VMI holds is cleared before the cache entry is dropped.
    """
    if uuid not in cls._dict:
        return
    obj = cls._dict[uuid]
    if obj.vpg_name:
        if obj.is_last_vpg_vmi():
            obj.delete_job_trans(
                name=obj.vpg_name, obj_descr="Virtual Port Group",
                old_pr_list=obj.pr_list)
        else:
            obj.update_job_trans(
                name=obj.vpg_name, obj_descr="Virtual Port Group",
                old_pr_list=obj.pr_list)
    obj.update_multiple_refs('logical_interface', {})
    obj.update_multiple_refs('interface_route_table', {})
    obj.update_single_ref('virtual_network', {})
    obj.update_single_ref('floating_ip', {})
    obj.update_single_ref('instance_ip', {})
    obj.update_single_ref('physical_interface', {})
    obj.update_multiple_refs('routing_instance', {})
    obj.update_multiple_refs('security_group', {})
    obj.update_multiple_refs('port_profile', {})
    obj.update_single_ref('port_tuple', {})
    obj.update_single_ref('service_endpoint', {})
    obj.update_single_ref('virtual_port_group', {})
    del cls._dict[uuid]
# end delete
# end VirtualMachineInterfaceDM
class LogicalRouterDM(DBBaseDM):
    """Cached config-DB view of a logical-router object.

    Tracks the physical routers, fabric, DCIs, VMIs and route targets
    associated with the LR so device-manager can detect relevant changes
    and schedule config-push job transactions.
    """
    _dict = {}
    obj_type = 'logical_router'

    def __init__(self, uuid, obj_dict=None):
        """Logical Router Object"""
        self.uuid = uuid
        self.physical_routers = set()
        self.fabric = None
        self.data_center_interconnects = set()
        self.lr_route_target_for_dci = None
        self.virtual_machine_interfaces = set()
        self.dhcp_relay_servers = set()
        # internal virtual-network
        self.virtual_network = None
        self.is_master = False
        # Per-physical-router loopback IP, populated from routed VNs.
        self.loopback_pr_ip_map = {}
        self.loopback_vn_uuid = None
        self.port_tuples = set()
        self.logical_router_gateway_external = False
        self.configured_route_targets = set()
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refreshes cached state from obj (or a fresh read) and schedules
        an update job transaction when a relevant property changed."""
        if obj is None:
            obj = self.read_obj(self.uuid)
        self.fq_name = obj['fq_name']
        self.name = DMUtils.sanitize_name(self.fq_name[-1])
        # Diff against the cached state *before* the ref updates below
        # overwrite it.
        if self.do_update_trans(obj):
            self.update_job_trans(
                old_pr_list=self.physical_routers,
                new_pr_refs=obj.get('physical_router_refs'))
        if not self.virtual_network:
            vn_name = DMUtils.get_lr_internal_vn_name(self.uuid)
            vn_obj = VirtualNetworkDM.find_by_name_or_uuid(vn_name)
            if vn_obj:
                self.virtual_network = vn_obj.uuid
                vn_obj.logical_router = self.uuid
        self.logical_router_gateway_external = obj.get(
            "logical_router_gateway_external")
        if obj.get('logical_router_dhcp_relay_server', None):
            self.dhcp_relay_servers = obj.get(
                'logical_router_dhcp_relay_server').get('ip_address')
        self.configured_route_targets = set(obj.get(
            'configured_route_target_list', {}).get('route_target', []))
        self.update_single_ref('fabric', obj)
        self.update_multiple_refs('physical_router', obj)
        self.update_multiple_refs('data_center_interconnect', obj)
        self.update_multiple_refs('virtual_machine_interface', obj)
        self.update_multiple_refs('port_tuple', obj)
        self.is_master = self.check_if_default_master_lr()
        # Remember the first 'target:...' route target; it is used for DCI.
        for rt_ref in obj.get('route_target_refs', []):
            for rt in rt_ref.get('to', []):
                if rt.lower().startswith('target:'):
                    self.lr_route_target_for_dci = rt
                    break
            if self.lr_route_target_for_dci is not None:
                break
    # end update

    def check_if_default_master_lr(self):
        """Returns True iff this LR is the fabric's default master LR in
        the default project."""
        if self.fabric is not None:
            fabric_obj = FabricDM.get(self.fabric)
            # form default fab-master-LR fqname here
            # so that master-LRs from non default
            # project are not wrongly checked as
            # master-LRs.
            def_fab_master_lr_fqname = [
                "default-domain",
                "default-project",
                fabric_obj.fq_name[-1] + "-master-LR"
            ]
            return self.fq_name == def_fab_master_lr_fqname
        return False

    def do_update_trans(self, obj):
        """Returns True when obj differs from the cached state in a way
        that requires a new job transaction."""
        if self.get_oper() == 'Create':
            return True
        # Check if dhcp relay servers are different
        dhcp_ips = set(obj.get('logical_router_dhcp_relay_server', {}).
                       get('ip_address', []))
        old_dhcp_ips = set(self.dhcp_relay_servers)
        # A non-empty symmetric difference means the sets differ.
        if dhcp_ips ^ old_dhcp_ips:
            return True
        # Check if external gateway flag is different
        if obj.get("logical_router_gateway_external") != \
                self.logical_router_gateway_external:
            return True
        # Check if configured route targets are different
        configured_route_targets = set(obj.get(
            'configured_route_target_list', {}).get('route_target', []))
        if self.configured_route_targets ^ configured_route_targets:
            return True
        # Check if physical routers are different
        physical_routers = set([pr['uuid'] for
                                pr in obj.get('physical_router_refs', [])])
        if self.physical_routers ^ physical_routers:
            return True
        # Check if VMIs are different
        vmis = set([vmi['uuid'] for
                    vmi in obj.get('virtual_machine_interface_refs', [])])
        if self.virtual_machine_interfaces ^ vmis:
            return True
        return False

    def get_internal_vn_name(self):
        """Returns the name of this LR's internal (system-created) VN."""
        return '__contrail_' + self.uuid + '_lr_internal_vn__'
    # end get_internal_vn_name

    def is_pruuid_in_routed_vn(self, pr_uuid, vn):
        """Returns True if pr_uuid appears in vn's routed properties."""
        if pr_uuid:
            for route_param in vn.routed_properties or []:
                if pr_uuid == route_param.get('physical_router_uuid'):
                    return True
        return False

    def _create_pr_loopback_ip_map(self, pr_uuid, vn):
        # Cache the loopback IP configured for this (LR, PR) pair from the
        # loopback VN's routed properties.
        for route_param in vn.routed_properties or []:
            if self.uuid == route_param.get('logical_router_uuid', None) and\
                    pr_uuid == route_param.get('physical_router_uuid', None):
                self.loopback_pr_ip_map[pr_uuid] = route_param.\
                    get('loopback_ip_address', None)

    def get_connected_networks(self, include_internal=True, pr_uuid=None):
        """Returns uuids of the VNs attached to this LR via its VMIs.

        Routed VNs are included only when pr_uuid participates in them;
        loopback VNs are skipped (their per-PR IPs are cached instead).
        """
        vn_list = []
        if include_internal and self.virtual_network:
            vn_list.append(self.virtual_network)
        for vmi_uuid in self.virtual_machine_interfaces or []:
            vmi = VirtualMachineInterfaceDM.get(vmi_uuid)
            if vmi and vmi.virtual_network:
                vn_obj = VirtualNetworkDM.get(vmi.virtual_network)
                if vn_obj:
                    if vn_obj.virtual_network_category == 'routed':
                        if vn_obj.is_loopback_vn is True:
                            self._create_pr_loopback_ip_map(pr_uuid,
                                                            vn_obj)
                            self.loopback_vn_uuid = vn_obj.uuid
                            continue
                        if self.is_pruuid_in_routed_vn(pr_uuid,
                                                       vn_obj) is False:
                            continue
                    vn_list.append(vmi.virtual_network)
        return vn_list
    # end get_connected_networks

    def get_protocols_connected_routedvn(self, pr_uuid, vn_list):
        """Returns (static_routes, bgp) booleans indicating which routing
        protocols are configured for pr_uuid on this LR's routed VNs.

        An empty vn_list means "consider all VNs"; otherwise only VNs in
        vn_list are examined.
        """
        static_routes, bgp = False, False
        for vmi_uuid in self.virtual_machine_interfaces or []:
            vmi = VirtualMachineInterfaceDM.get(vmi_uuid)
            if vmi and vmi.virtual_network:
                if len(vn_list) > 0 and vmi.virtual_network not in vn_list:
                    continue
                vm_obj = VirtualNetworkDM.get(vmi.virtual_network)
                if vm_obj and vm_obj.virtual_network_category == 'routed':
                    for route_param in vm_obj.routed_properties or []:
                        if pr_uuid != route_param.get('physical_router_uuid'):
                            continue
                        if route_param.get('routing_protocol') == 'bgp':
                            bgp = True
                        elif route_param.get('routing_protocol')\
                                == 'static-routes':
                            static_routes = True
                        # Both protocols found: nothing can change, stop.
                        if bgp is True and static_routes is True:
                            return static_routes, bgp
        return static_routes, bgp
    # end get_protocols_connected_routedvn

    def get_interfabric_dci(self):
        """Returns the first inter-fabric DCI attached to this LR, or None."""
        for dci_uuid in self.data_center_interconnects:
            dci = DataCenterInterconnectDM.get(dci_uuid)
            if dci and dci.is_this_inter_fabric():
                return dci
        return None
    # end get_interfabric_dci

    @classmethod
    def delete(cls, uuid):
        """Schedules a delete job transaction, clears all refs and drops
        the cached LR."""
        if uuid not in cls._dict:
            return
        obj = cls._dict[uuid]
        obj.delete_job_trans(old_pr_list=obj.physical_routers)
        obj.update_multiple_refs('physical_router', {})
        obj.update_multiple_refs('virtual_machine_interface', {})
        obj.update_multiple_refs('port_tuple', {})
        obj.update_multiple_refs('data_center_interconnect', {})
        obj.update_single_ref('virtual_network', None)
        obj.update_single_ref('fabric', {})
        del cls._dict[uuid]
    # end delete
# end LogicalRouterDM
class NetworkIpamDM(DBBaseDM):
    """Cached config-DB view of a network-ipam object."""
    _dict = {}
    obj_type = 'network_ipam'

    def __init__(self, uuid, obj_dict=None):
        """Network Ipam Object"""
        self.uuid = uuid
        self.name = None
        self.ipam_subnets = set()
        self.ipam_method = None
        self.server_discovery_params = None
        self.virtual_networks = set()
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refreshes name, subnets and refs from obj (or a fresh read)."""
        obj = obj if obj is not None else self.read_obj(self.uuid)
        self.fq_name = obj['fq_name']
        self.name = self.fq_name[-1]
        self.ipam_method = obj.get('ipam_subnet_method')
        self.ipam_subnets = obj.get('ipam_subnets')
        if self.ipam_subnets:
            subnets = self.ipam_subnets.get('subnets', [])
            self.server_discovery_params = \
                DMUtils.get_server_discovery_parameters(subnets)
        self.update_multiple_refs('virtual_network', obj)
    # end update

    @classmethod
    def delete(cls, uuid):
        """Clears refs and drops the cached ipam."""
        obj = cls._dict.get(uuid)
        if obj is None:
            return
        obj.update_multiple_refs('virtual_network', {})
        del cls._dict[uuid]
    # end delete
# end NetworkIpamDM
class IntentMapDM(DBBaseDM):
    """Cached config-DB view of an intent-map object."""
    _dict = {}
    obj_type = 'intent_map'

    def __init__(self, uuid, obj_dict=None):
        """Intent Map Object"""
        self.uuid = uuid
        self.name = None
        self.physical_routers = set()
        self.virtual_networks = set()
        self.fabrics = set()
        self.intent_type = None
        self.update(obj_dict)
    # end __init__

    def update(self, obj=None):
        """Refreshes refs, name and intent type from obj (or a fresh read)."""
        obj = obj if obj is not None else self.read_obj(self.uuid)
        self.update_multiple_refs('physical_router', obj)
        self.update_multiple_refs('virtual_network', obj)
        self.update_multiple_refs('fabric', obj)
        self.fq_name = obj['fq_name']
        self.name = self.fq_name[-1]
        self.intent_type = obj.get('intent_map_intent_type')
    # end update

    @classmethod
    def delete(cls, uuid):
        """Clears refs and drops the cached intent map."""
        obj = cls._dict.get(uuid)
        if obj is None:
            return
        obj.update_multiple_refs('physical_router', {})
        obj.update_multiple_refs('virtual_network', {})
        obj.update_multiple_refs('fabric', {})
        del cls._dict[uuid]
    # end delete
# end IntentMapDM
class VirtualNetworkDM(DBBaseDM):
_dict = {}
obj_type = 'virtual_network'
def __init__(self, uuid, obj_dict=None):
    """Virtual Network Object"""
    self.uuid = uuid
    self.name = None
    self.physical_routers = set()
    self.tags = set()
    self.network_ipams = set()
    self.data_center_interconnects = set()
    # Owning LR, resolved later by set_logical_router().
    self.logical_router = None
    self.router_external = False
    self.forwarding_mode = None
    self.gateways = None
    self.floating_ip_pools = set()
    self.instance_ip_map = {}
    self.route_targets = None
    self.has_ipv6_subnet = False
    # uuid of the internal ipv6 link-local VN, if resolved.
    self.ipv6_ll_vn_id = None
    self.virtual_network_category = None
    # Populated from virtual_network_routed_properties for routed VNs.
    self.routed_properties = None
    self.intent_maps = set()
    self.is_loopback_vn = False
    self.update(obj_dict)
# end __init__
def get_route_targets(self):
    """Returns (export, import) route-target sets taken from the VN's
    primary routing instance (the one sharing the VN's name), restricted
    to self.route_targets when that is set. (None, None) when no primary
    RI is found."""
    for ri_id in self.routing_instances or ():
        ri_obj = RoutingInstanceDM.get(ri_id)
        # Only the RI whose last fq_name component matches the VN's is the
        # primary one.
        if ri_obj is None or ri_obj.fq_name[-1] != self.fq_name[-1]:
            continue
        if self.route_targets:
            return (self.route_targets & ri_obj.export_targets,
                    self.route_targets & ri_obj.import_targets)
        return (copy.copy(ri_obj.export_targets),
                copy.copy(ri_obj.import_targets))
    return None, None
# end get_route_targets
def set_logical_router(self, name):
    """Links this VN to its logical router.

    For a DM-internal LR VN the LR uuid is embedded in the VN name; for a
    regular overlay VN it is discovered through VMI back-refs.
    """
    lr_uuid = None
    if DMUtils.get_lr_internal_vn_prefix() in name:
        lr_uuid = DMUtils.extract_lr_uuid_from_internal_vn_name(name)
    else:
        # for overlay VN (non-contrail-vn) set LR through VMI_back_refs
        for vmi_uuid in self.virtual_machine_interfaces:
            vmi = VirtualMachineInterfaceDM.get(vmi_uuid)
            # Skip baremetal (BMS-owned) interfaces.
            if vmi is None or vmi.is_device_owner_bms() is True:
                continue
            if vmi.logical_router:
                lr_uuid = vmi.logical_router
                break
    if lr_uuid is None:
        return
    lr_obj = LogicalRouterDM.get(lr_uuid)
    if lr_obj:
        self.logical_router = lr_obj.uuid
        # The LR's external-gateway flag overrides router_external for
        # LR-attached VNs.
        self.router_external = lr_obj.logical_router_gateway_external
        if DMUtils.get_lr_internal_vn_prefix() in name:
            lr_obj.virtual_network = self.uuid
# end set_logical_router
# set_ipv6_ll_data
# store ipv6 link local internal VN uuid in database for later use. As
# this VN is internally created by DM there is no way to get reference.
def set_ipv6_ll_data(self, ipam_refs=None):
    """Persists this VN's uuid keyed by its name as the ipv6 link-local
    VN record.

    Args:
        ipam_refs: unused; kept for backward compatibility with existing
            callers. The default was a mutable list literal ([]) — a
            shared-mutable-default pitfall — and is now None.
    """
    db_data = {"vn_uuid": self.uuid}
    self._object_db.add_ipv6_ll_subnet(self.name, db_data)
# end _set_ipv6_ll_data
# read_ipv6_object
# Function reads object from api library. DM cannont control the
# sequence in which it receives VirtualNetworkDM update. It might happen
# user-defined object update is received(which needs ipv6 ll VN info)
# first then internal ipv6 ll VN.
def read_ipv6_object(self):
    """Fetch the internal ipv6 link-local VN via the API library.

    DM cannot control the order in which VN updates arrive: a
    user-defined object needing the ipv6 LL VN info may be processed
    before the internal VN itself, so this reads it on demand.

    Returns a (gateways, uuid) tuple, or (None, None) if the VN does
    not exist (the error is logged, not raised).
    """
    fq_name = ['default-domain', 'default-project',
               '_internal_vn_ipv6_link_local']
    try:
        net_obj = self._manager._vnc_lib.virtual_network_read(
            fq_name=fq_name)
    except Exception as exc:
        self._logger.error("virtual network '%s' does not exist %s"
                           % (fq_name[-1], str(exc)))
        return None, None
    net_dict = self._manager._vnc_lib.obj_to_dict(net_obj)
    # only the gateways are needed; the ipv6-subnet flag is discarded
    gateways, _has_ipv6 = DMUtils.get_network_gateways(
        net_dict.get('network_ipam_refs', []))
    return gateways, net_obj.get_uuid()
def get_routed_properties(self, obj):
    """Cache ``routed_properties`` from the VN config dict, if present.

    Leaves ``self.routed_properties`` untouched when the config dict has
    no (or an empty) ``virtual_network_routed_properties`` entry.
    """
    props = obj.get('virtual_network_routed_properties')
    if props:
        self.routed_properties = props['routed_properties']
# end get_routed_properties
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.virtual_machine_interfaces = set(
[vmi['uuid'] for vmi in
obj.get('virtual_machine_interface_back_refs', [])])
self.set_logical_router(obj.get("fq_name")[-1])
self.update_multiple_refs('physical_router', obj)
self.update_multiple_refs('tag', obj)
self.update_multiple_refs('network_ipam', obj)
self.update_multiple_refs('data_center_interconnect', obj)
self.set_children('floating_ip_pool', obj)
self.fq_name = obj['fq_name']
self.name = self.fq_name[-1]
if not self.logical_router:
self.router_external = obj.get('router_external', False)
self.vn_network_id = obj.get('virtual_network_network_id')
self.virtual_network_properties = obj.get('virtual_network_properties')
self.set_forwarding_mode(obj)
self.routing_instances = set([ri['uuid'] for ri in
obj.get('routing_instances', [])])
(self.gateways, self.has_ipv6_subnet) = \
DMUtils.get_network_gateways(obj.get('network_ipam_refs', []))
self.virtual_network_category = obj.get('virtual_network_category')
if self.virtual_network_category == 'routed':
if "overlay-loopback" in self.name:
| |
Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.299797,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 8.19561,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.246177,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.397075,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.20043,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.843682,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.281556,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.37392,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0103258,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0746685,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0763655,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0746685,
'Execution Unit/Register Files/Runtime Dynamic': 0.0866912,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.157306,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.476571,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.01501,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00197615,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00197615,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00175619,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000698971,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.001097,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0068055,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0176981,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0734121,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.66964,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.23884,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.24934,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 7.11478,
'Instruction Fetch Unit/Runtime Dynamic': 0.586096,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0530254,
'L2/Runtime Dynamic': 0.0150002,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.7605,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.23277,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0816375,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0816374,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.14601,
'Load Store Unit/Runtime Dynamic': 1.71702,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.201304,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.402608,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0714435,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0722369,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.290341,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0391624,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.569177,
'Memory Management Unit/Runtime Dynamic': 0.111399,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 19.8464,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0111068,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.128198,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': | |
"""Tests for the REST API."""
import asyncio
import json
import re
import sys
import uuid
from datetime import datetime
from datetime import timedelta
from unittest import mock
from urllib.parse import quote
from urllib.parse import urlparse
from pytest import fixture
from pytest import mark
from tornado.httputil import url_concat
import jupyterhub
from .. import orm
from ..apihandlers.base import PAGINATION_MEDIA_TYPE
from ..objects import Server
from ..utils import url_path_join as ujoin
from ..utils import utcnow
from .conftest import new_username
from .mocking import public_host
from .mocking import public_url
from .utils import add_user
from .utils import api_request
from .utils import async_requests
from .utils import auth_header
from .utils import find_user
# --------------------
# Authentication tests
# --------------------
async def test_auth_api(app):
    """Exercise the authorizations API: bad token, valid token, bad headers."""
    db = app.db
    # a nonsense token is not found
    resp = await api_request(app, 'authorizations', 'gobbledygook')
    assert resp.status_code == 404
    # mint a fresh API token for the admin user
    admin = find_user(db, 'admin')
    token = admin.new_api_token()
    # a valid token resolves to its owning user
    resp = await api_request(app, 'authorizations/token', token)
    assert resp.status_code == 200
    assert resp.json()['name'] == admin.name
    # a bogus Authorization header is forbidden
    resp = await api_request(
        app, 'authorizations/token', token, headers={'Authorization': 'no sir'}
    )
    assert resp.status_code == 403
    # a malformed token header (stray colon) is also forbidden
    resp = await api_request(
        app,
        'authorizations/token',
        token,
        headers={'Authorization': 'token: %s' % admin.cookie_id},
    )
    assert resp.status_code == 403
async def test_cors_checks(app):
    """Cookie-authenticated requests are gated by Referer/Content-Type checks.

    An empty Authorization header forces cookie auth, which is only
    honored when the Referer matches the Hub's own host and path.
    """
    url = ujoin(public_host(app), app.hub.base_url)
    host = urlparse(url).netloc
    # add admin user
    user = find_user(app.db, 'admin')
    if user is None:
        user = add_user(app.db, name='admin', admin=True)
    cookies = await app.login_user('admin')
    # Referer 'null' (opaque origin) -> rejected
    r = await api_request(
        app, 'users', headers={'Authorization': '', 'Referer': 'null'}, cookies=cookies
    )
    assert r.status_code == 403
    # cross-origin Referer -> rejected
    r = await api_request(
        app,
        'users',
        headers={
            'Authorization': '',
            'Referer': 'http://attack.com/csrf/vulnerability',
        },
        cookies=cookies,
    )
    assert r.status_code == 403
    # Referer matching the Hub URL itself -> accepted
    r = await api_request(
        app,
        'users',
        headers={'Authorization': '', 'Referer': url, 'Host': host},
        cookies=cookies,
    )
    assert r.status_code == 200
    # a deeper path under the Hub base URL also counts as same-origin
    r = await api_request(
        app,
        'users',
        headers={
            'Authorization': '',
            'Referer': ujoin(url, 'foo/bar/baz/bat'),
            'Host': host,
        },
        cookies=cookies,
    )
    assert r.status_code == 200
    # cookie-authenticated POST with a non-JSON Content-Type -> rejected
    r = await api_request(
        app,
        'users',
        method='post',
        data='{}',
        headers={
            "Authorization": "",
            "Content-Type": "text/plain",
        },
        cookies=cookies,
    )
    assert r.status_code == 403
    # JSON Content-Type passes the check; '{}' is then a 400 (no usernames)
    r = await api_request(
        app,
        'users',
        method='post',
        data='{}',
        headers={
            "Authorization": "",
            "Content-Type": "application/json; charset=UTF-8",
        },
        cookies=cookies,
    )
    assert r.status_code == 400  # accepted, but invalid
# --------------
# User API tests
# --------------
def normalize_timestamp(ts):
    """Zero out every digit in *ts* so timestamps compare equal.

    Each digit (together with any immediately following fractional-second
    run like ``5.123``) collapses to a single ``'0'``.  ``None`` passes
    through unchanged.
    """
    return None if ts is None else re.sub(r'\d(\.\d+)?', '0', ts)
def normalize_user(user):
    """Make a user model comparable across test runs.

    Zeroes out timestamps, rewrites progress URLs onto a ``PREFIX``
    placeholder, and masks integer server pids.  Mutates and returns
    *user*.
    """
    for key in ('created', 'last_activity'):
        user[key] = normalize_timestamp(user[key])
    for server in user.get('servers', {}).values():
        for key in ('started', 'last_activity'):
            server[key] = normalize_timestamp(server[key])
        server['progress_url'] = re.sub(
            r'.*/hub/api', 'PREFIX/hub/api', server['progress_url']
        )
        state = server['state']
        if isinstance(state, dict) and isinstance(state.get('pid'), int):
            state['pid'] = 0
    return user
def fill_user(model):
    """Fill a partial user model with default values.

    Any field absent from *model* is set to its default; present fields
    are left untouched.  Mutates and returns *model*.
    """
    defaults = {
        'server': None,
        'kind': 'user',
        'roles': [],
        'groups': [],
        'admin': False,
        'pending': None,
        'created': TIMESTAMP,
        'last_activity': TIMESTAMP,
        'servers': {},
    }
    for key, value in defaults.items():
        model.setdefault(key, value)
    return model
# A zeroed-out "now": normalize_timestamp() replaces every digit with '0',
# so any two normalized timestamps compare equal regardless of when they
# were produced.  Used as the default for 'created'/'last_activity'.
TIMESTAMP = normalize_timestamp(datetime.now().isoformat() + 'Z')
@mark.user
@mark.role
async def test_get_users(app):
    """Admins may list all users; regular users may not."""
    db = app.db
    resp = await api_request(app, 'users', headers=auth_header(db, 'admin'))
    assert resp.status_code == 200
    # sort by name for a deterministic comparison, then smooth timestamps
    listed = [
        normalize_user(u) for u in sorted(resp.json(), key=lambda d: d['name'])
    ]
    expected_admin = fill_user(
        {'name': 'admin', 'admin': True, 'roles': ['admin'], 'auth_state': None}
    )
    expected_user = fill_user(
        {'name': 'user', 'admin': False, 'roles': ['user'], 'auth_state': None}
    )
    assert listed == [expected_admin, expected_user]
    # a non-admin token is forbidden
    resp = await api_request(app, 'users', headers=auth_header(db, 'user'))
    assert resp.status_code == 403
@fixture
def default_page_limit(app):
    """Lower the default page size so pagination kicks in during tests."""
    limit = 10
    patch = mock.patch.dict(
        app.tornado_settings, {"api_page_default_limit": limit}
    )
    with patch:
        yield limit
@fixture
def max_page_limit(app):
    """Lower the maximum page size so the cap is reachable during tests."""
    limit = 20
    patch = mock.patch.dict(app.tornado_settings, {"api_page_max_limit": limit})
    with patch:
        yield limit
@mark.user
@mark.role
@mark.parametrize(
    "n, offset, limit, accepts_pagination, expected_count",
    [
        (10, None, None, False, 10),
        (10, None, None, True, 10),
        (10, 5, None, True, 5),
        (10, 5, None, False, 5),
        (10, 5, 1, True, 1),
        (10, 10, 10, True, 0),
        ( # default page limit, pagination expected
            30,
            None,
            None,
            True,
            'default',
        ),
        (
            # default max page limit, pagination not expected
            30,
            None,
            None,
            False,
            'max',
        ),
        (
            # limit exceeded
            30,
            None,
            500,
            False,
            'max',
        ),
    ],
)
async def test_get_users_pagination(
    app,
    n,
    offset,
    limit,
    accepts_pagination,
    expected_count,
    default_page_limit,
    max_page_limit,
):
    """GET /users honors offset/limit and the pagination media type.

    ``expected_count`` may be the sentinel 'default' or 'max', resolved
    against the fixture-provided page limits below.
    """
    db = app.db
    # resolve sentinel counts against the fixture-patched limits
    if expected_count == 'default':
        expected_count = default_page_limit
    elif expected_count == 'max':
        expected_count = max_page_limit
    # populate users up to n total, remembering names in id order
    usernames = []
    existing_users = db.query(orm.User).order_by(orm.User.id.asc())
    usernames.extend(u.name for u in existing_users)
    for i in range(n - existing_users.count()):
        name = new_username()
        usernames.append(name)
        add_user(db, app, name=name)
    print(f"{db.query(orm.User).count()} total users")
    # build the request URL with optional offset/limit query params
    url = 'users'
    params = {}
    if offset:
        params['offset'] = offset
    if limit:
        params['limit'] = limit
    url = url_concat(url, params)
    headers = auth_header(db, 'admin')
    # opting in to pagination is signalled via the Accept header
    if accepts_pagination:
        headers['Accept'] = PAGINATION_MEDIA_TYPE
    r = await api_request(app, url, headers=headers)
    assert r.status_code == 200
    response = r.json()
    if accepts_pagination:
        # paginated responses wrap the user list in an envelope
        assert set(response) == {
            "items",
            "_pagination",
        }
        pagination = response["_pagination"]
        users = response["items"]
    else:
        users = response
    assert len(users) == expected_count
    # the returned page must match the same slice of the known name list
    expected_usernames = usernames
    if offset:
        expected_usernames = expected_usernames[offset:]
    expected_usernames = expected_usernames[:expected_count]
    got_usernames = [u['name'] for u in users]
    assert got_usernames == expected_usernames
@mark.user
@mark.parametrize(
    "state",
    ("inactive", "active", "ready", "invalid"),
)
async def test_get_users_state_filter(app, state):
    """GET /users?state=... filters by server state; unknown states are 400."""
    db = app.db
    # has_one_active: one active, one inactive, zero ready
    has_one_active = add_user(db, app=app, name='has_one_active')
    # has_two_active: two active, ready servers
    has_two_active = add_user(db, app=app, name='has_two_active')
    # has_two_inactive: two spawners, neither active
    has_two_inactive = add_user(db, app=app, name='has_two_inactive')
    # has_zero: no Spawners registered at all
    has_zero = add_user(db, app=app, name='has_zero')
    test_usernames = {
        "has_one_active",
        "has_two_active",
        "has_two_inactive",
        "has_zero",
    }
    # expected (alphabetically sorted) usernames for each state filter
    user_states = {
        "inactive": ["has_two_inactive", "has_zero"],
        "ready": ["has_two_active"],
        "active": ["has_one_active", "has_two_active"],
        "invalid": [],
    }
    expected = user_states[state]
    def add_spawner(user, name='', active=True, ready=True):
        """Add a spawner in a requested state
        If active, should turn up in an active query
        If active and ready, should turn up in a ready query
        If not active, should turn up in an inactive query
        """
        # accessing user.spawners[name] registers the spawner
        spawner = user.spawners[name]
        db.commit()
        if active:
            # an associated Server record marks the spawner as active
            orm_server = orm.Server()
            db.add(orm_server)
            db.commit()
            spawner.server = Server(orm_server=orm_server)
            db.commit()
            if not ready:
                # pending spawn: active but not yet ready
                spawner._spawn_pending = True
        return spawner
    for name in ("", "secondary"):
        add_spawner(has_two_active, name, active=True)
        add_spawner(has_two_inactive, name, active=False)
    add_spawner(has_one_active, active=True, ready=False)
    add_spawner(has_one_active, "inactive", active=False)
    r = await api_request(app, f'users?state={state}')
    if state == "invalid":
        assert r.status_code == 400
        return
    assert r.status_code == 200
    # compare only the users created by this test (ignore fixtures' users)
    usernames = sorted(u["name"] for u in r.json() if u["name"] in test_usernames)
    assert usernames == expected
@mark.user
async def test_get_self(app):
    """/user resolves the authenticated identity, including oauth tokens."""
    db = app.db
    # default request auth identifies a regular user
    resp = await api_request(app, 'user')
    resp.raise_for_status()
    assert resp.json()['kind'] == 'user'
    # a token issued through an oauth client identifies its owner
    orpheus = add_user(db, app=app, name='orpheus')
    token = uuid.uuid4().hex
    client = orm.OAuthClient(identifier='eurydice')
    db.add(client)
    db.commit()
    db.add(
        orm.APIToken(
            user=orpheus.orm_user,
            oauth_client=client,
            token=token,
        )
    )
    db.commit()
    resp = await api_request(
        app,
        'user',
        headers={'Authorization': 'token ' + token},
    )
    resp.raise_for_status()
    assert resp.json()['name'] == orpheus.name
    # an invalid token is forbidden
    resp = await api_request(
        app,
        'user',
        headers={'Authorization': 'token notvalid'},
    )
    assert resp.status_code == 403
async def test_get_self_service(app, mockservice):
    """/user identifies service tokens as kind 'service'."""
    resp = await api_request(
        app, "user", headers={"Authorization": f"token {mockservice.api_token}"}
    )
    resp.raise_for_status()
    info = resp.json()
    assert info['kind'] == 'service'
    assert info['name'] == mockservice.name
@mark.user
@mark.role
async def test_add_user(app):
    """POST /users/:name creates a non-admin user with the 'user' role."""
    db = app.db
    name = 'newuser'
    resp = await api_request(app, 'users', name, method='post')
    assert resp.status_code == 201
    created = find_user(db, name)
    assert created is not None
    assert created.name == name
    assert not created.admin
    # default role assignment: 'user' granted, 'admin' not
    assert orm.Role.find(db, 'user') in created.roles
    assert orm.Role.find(db, 'admin') not in created.roles
@mark.user
@mark.role
async def test_get_user(app):
    """GET /users/:name for self, admin, and unauthorized requesters."""
    name = 'user'
    # get own model
    r = await api_request(app, 'users', name, headers=auth_header(app.db, name))
    r.raise_for_status()
    # admin request
    r = await api_request(
        app,
        'users',
        name,
    )
    r.raise_for_status()
    user = normalize_user(r.json())
    assert user == fill_user({'name': name, 'roles': ['user'], 'auth_state': None})
    # admin request, no such user
    r = await api_request(
        app,
        'users',
        'nosuchuser',
    )
    assert r.status_code == 404
    # unauthorized request, no such user
    r = await api_request(
        app,
        'users',
        'nosuchuser',
        headers=auth_header(app.db, name),
    )
    assert r.status_code == 404
    # unauthorized request for existing user
    # NOTE(review): expects 404 (not 403) — presumably so unauthorized
    # callers cannot probe which usernames exist; confirm against handler.
    r = await api_request(
        app,
        'users',
        'admin',
        headers=auth_header(app.db, name),
    )
    assert r.status_code == 404
@mark.user
async def test_add_multi_user_bad(app):
    """POST /users rejects missing, empty-object, and list bodies."""
    # None means "send no body at all"
    for body in (None, '{}', '[]'):
        kwargs = {'method': 'post'}
        if body is not None:
            kwargs['data'] = body
        resp = await api_request(app, 'users', **kwargs)
        assert resp.status_code == 400
@mark.user
async def test_add_multi_user_invalid(app):
app.authenticator.username_pattern = r'w.*'
r = await api_request(
app,
'users',
method='post',
data=json.dumps({'usernames': ['Willow', 'Andrew', 'Tara']}),
)
app.authenticator.username_pattern = ''
| |
<reponame>pyspace/pyspace
""" Select only a part of the instances
.. todo: group instance selectors
"""
import random
import logging
from collections import defaultdict
from pySPACE.missions.nodes.base_node import BaseNode
from pySPACE.tools.memoize_generator import MemoizeGenerator
class InstanceSelectionNode(BaseNode):
"""Retain only a certain percentage of the instances
The node InstanceSelectionNode forwards only
*train_percentage_selected* percent of the training instances passed to
him to the successor node and only
*test_percentage_selected* percent of the test instances. The forwarded
instances are selected randomly but so that the class ratio is kept.
If *reduce_class* is used, only the chosen class is reduced, without
    keeping the class ratio. So the total amount of reduced data does not match
the percentage values.
**Parameters**
:train_percentage_selected:
The percentage of training instances which
is forwarded to successor node.
(*optional, default: 100*)
:test_percentage_selected:
The percentage of test instances which
is forwarded to successor node.
(*optional, default: 100*)
:reduce_class:
If you want only to reduce one class, choose this parameter
otherwise, both classes are reduced in a balanced fashion.
(*optional, default: False*)
:num_train_instances:
Instead of specifying *train_percentage_selected*, this option
allows to specify the absolute number of training instances of
class *class_label* that should be in the training set.
All instances that occur until *num_train_instances* are found are
used for training.
(*optional, default: None*)
:class_label:
If *num_train_instances*-option is used, this string determines the
        class of which training examples are counted.
(*optional, default: 'Target'*)
:random:
If *False*, the order of the data is retained. I.e. the first X
percent or number of train instances are used for training. If
*True*, the training data is sampled randomly without taking into
consideration the data's order.
(*optional, default: True*)
**Exemplary call**
.. code-block:: yaml
-
node : InstanceSelection
parameters :
train_percentage_selected : 80
test_percentage_selected : 100
reduce_class : Standard
:Author: <NAME> (<EMAIL>)
:Created: 2010/03/31
"""
def __init__(self, train_percentage_selected=100,
             test_percentage_selected=100, reduce_class=False,
             num_train_instances=None, class_label='Target', random=True,
             **kwargs):
    """ Store all selection parameters as permanent node attributes """
    super(InstanceSelectionNode, self).__init__(**kwargs)
    params = dict(
        train_percentage_selected=train_percentage_selected,
        test_percentage_selected=test_percentage_selected,
        reduce_class=reduce_class,
        num_train_instances=num_train_instances,
        class_label=class_label,
        random=random,
    )
    self.set_permanent_attributes(**params)
def get_num_data(self, iterator):
    """ Collect instances until enough of *class_label* have been seen

    Returns a list of (instance, label) tuples containing
    ``self.num_train_instances`` instances of class
    ``self.class_label`` plus every other instance encountered along
    the way.  If the iterator is exhausted first, a warning is logged
    and whatever was collected so far is returned.
    """
    counter = 0
    retained_instances = []
    while counter < self.num_train_instances:
        try:
            # next() works for both Python 2 and 3 iterators, unlike
            # the old ``iterator.next()`` call
            instance, label = next(iterator)
        except StopIteration:
            # previously a bare ``except:`` silently swallowed *any*
            # error here; only exhaustion is expected, and the user is
            # now warned about the shortfall
            self._log("Only %s instances of class '%s' available, "
                      "but %s were requested." %
                      (counter, self.class_label,
                       self.num_train_instances),
                      level=logging.WARNING)
            break
        if label == self.class_label:
            counter += 1
        retained_instances.append((instance, label))
    return retained_instances
def request_data_for_training(self, use_test_data):
""" Returns data for training of subsequent nodes
.. todo:: to document
.. note::
This method works differently in InstanceSelectionNode
than in other nodes: Only *percentage_selected* of the available
data are returned.
"""
assert(self.input_node is not None)
if self.train_percentage_selected > 100:
self._log("Train percentage of %f reduced to 100." %
self.train_percentage_selected,
level=logging.ERROR)
self.train_percentage_selected = 100
self._log("Data for training is requested.", level=logging.DEBUG)
if self.train_percentage_selected == 100 and \
self.num_train_instances is None:
return super(InstanceSelectionNode, self).request_data_for_training(
use_test_data)
# If we haven't computed the data for training yet
if self.data_for_training is None:
self._log("Producing data for training.", level=logging.DEBUG)
# Train this node
self.train_sweep(use_test_data)
if not self.num_train_instances is None and self.random == False:
retained_instances = self.get_num_data(
self.input_node.request_data_for_training(use_test_data))
else:
# Store all data
if self.num_train_instances is None:
all_instances = defaultdict(list)
for instance, label in self.input_node.request_data_for_training(
use_test_data):
all_instances[label].append(instance)
else:
all_instances = list(
self.input_node.request_data_for_traning(use_test_data))
if self.random:
r = random.Random(self.run_number)
if not self.num_train_instances is None and self.random:
r.shuffle(all_instances)
retained_instances = self.get_num_data(
all_instances.__iter__())
else:
retained_instances = []
self._log("Keeping only %s percent of training data" %
self.train_percentage_selected,
level=logging.DEBUG)
# Retain only *percentage_selected* percent of the data
for label, instances in all_instances.iteritems():
# enable random choice of samples
r.shuffle(instances)
if not self.reduce_class or \
self.train_percentage_selected == 100:
end_index = int(round(len(instances) *
self.train_percentage_selected / 100))
elif not (self.reduce_class == label):
end_index = len(instances)
else: # self.reduce_class==label--> reduction needed
end_index = int(round(len(instances) *
self.train_percentage_selected / 100))
retained_instances.extend(zip(instances[0:end_index],
[label]*end_index))
if self.random:
# mix up samples between the different labels
r.shuffle(retained_instances)
# Compute a generator the yields the train data and
# encapsulate it in an object that memoizes its outputs and
# provides a "fresh" method that returns a new generator that will
# yield the same sequence
train_data_generator = ((self.execute(data), label)
for (data, label) in retained_instances)
self.data_for_training = MemoizeGenerator(train_data_generator,
caching=self.caching)
self._log("Data for training finished", level=logging.DEBUG)
# Return a fresh copy of the generator
return self.data_for_training.fresh()
    def request_data_for_testing(self):
        """ Return a reduced stream of testing data for subsequent nodes

        Only *test_percentage_selected* percent of the available testing data
        (per label) are forwarded.  If *reduce_class* is set, only instances
        of that class are reduced while every other class is kept completely.

        NOTE(review): in contrast to the training counterpart, the data is
        always shuffled with a run-number seeded RNG, i.e. the ``random``
        parameter is ignored here -- confirm whether that is intended.
        """
        assert(self.input_node is not None)
        # Clamp invalid percentages instead of failing.
        if self.test_percentage_selected > 100:
            self._log("Test percentage of %f reduced to 100." %
                      self.test_percentage_selected,
                      level=logging.ERROR)
            self.test_percentage_selected = 100
        self._log("Data for testing is requested.", level=logging.DEBUG)
        # No reduction requested: fall back to the default implementation.
        if self.test_percentage_selected == 100:
            return super(InstanceSelectionNode, self).request_data_for_testing()
        # If we haven't computed the data for testing yet
        if self.data_for_testing is None:
            # Assert that this node has already been trained
            assert(not self.is_trainable() or
                   self.get_remaining_train_phase() == 0)
            # Divide available instances according to label
            all_instances = defaultdict(list)
            for instance, label in self.input_node.request_data_for_testing():
                all_instances[label].append(instance)
            self._log("Keeping only %s percent of test data" %
                      self.test_percentage_selected,
                      level=logging.DEBUG)
            # Seeded with the run number so reductions are reproducible.
            r = random.Random(self.run_number)
            # Retain only *percentage_selected* percent of the data
            retained_instances = []
            for label, instances in all_instances.iteritems():
                # enable random choice of samples
                r.shuffle(instances)
                if not self.reduce_class or \
                        self.test_percentage_selected == 100:
                    # reduce all classes equally
                    end_index = int(round(len(instances) *
                                          self.test_percentage_selected / 100))
                elif not (self.reduce_class == label):
                    # other classes than *reduce_class* are kept completely
                    end_index = len(instances)
                else:  # self.reduce_class==label--> reduction needed
                    end_index = int(round(len(instances) *
                                          self.test_percentage_selected / 100))
                retained_instances.extend(zip(instances[0:end_index],
                                              [label]*end_index))
            # mix up samples between the different labels
            r.shuffle(retained_instances)
            # Compute a generator that yields the test data and
            # encapsulate it in an object that memoizes its outputs and
            # provides a "fresh" method that returns a new generator that'll
            # yield the same sequence
            self._log("Producing data for testing.", level=logging.DEBUG)
            test_data_generator = ((self.execute(data), label)
                                   for (data, label) in retained_instances)
            self.data_for_testing = MemoizeGenerator(test_data_generator,
                                                     caching=self.caching)
            self._log("Data for testing finished", level=logging.DEBUG)
        # Return a fresh copy of the generator
        return self.data_for_testing.fresh()
def _execute(self, time_series):
return time_series # We don't do anything with the kept instances
class ReduceOverrepresentedClassNode(BaseNode):
    """ Reject instances to balance categories for classification

    The node forwards only a reduced number of the training and test
    instances of the bigger class to get a balanced ratio of the classes.
    The forwarded instances are selected randomly.  All data of the
    underrepresented class is forwarded.

    **Parameters**

    **Exemplary call**

    .. code-block:: yaml

        -
            node : Reduce_Overrepresented_Class

    :Author: <NAME> (<EMAIL>)
    :Created: 2010/09/22
    """
    def __init__(self, **kwargs):
        # No parameters of its own: simply forward to the base constructor.
        super(ReduceOverrepresentedClassNode, self).__init__(**kwargs)
def request_data_for_training(self, use_test_data):
""" Returns data for training of subsequent nodes
.. todo:: to document
"""
assert(self.input_node is not None)
self._log("Data for testing is requested.", level=logging.DEBUG)
if self.data_for_training is None:
self._log("Producing data for training.", level=logging.DEBUG)
# Train this node
self.train_sweep(use_test_data)
# Divide available instances according to label
all_instances = defaultdict(list)
for instance, label in self.input_node.request_data_for_training(
use_test_data):
all_instances[label].append(instance)
retained_instances = self.balance_instances(all_instances)
# Compute a generator the yields the test data and
# encapsulate it in an object that memoizes its outputs and
# provides a "fresh" method that returns a new generator that will
# yield the same sequence
self._log("Producing data for testing.", level=logging.DEBUG)
train_data_generator = ((self.execute(data), label)
for (data, label) in retained_instances)
self.data_for_training = MemoizeGenerator(train_data_generator,
caching=self.caching)
self._log("Data for training finished", level=logging.DEBUG)
# Return a fresh copy of the generator
return self.data_for_training.fresh()
def request_data_for_testing(self):
""" Returns data for testing of subsequent nodes
.. todo:: to document
"""
assert(self.input_node is not None)
self._log("Data for testing is requested.", level=logging.DEBUG)
# If we haven't computed the data for testing yet
if self.data_for_testing is None:
# Assert that this node has already been trained
assert(not self.is_trainable() or
self.get_remaining_train_phase() == 0)
# Divide available instances according to label
all_instances = defaultdict(list)
for instance, label in self.input_node.request_data_for_testing():
all_instances[label].append(instance)
retained_instances = self.balance_instances(all_instances)
# Compute a generator the yields the test data and
# encapsulate it in an object that memoizes its outputs and
# provides | |
+ m.x1094 + m.x1124 + m.x1164 + m.x1176 + m.x1186 + m.x1200 + m.x1226
+ m.x1234 <= 244)
# NOTE(review): auto-generated constraint block (apparently converted from a
# modeling-language source); each constraint caps the sum of a fixed subset of
# the x_j variables by a capacity constant.  Do not edit by hand.
m.c33 = Constraint(expr= m.x89 + m.x97 + m.x113 + m.x120 + m.x129 + m.x141 + m.x160 + m.x176 + m.x183 + m.x191
    + m.x201 + m.x208 + m.x215 + m.x223 + m.x230 + m.x242 + m.x252 + m.x259 + m.x283 + m.x290
    + m.x302 + m.x310 + m.x320 + m.x327 + m.x344 + m.x352 + m.x359 + m.x385 + m.x392 + m.x404
    + m.x412 + m.x422 + m.x429 + m.x446 + m.x458 + m.x466 + m.x476 + m.x492 + m.x519 + m.x531
    + m.x541 + m.x548 + m.x555 + m.x573 + m.x580 + m.x592 + m.x602 + m.x618 + m.x625 + m.x652
    + m.x664 + m.x681 + m.x698 + m.x710 + m.x726 + m.x733 + m.x750 + m.x758 + m.x768 + m.x775
    + m.x792 + m.x800 + m.x810 + m.x817 + m.x836 + m.x848 + m.x860 + m.x868 + m.x886 + m.x902
    + m.x920 + m.x927 + m.x939 + m.x949 + m.x957 + m.x964 + m.x976 + m.x984 + m.x1003 + m.x1011
    + m.x1018 + m.x1045 + m.x1071 + m.x1165 + m.x1196 + m.x1235 <= 190)
m.c34 = Constraint(expr= m.x121 + m.x130 + m.x161 + m.x216 + m.x231 + m.x291 + m.x328 + m.x360 + m.x393 + m.x447
    + m.x493 + m.x520 + m.x556 + m.x581 + m.x626 + m.x653 + m.x699 + m.x734 + m.x776 + m.x818
    + m.x837 + m.x849 + m.x903 + m.x928 + m.x950 + m.x965 + m.x1004 + m.x1046 + m.x1102 + m.x1135
    + m.x1146 + m.x1166 + m.x1177 + m.x1209 <= 176)
m.c35 = Constraint(expr= m.x98 + m.x104 + m.x142 + m.x151 + m.x169 + m.x177 + m.x192 + m.x202 + m.x224 + m.x243
    + m.x253 + m.x266 + m.x274 + m.x284 + m.x311 + m.x321 + m.x336 + m.x368 + m.x376 + m.x386
    + m.x413 + m.x423 + m.x436 + m.x467 + m.x477 + m.x483 + m.x501 + m.x509 + m.x532 + m.x542
    + m.x564 + m.x574 + m.x593 + m.x603 + m.x609 + m.x634 + m.x642 + m.x672 + m.x688 + m.x711
    + m.x717 + m.x742 + m.x759 + m.x769 + m.x784 + m.x801 + m.x826 + m.x869 + m.x878 + m.x887
    + m.x893 + m.x911 + m.x921 + m.x940 + m.x958 + m.x985 + m.x994 + m.x1012 + m.x1026 + m.x1035
    + m.x1072 + m.x1095 + m.x1157 + m.x1210 + m.x1236 + m.x1244 <= 34)
m.c36 = Constraint(expr= m.x90 + m.x122 + m.x131 + m.x162 + m.x184 + m.x217 + m.x232 + m.x275 + m.x292 + m.x303
    + m.x329 + m.x345 + m.x361 + m.x377 + m.x394 + m.x405 + m.x437 + m.x448 + m.x459 + m.x494
    + m.x510 + m.x521 + m.x557 + m.x565 + m.x582 + m.x627 + m.x643 + m.x654 + m.x665 + m.x689
    + m.x700 + m.x735 + m.x751 + m.x777 + m.x793 + m.x819 + m.x827 + m.x838 + m.x850 + m.x861
    + m.x904 + m.x912 + m.x929 + m.x951 + m.x966 + m.x977 + m.x1005 + m.x1019 + m.x1036 + m.x1047
    + m.x1062 + m.x1136 + m.x1167 + m.x1178 + m.x1227 <= 105)
m.c37 = Constraint(expr= m.x91 + m.x99 + m.x123 + m.x132 + m.x163 + m.x170 + m.x185 + m.x203 + m.x218 + m.x233
    + m.x254 + m.x267 + m.x276 + m.x293 + m.x304 + m.x322 + m.x330 + m.x337 + m.x346 + m.x362
    + m.x369 + m.x378 + m.x395 + m.x406 + m.x424 + m.x438 + m.x449 + m.x460 + m.x478 + m.x495
    + m.x502 + m.x511 + m.x522 + m.x543 + m.x558 + m.x566 + m.x583 + m.x604 + m.x628 + m.x635
    + m.x644 + m.x655 + m.x666 + m.x690 + m.x701 + m.x712 + m.x736 + m.x743 + m.x752 + m.x770
    + m.x778 + m.x785 + m.x794 + m.x820 + m.x828 + m.x839 + m.x851 + m.x862 + m.x905 + m.x913
    + m.x930 + m.x952 + m.x967 + m.x978 + m.x1006 + m.x1020 + m.x1037 + m.x1048 + m.x1083 + m.x1211
    + m.x1217 <= 177)
m.c38 = Constraint(expr= m.x92 + m.x100 + m.x105 + m.x114 + m.x133 + m.x152 + m.x171 + m.x186 + m.x204 + m.x209
    + m.x234 + m.x255 + m.x260 + m.x268 + m.x294 + m.x305 + m.x323 + m.x338 + m.x347 + m.x353
    + m.x370 + m.x396 + m.x407 + m.x425 + m.x430 + m.x450 + m.x461 + m.x479 + m.x484 + m.x503
    + m.x523 + m.x544 + m.x549 + m.x584 + m.x605 + m.x610 + m.x619 + m.x636 + m.x656 + m.x667
    + m.x673 + m.x682 + m.x702 + m.x713 + m.x718 + m.x727 + m.x744 + m.x753 + m.x771 + m.x786
    + m.x795 + m.x811 + m.x840 + m.x852 + m.x863 + m.x879 + m.x894 + m.x931 + m.x968 + m.x979
    + m.x995 + m.x1021 + m.x1027 + m.x1049 + m.x1063 + m.x1084 + m.x1113 + m.x1137 + m.x1168
    + m.x1187 + m.x1197 + m.x1237 <= 110)
m.c39 = Constraint(expr= m.x93 + m.x106 + m.x134 + m.x143 + m.x153 + m.x187 + m.x193 + m.x235 + m.x244 + m.x277
    + m.x295 + m.x306 + m.x312 + m.x348 + m.x379 + m.x397 + m.x408 + m.x414 + m.x439 + m.x451
    + m.x462 + m.x468 + m.x485 + m.x512 + m.x524 + m.x533 + m.x567 + m.x585 + m.x594 + m.x611
    + m.x645 + m.x657 + m.x668 + m.x674 + m.x691 + m.x703 + m.x719 + m.x754 + m.x760 + m.x796
    + m.x802 + m.x829 + m.x841 + m.x853 + m.x864 + m.x870 + m.x880 + m.x895 + m.x914 + m.x932
    + m.x941 + m.x969 + m.x980 + m.x986 + m.x996 + m.x1022 + m.x1028 + m.x1038 + m.x1050 + m.x1096
    + m.x1114 + m.x1138 + m.x1188 <= 20)
m.c40 = Constraint(expr= m.x107 + m.x124 + m.x144 + m.x154 + m.x164 + m.x194 + m.x219 + m.x245 + m.x278 + m.x313
    + m.x331 + m.x363 + m.x380 + m.x415 + m.x440 + m.x469 + m.x486 + m.x496 + m.x513 + m.x534
    + m.x559 + m.x568 + m.x595 + m.x612 + m.x629 + m.x646 + m.x675 + m.x692 + m.x720 + m.x737
    + m.x761 + m.x779 + m.x803 + m.x821 + m.x830 + m.x871 + m.x881 + m.x896 + m.x906 + m.x915
    + m.x942 + m.x953 + m.x987 + m.x997 + m.x1007 + m.x1029 + m.x1039 + m.x1064 + m.x1158 + m.x1189
    + m.x1238 <= 131)
m.c41 = Constraint(expr= m.x101 + m.x115 + m.x135 + m.x145 + m.x178 + m.x195 + m.x205 + m.x210 + m.x225 + m.x236
    + m.x246 + m.x256 + m.x261 + m.x285 + m.x296 + m.x314 + m.x324 + m.x354 + m.x387 + m.x398
    + m.x416 + m.x426 + m.x431 + m.x452 + m.x470 + m.x480 + m.x525 + m.x535 + m.x545 + m.x550
    + m.x575 + m.x586 + m.x596 + m.x606 + m.x620 + m.x658 + m.x683 + m.x704 + m.x714 + m.x728
    + m.x762 + m.x772 + m.x804 + m.x812 + m.x842 + m.x854 + m.x872 + m.x888 + m.x922 + m.x933
    + m.x943 + m.x959 + m.x970 + m.x988 + m.x1013 + m.x1051 + m.x1073 + m.x1103 + m.x1198 <= 200)
m.c42 = Constraint(expr= m.x102 + m.x116 + m.x125 + m.x146 + m.x165 + m.x172 + m.x196 + m.x206 + m.x211 + m.x220
+ m.x247 + m.x257 + m.x262 + m.x269 + m.x315 + m.x325 + m.x332 + m.x339 + m.x355 + m.x364
+ m.x371 + m.x417 + m.x427 + m.x432 + m.x471 + m.x481 + m.x497 + m.x504 + m.x536 + m.x546
+ m.x551 + m.x560 + m.x597 + m.x607 + m.x621 + m.x630 + m.x637 + m.x684 + m.x715 + m.x729
+ m.x738 + m.x745 + m.x763 + m.x773 + m.x780 + m.x787 + m.x805 + m.x813 + m.x822 + m.x873
+ m.x907 + m.x944 + m.x954 + m.x989 + m.x1008 + m.x1074 + m.x1104 + m.x1125 + m.x1147 + m.x1201
| |
<gh_stars>10-100
import unittest
from distutils.version import LooseVersion
import owslib
import pyramid.testing
import pytest
from tests import resources
from tests.utils import (
get_test_weaver_app,
mocked_remote_server_requests_wps1,
setup_config_with_mongodb,
setup_mongodb_jobstore,
setup_mongodb_processstore,
setup_mongodb_servicestore
)
from weaver.config import WEAVER_CONFIGURATION_ADES, WEAVER_CONFIGURATION_HYBRID
from weaver.datatype import Service
from weaver.execute import EXECUTE_CONTROL_OPTION_ASYNC, EXECUTE_TRANSMISSION_MODE_REFERENCE
from weaver.formats import CONTENT_TYPE_APP_JSON, CONTENT_TYPE_APP_NETCDF, CONTENT_TYPE_APP_ZIP, CONTENT_TYPE_TEXT_PLAIN
from weaver.processes.constants import PROCESS_SCHEMA_OGC, PROCESS_SCHEMA_OLD
from weaver.utils import fully_qualified_name
# pylint: disable=C0103,invalid-name
class WpsProviderBase(unittest.TestCase):
    """ Base class for remote-WPS provider tests backed by mongodb stores. """

    remote_provider_name = None  # overridden by concrete subclasses
    settings = {}                # pyramid settings for the test weaver app
    config = None                # configurator created once in setUpClass

    def fully_qualified_test_process_name(self):
        """ Derive a process identifier unique to the running test. """
        return fully_qualified_name(self).replace(".", "-")

    def register_provider(self, clear=True, error=False, data=None):
        """ Register the test provider and validate the expected outcome.

        :param clear: drop previously registered services beforehand
        :param error: when ``True``, expect the registration to fail
        :param data: optional request body overriding the default provider
        :return: the registration response
        """
        if clear:
            self.service_store.clear_services()
        path = "/providers"
        data = data or {"id": self.remote_provider_name, "url": resources.TEST_REMOTE_SERVER_URL}
        resp = self.app.post_json(path, params=data, headers=self.json_headers, expect_errors=error)
        if error:
            assert resp.status_code != 201, "Expected provider to fail registration, but erroneously succeeded."
        else:
            err = resp.json
            # BUGFIX: former message ("Expected failed provider registration to
            # succeed") misdescribed this branch, which expects success.
            assert resp.status_code == 201, "Expected provider registration to succeed. Error:\n{}".format(err)
        return resp

    @classmethod
    def setUpClass(cls):
        # one app/config per test class; stores are rebuilt per test in setUp
        cls.config = setup_config_with_mongodb(settings=cls.settings)
        cls.app = get_test_weaver_app(config=cls.config)
        cls.json_headers = {"Accept": CONTENT_TYPE_APP_JSON, "Content-Type": CONTENT_TYPE_APP_JSON}

    @classmethod
    def tearDownClass(cls):
        pyramid.testing.tearDown()

    def setUp(self):
        # rebuild clean db on each test
        self.service_store = setup_mongodb_servicestore(self.config)
        self.process_store = setup_mongodb_processstore(self.config)
        self.job_store = setup_mongodb_jobstore(self.config)
# pylint: disable=C0103,invalid-name
class WpsRestApiProvidersTest(WpsProviderBase):
    """ Tests of the ``/providers`` REST endpoints against a mocked remote WPS-1 server. """
    remote_provider_name = "test-remote-provider"
    # hybrid configuration so both local and remote provider endpoints are exposed
    settings = {
        "weaver.url": "https://localhost",
        "weaver.wps_path": "/ows/wps",
        "weaver.configuration": WEAVER_CONFIGURATION_HYBRID
    }
def test_empty_provider_listing(self):
"""
Ensure schema validation succeeds when providers are empty.
Because of empty items within the list, ``OneOf["name",{detail}]`` cannot resolve which item is applicable.
.. seealso:
- https://github.com/crim-ca/weaver/issues/339
"""
self.service_store.clear_services()
resp = self.app.get("/providers", headers=self.json_headers)
assert resp.status_code == 200
assert resp.content_type == CONTENT_TYPE_APP_JSON
body = resp.json
assert "providers" in body and len(body["providers"]) == 0
    @mocked_remote_server_requests_wps1([
        resources.TEST_REMOTE_SERVER_URL,
        resources.TEST_REMOTE_PROCESS_GETCAP_WPS1_XML,
        [resources.TEST_REMOTE_PROCESS_DESCRIBE_WPS1_XML],
    ])
    def test_provider_listing_error_handling_queries(self, mock_responses):
        """
        Verify that provider listing handles invalid/unresponsive services as specified by query parameters.

        Four providers are set up: one valid, one returning unrecoverable XML,
        one returning recoverable XML, and one unreachable, then the listing
        is queried with the ``check``/``ignore`` parameter combinations.
        """
        # register valid service
        self.register_provider()
        # register service reachable but returning invalid XML
        invalid_id = self.remote_provider_name + "-invalid"
        invalid_url = resources.TEST_REMOTE_SERVER_URL + "/invalid"
        with open(resources.TEST_REMOTE_PROCESS_GETCAP_WPS1_XML) as xml:
            # inject badly formatted XML in otherwise valid GetCapabilities response
            # following causes 'wps.provider' to be 'None', which raises during metadata link generation (no check)
            invalid_data = xml.read().replace(
                "<ows:ServiceIdentification>",
                "<ows:ServiceIdentification> <ows:Title>Double Title <bad></ows:Title>"
            )
        mocked_remote_server_requests_wps1([invalid_url, invalid_data, []], mock_responses, data=True)
        # must store directly otherwise it raises during registration check
        # (simulate original service was ok, but was restarted at some point and now has invalid XML)
        self.service_store.save_service(Service(name=invalid_id, url=invalid_url))
        # register service reachable with invalid XML but that can be recovered
        # since it does not impact structure directly
        recover_id = self.remote_provider_name + "-recover"
        recover_url = resources.TEST_REMOTE_SERVER_URL + "/recover"
        with open(resources.TEST_REMOTE_PROCESS_GETCAP_WPS1_XML) as xml:
            # inject badly formatted XML in otherwise valid GetCapabilities response
            # following causes 'wps.processes' to be unresolvable, but service definition itself works
            recover_data = xml.read().replace(
                "<ows:ProcessOffering>",
                "<ows:ProcessOffering <wps:random> bad content <!-- --> <info> >"
            )
        mocked_remote_server_requests_wps1([recover_url, recover_data, []], mock_responses, data=True)
        # must store directly otherwise it raises during registration check
        # (simulate original service was ok, but was restarted at some point and now has invalid XML)
        self.service_store.save_service(Service(name=recover_id, url=recover_url))
        # register service unreachable (eg: was reachable at some point but stopped responding)
        # must store directly since registration will attempt to check it with failing request
        unresponsive_id = self.remote_provider_name + "-unresponsive"
        unresponsive_url = resources.TEST_REMOTE_SERVER_URL + "/unresponsive"
        unresponsive_caps = unresponsive_url + "?service=WPS&request=GetCapabilities&version=1.0.0"
        self.service_store.save_service(Service(name=unresponsive_id, url=unresponsive_caps))
        # without check: every stored provider is listed as-is
        resp = self.app.get("/providers?check=False", headers=self.json_headers)
        assert resp.status_code == 200
        assert len(resp.json["providers"]) == 4, "All providers should be returned since no check is requested"
        # with check+ignore: failing providers are silently dropped
        resp = self.app.get("/providers?check=True&ignore=True", headers=self.json_headers)
        assert resp.status_code == 200
        assert len(resp.json["providers"]) == 2, "Unresponsive provider should have been dropped, but not invalid XML"
        assert resp.json["providers"][0]["id"] == self.remote_provider_name
        assert resp.json["providers"][1]["id"] == recover_id
        # error expected to be caused by the 'unresponsive' service, first bad one encountered in the list
        resp = self.app.get("/providers?check=True&ignore=False", headers=self.json_headers, expect_errors=True)
        assert resp.status_code == 422, "Unprocessable response expected for invalid XML"
        assert unresponsive_id in resp.json["description"]
        assert "not accessible" in resp.json["cause"]
        assert "ConnectionError" in resp.json["error"], "Expected service to have trouble retrieving metadata"
        # remove 'unresponsive' service, and recheck, service 'invalid' should now be the problematic one
        self.service_store.delete_service(unresponsive_id)
        resp = self.app.get("/providers?check=True&ignore=False", headers=self.json_headers, expect_errors=True)
        assert resp.status_code == 422, "Unprocessable response expected for invalid XML"
        assert invalid_id in resp.json["description"]
        assert "attribute" in resp.json["cause"]
        assert "AttributeError" in resp.json["error"], "Expected service to have trouble parsing metadata"
        # remove 'invalid' service, and recheck, now all services are valid/recoverable without error
        self.service_store.delete_service(invalid_id)
        resp = self.app.get("/providers?check=True&ignore=False", headers=self.json_headers, expect_errors=True)
        assert resp.status_code == 200, "Valid service and recoverable XML should result in valid response"
        assert len(resp.json["providers"]) == 2
    @mocked_remote_server_requests_wps1([
        resources.TEST_REMOTE_SERVER_URL,
        resources.TEST_REMOTE_PROCESS_GETCAP_WPS1_XML,
        [resources.TEST_REMOTE_PROCESS_DESCRIBE_WPS1_XML],
    ])
    def test_register_provider_invalid(self, mock_responses):
        """
        Test registration of a service that is reachable but returning invalid XML GetCapabilities schema.

        The badly closed ``<ows:Title>`` tag makes the parsed service metadata
        unusable, so registration must be rejected with 422 and report the
        underlying ``AttributeError``.
        """
        invalid_id = self.remote_provider_name + "-invalid"
        invalid_url = resources.TEST_REMOTE_SERVER_URL + "/invalid"
        with open(resources.TEST_REMOTE_PROCESS_GETCAP_WPS1_XML) as xml:
            # inject badly formatted XML in otherwise valid GetCapabilities response
            # following causes 'wps.provider' to be 'None', which raises during metadata link generation (no check)
            invalid_data = xml.read().replace(
                "<ows:ServiceIdentification>",
                "<ows:ServiceIdentification> <ows:Title>Double Title <bad></ows:Title>"
            )
        # serve the corrupted body from the mocked "invalid" endpoint
        mocked_remote_server_requests_wps1([invalid_url, invalid_data, []], mock_responses, data=True)
        resp = self.register_provider(clear=True, error=True, data={"id": invalid_id, "url": invalid_url})
        assert resp.status_code == 422
        assert invalid_id in resp.json["description"]
        assert "attribute" in resp.json["cause"]
        assert resp.json["error"] == "AttributeError", "Expected service to have trouble parsing metadata"
def test_register_provider_unresponsive(self):
"""
Test registration of a service that is unreachable (cannot obtain XML GetCapabilities because no response).
"""
unresponsive_id = self.remote_provider_name + "-unresponsive"
unresponsive_url = resources.TEST_REMOTE_SERVER_URL + "/unresponsive"
resp = self.register_provider(clear=True, error=True, data={"id": unresponsive_id, "url": unresponsive_url})
assert resp.status_code == 422, "Unprocessable response expected for invalid XML"
assert unresponsive_id in resp.json["description"]
assert "Connection refused" in resp.json["cause"]
assert "ConnectionError" in resp.json["error"], "Expected service to have trouble retrieving metadata"
    @mocked_remote_server_requests_wps1([
        resources.TEST_REMOTE_SERVER_URL,
        resources.TEST_REMOTE_PROCESS_GETCAP_WPS1_XML,
        [resources.TEST_REMOTE_PROCESS_DESCRIBE_WPS1_XML],
    ])
    def test_register_provider_recoverable(self, mock_responses):
        """
        Test registration of a service that technically has invalid XML GetCapabilities schema, but can be recovered.

        .. seealso::
            - Parameter ``recover`` from instance :data:`weaver.xml_util.XML_PARSER` allows handling partially bad XML.
            - Other test that validates end-to-end definition of recoverable XML provider process.
              :class:`tests.functional.test_wps_provider.WpsProviderTest.test_register_finch_with_invalid_escape_chars`
        """
        recover_id = self.remote_provider_name + "-recover"
        recover_url = resources.TEST_REMOTE_SERVER_URL + "/recover"
        with open(resources.TEST_REMOTE_PROCESS_GETCAP_WPS1_XML) as xml:
            # inject badly formatted XML in otherwise valid GetCapabilities response
            # following causes 'wps.processes' to be unresolvable, but service definition itself works
            recover_data = xml.read().replace(
                "<ows:ProcessOffering>",
                "<ows:ProcessOffering <wps:random> bad content <!-- --> <info> >"
            )
        # serve the partially-broken body from the mocked "recover" endpoint
        mocked_remote_server_requests_wps1([recover_url, recover_data, []], mock_responses, data=True)
        # registration should nevertheless succeed since the service definition itself remains parsable
        resp = self.register_provider(clear=True, error=False, data={"id": recover_id, "url": recover_url})
        assert resp.json["id"] == recover_id
        assert resp.json["url"] == recover_url
@mocked_remote_server_requests_wps1([
resources.TEST_REMOTE_SERVER_URL,
resources.TEST_REMOTE_PROCESS_GETCAP_WPS1_XML,
[resources.TEST_REMOTE_PROCESS_DESCRIBE_WPS1_XML],
])
def test_register_provider_success(self):
resp = self.register_provider()
# should have fetched extra metadata to populate service definition
assert resp.json["id"] == self.remote_provider_name
assert resp.json["url"] == resources.TEST_REMOTE_SERVER_URL
assert resp.json["title"] == "Mock Remote Server"
assert resp.json["description"] == "Testing"
assert resp.json["public"] is False
@mocked_remote_server_requests_wps1([
resources.TEST_REMOTE_SERVER_URL,
resources.TEST_REMOTE_PROCESS_GETCAP_WPS1_XML,
[resources.TEST_REMOTE_PROCESS_DESCRIBE_WPS1_XML],
])
def test_register_provider_conflict(self):
self.register_provider(clear=True, error=False)
resp = self.register_provider(clear=False, error=True)
assert resp.status_code == 409
@mocked_remote_server_requests_wps1([
resources.TEST_REMOTE_SERVER_URL,
resources.TEST_REMOTE_PROCESS_GETCAP_WPS1_XML,
[resources.TEST_REMOTE_PROCESS_DESCRIBE_WPS1_XML],
])
def test_get_provider_processes(self):
self.register_provider()
path = "/providers/{}/processes".format(self.remote_provider_name)
resp = self.app.get(path, headers=self.json_headers)
assert resp.status_code == 200
assert resp.content_type == CONTENT_TYPE_APP_JSON
assert "processes" in resp.json and isinstance(resp.json["processes"], list)
assert len(resp.json["processes"]) == 2
remote_processes = []
for process in resp.json["processes"]:
assert "id" in process and isinstance(process["id"], str)
assert "title" in process and isinstance(process["title"], str)
assert "version" in process and isinstance(process["version"], str)
assert "keywords" in process and isinstance(process["keywords"], list)
assert "metadata" in process and isinstance(process["metadata"], list)
assert len(process["jobControlOptions"]) == 1
assert EXECUTE_CONTROL_OPTION_ASYNC in process["jobControlOptions"]
remote_processes.append(process["id"])
assert resources.TEST_REMOTE_PROCESS_WPS1_ID in remote_processes
    @pytest.mark.xfail(condition=LooseVersion(owslib.__version__) <= LooseVersion("0.25.0"),
                       reason="OWSLib fix for retrieval of processVersion from DescribeProcess not yet available "
                              "(https://github.com/geopython/OWSLib/pull/794)")
    @mocked_remote_server_requests_wps1([
        resources.TEST_REMOTE_SERVER_URL,
        resources.TEST_REMOTE_PROCESS_GETCAP_WPS1_XML,
        [resources.TEST_REMOTE_PROCESS_DESCRIBE_WPS1_XML],
    ])
    def test_get_provider_process_description_with_version(self):
        """
        Test only the version field which depends on a fix from :mod:`OWSLib`.

        The process description retrieved from a remote WPS-1 DescribeProcess request should provide
        its version converted into JSON schema, for known :data:`weaver.processes.constants.PROCESS_SCHEMAS`
        representations.

        .. seealso::
            - Full description validation (OGC schema): :meth:`test_get_provider_process_description_ogc_schema`
            - Full description validation (OLD schema): :meth:`test_get_provider_process_description_old_schema`
            - Fix in PR `geopython/OWSLib#794 <https://github.com/geopython/OWSLib/pull/794>`_
        """
        # NOTE(review): unlike sibling tests, this one does not call
        # self.register_provider() first -- confirm the provider is expected to
        # pre-exist here, otherwise the request should fail with 404.
        path = "/providers/{}/processes/{}".format(self.remote_provider_name, resources.TEST_REMOTE_PROCESS_WPS1_ID)
        # OLD schema: description nested under "process"
        resp = self.app.get(path, params={"schema": PROCESS_SCHEMA_OLD}, headers=self.json_headers)
        assert resp.status_code == 200
        assert resp.content_type == CONTENT_TYPE_APP_JSON
        proc = resp.json["process"]
        # OGC schema: description fields at the document root
        resp = self.app.get(path, params={"schema": PROCESS_SCHEMA_OGC}, headers=self.json_headers)
        assert resp.status_code == 200
        assert resp.content_type == CONTENT_TYPE_APP_JSON
        desc = resp.json
        assert "version" in proc and isinstance(proc["version"], str) and proc["version"] == "1.0.0"
        assert "version" in desc and isinstance(desc["version"], str) and desc["version"] == "1.0.0"
@mocked_remote_server_requests_wps1([
resources.TEST_REMOTE_SERVER_URL,
resources.TEST_REMOTE_PROCESS_GETCAP_WPS1_XML,
[resources.TEST_REMOTE_PROCESS_DESCRIBE_WPS1_XML],
])
def test_get_provider_process_description_old_schema(self):
self.register_provider()
query = {"schema": PROCESS_SCHEMA_OLD}
path = "/providers/{}/processes/{}".format(self.remote_provider_name, resources.TEST_REMOTE_PROCESS_WPS1_ID)
resp = self.app.get(path, params=query, | |
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name | |
# riverlane/deltalanguage — deltalanguage/wiring/_decorators.py
"""User facing decorators for building DeltaGraph nodes."""
from __future__ import annotations
from functools import wraps
import logging
from typing import (TYPE_CHECKING,
Callable,
List,
Optional,
Tuple,
Type)
from ._delta_graph import DeltaGraph
from ._node_classes.real_nodes import PythonNode
from ._node_templates import NodeTemplate, InteractiveBodyTemplate
if TYPE_CHECKING:
from deltalanguage.wiring import Latency
def DeltaBlock(
    outputs: List[Tuple[str, Type]] = None,
    template: NodeTemplate = None,
    allow_const: bool = True,
    node_key: Optional[str] = None,
    name: str = None,
    in_port_size: int = 0,
    latency: Latency = None,
    lvl: int = logging.ERROR,
    tags: List[str] = None
):
    """Decorator to turn a function to a block for use in
    :py:class:`DeltaGraph`.

    If called in the context of :py:class:`DeltaGraph` it will
    return a stateless node, which means that the output of such a node is
    fully determined by its inputs; exactly as it happens in functional
    programming.

    The node is evaluated when all compulsory inputs are provided.
    By default each input is compulsory, in order to make it optional use
    :py:class:`Optional<deltalanguage.data_types.Optional>` wrapper.

    .. warning::
        If a node does not have compulsory inputs then it will be evaluated
        continuously and this can significantly slow down a runtime simulator.
        If node does not have optional inputs consider using
        ``allow_const=True`` to improve performance.

    Parameters
    ----------
    outputs : List[Tuple[str, Type]]
        The types and names of the outputs for the node to be made.
    template : NodeTemplate
        Associate this node constructor with this specified existing node
        template rather than a newly created one.
    name : str
        The name of the node to be made.
    allow_const : bool
        If ``True`` and all inputs are constant then the output of this node is
        calculated only once, cached, and reused at each request.
        This used to reduce the computation load.
    node_key : Optional[str]
        Keyword argument used for providing the node to the block, in case the
        user wants to debug sending & receiving messages in an interactive
        console. Note that this should only be used for debugging; for
        Deltaflow programs in production it should be sufficient to use the
        inputs and return values of a block for communications.
    in_port_size : int
        The maximum size of the node's in ports.
        If 0 then unlimited size.
    latency : Latency
        The estimated latency for running the body.
    lvl : int
        Logging level for the node.

    .. note::
        The function's inputs and outputs have to be typed, otherwise
        the decorator will raise an error.

    Examples
    --------
    The decorated function can be used as a normal python function:

    .. code-block:: python

        >>> import deltalanguage as dl

        >>> @dl.DeltaBlock()
        ... def foo(a: int) -> int:
        ...     return a + 5

        >>> foo(5)
        10

    The exact same function can be used in the context of
    :py:class:`DeltaGraph`:

    .. code-block:: python

        >>> s = dl.lib.StateSaver(object, verbose=True)

        >>> with dl.DeltaGraph() as graph:
        ...     foo_out = foo(5)
        ...     s.save_and_exit(foo_out)

        >>> dl.DeltaPySimulator(graph).run()
        saving 10

    Nodes can also have multiple inputs:

    .. code-block:: python

        >>> @dl.DeltaBlock()
        ... def bar(a: int, b: int) -> int:
        ...     return a*b

    Make sure that inputs of such nodes are not mixed up. For this one can
    use keyworded arguments, exactly as in python:

    .. code-block:: python

        >>> with dl.DeltaGraph() as graph:
        ...     s.save_and_exit(bar(a=9, b=9))

        >>> dl.DeltaPySimulator(graph).run()
        saving 81

    Nodes can send multiple outputs by specifying the names and types of these
    outputs in the decorator. When doing this, no output type annotation is
    needed.

    .. code-block:: python

        >>> @dl.DeltaBlock(outputs=[('x', int), ('y', int)])
        ... def baz(a: int, b: int):
        ...     return a+5, a*b

    We can then refer to the different outputs of this node using indexing,
    where the index matches the name of the output as specified in the
    decorator.

    .. code-block:: python

        >>> with dl.DeltaGraph() as graph_2:
        ...     baz_out = baz(10, 2)
        ...     s.save_and_exit(bar(baz_out.x, baz_out.y))

        >>> dl.DeltaPySimulator(graph_2).run()
        saving 300

    You can use indexing to reference the output of a node with only one
    output, its default name will be ``output``. However, you can also
    specify and use another name, by setting the decorator ``outputs``
    parameter with just a single output.
    """
    def _decorate(func):
        block_outputs = [] if outputs is None else outputs
        # Build (or merge into) the template that describes this block's
        # interface; the template is what actually constructs graph nodes.
        block_template = NodeTemplate.merge_deltablock(template,
                                                       block_outputs,
                                                       func,
                                                       allow_const,
                                                       node_key,
                                                       name or func.__name__,
                                                       in_port_size,
                                                       latency,
                                                       lvl,
                                                       tags)

        @wraps(func)
        def wrapper(*args, **kwargs):
            """Dispatch between graph-building and plain evaluation.

            If a :py:class:`DeltaGraph` is currently active, the arguments
            are treated as nodes/values to wire into a new node created via
            the template; otherwise the wrapped function is simply called.

            Returns
            -------
            the result of function evaluation or the created node
            """
            if not DeltaGraph.stack():
                # No DeltaGraph is being built: behave as a normal call.
                return func(*args, **kwargs)
            # A DeltaGraph is active: create and wire a node instead.
            return block_template.call(*args, **kwargs)

        # Expose the template so graph machinery can reach it via the
        # decorated callable.
        wrapper.template = block_template
        return wrapper
    return _decorate
def DeltaMethodBlock(
outputs: List[Tuple[str, Type]] = None,
template: NodeTemplate = None,
name: str = None,
node_key: Optional[str] = None,
in_port_size: int = 0,
latency: Latency = None,
lvl: int = logging.ERROR,
tags: List[str] = None
):
"""Decorator to turn a class method to a block for use in
:py:class:`DeltaGraph`.
If called in the context of :py:class:`DeltaGraph` it will
return a node that can have an internal state stored in the instance of
the target class. Thus the output determined by not only the inputs but
also by the internal state that can change.
.. warning::
The internal state is a very powerful concept that makes the Deltaflow
language significantly more expressive, but also can lead to
non-deterministic results. Please refer to
`Non-deterministic state history <tutorials/state_history.html>`_
that cover this in detail.
The node is evaluated when all compulsory inputs are provided.
By default each input is compulsory, in order to make it optional use
:py:class:`Optional<deltalanguage.data_types.Optional>` wrapper.
.. warning::
If a node does not have compulsory inputs then it will be evaluated
continuesly and this can significantly slow down a runtime simulator.
Parameters
----------
outputs : List[Tuple[str, Type]]
The types and names of the outputs for the node to be made.
template : NodeTemplate
Associate this node constructor with this specfied existing node
template rather than a newly created one.
name : str
The name of the node to be made.
node_key : Optional[str]
Keyword argument used for providing the node to the block, in case the
user wants to debug sending & receiving messages in an interactive
console. Note that this should only be used for debugging; for
Deltaflow programs in production it should be sufficient to use the
inputs and return values of a block for communications.
in_port_size : int
The maximum size of the node's in ports.
If 0 then unlimited size.
latency : Latency
The estimated latency for running the body.
lvl : int
Logging level for the node.
.. note::
The method's inputs and outputs have to be typed, otherwise
the decorator will raise an error.
Examples
--------
The decorated method can be used as a normal python class method:
.. code-block:: python
>>> import deltalanguage as dl
>>> class MyClass:
...
... def __init__(self, x):
... self.x = x
...
... @dl.DeltaMethodBlock()
... def bar(self, a: int) -> int:
... return self.x + a
>>> my_obj = MyClass(10)
>>> print(my_obj.bar(5))
15
The exact same object can be used in the context of
:py:class:`DeltaGraph`:
.. code-block:: python
>>> s = dl.lib.StateSaver(object, verbose=True)
>>> with dl.DeltaGraph() as graph:
... s.save_and_exit(my_obj.bar(5))
>>> dl.DeltaPySimulator(graph).run()
saving 15
However if the internal state of the object changes (before or during),
the result will change:
.. code-block:: python
>>> my_obj.x = 15
>>> dl.DeltaPySimulator(graph).run()
saving 20
Nodes can also have multiple inputs and outputs exactly as
:py:class:`DeltaBlock`.
"""
def decorator(func):
_outputs = outputs if outputs is not None else []
my_template = NodeTemplate.merge_deltamethod(template,
_outputs,
func,
node_key,
name or func.__name__,
in_port_size,
latency,
lvl,
tags)
@wraps(func)
def decorated(obj, *args, **kwargs):
"""If there is currently an active DeltaGraph, return a node
constructed using the node factory.
Parameters
----------
| |
with this
value cannot be used with Inbound Replication into an MySQL Database
Service instance with High Availability. Mutually exclusive with the
ignore_missing_pks value.
force_innodb - The MySQL Database Service requires use of the InnoDB
storage engine. This option will modify the ENGINE= clause of CREATE
TABLE statements that use incompatible storage engines and replace them
with InnoDB. It will also remove the ROW_FORMAT=FIXED option, as it is
not supported by the InnoDB storage engine.
ignore_missing_pks - Ignore errors caused by tables which do not have
Primary Keys. Dumps created with this value cannot be used in MySQL
Database Service instance with High Availability. Mutually exclusive with
the create_invisible_pks value.
skip_invalid_accounts - Skips accounts which do not have a password or
use authentication methods (plugins) not supported by the MySQL Database
Service.
strip_definers - Strips the "DEFINER=account" clause from views,
routines, events and triggers. The MySQL Database Service requires
special privileges to create these objects with a definer other than the
user loading the schema. By stripping the DEFINER clause, these objects
will be created with that default definer. Views and Routines will
additionally have their SQL SECURITY clause changed from DEFINER to
INVOKER. This ensures that the access permissions of the account querying
or calling these are applied, instead of the user that created them. This
should be sufficient for most users, but if your database security model
requires that views and routines have more privileges than their invoker,
you will need to manually modify the schema before loading it.
Please refer to the MySQL manual for details about DEFINER and SQL
SECURITY.
strip_restricted_grants - Certain privileges are restricted in the MySQL
Database Service. Attempting to create users granting these privileges
would fail, so this option allows dumped GRANT statements to be stripped
of these privileges.
strip_tablespaces - Tablespaces have some restrictions in the MySQL
Database Service. If you'd like to have tables created in their default
tablespaces, this option will strip the TABLESPACE= option from CREATE
TABLE statements.
Additionally, the following changes will always be made to DDL scripts
when the ocimds option is enabled:
- DATA DIRECTORY, INDEX DIRECTORY and ENCRYPTION options in CREATE TABLE
statements will be commented out.
At the time of the release of MySQL Shell 8.0.24, in order to use Inbound
Replication into an MySQL Database Service instance with High
Availability, all tables at the source server need to have Primary Keys.
This needs to be fixed manually before running the dump. Starting with
MySQL 8.0.23 invisible columns may be used to add Primary Keys without
changing the schema compatibility, for more information see:
https://dev.mysql.com/doc/refman/en/invisible-columns.html.
In order to use MySQL Database Service instance with High Availability,
all tables at the MDS server need to have Primary Keys. This can be fixed
automatically using the create_invisible_pks compatibility value.
Please refer to the MySQL Database Service documentation for more
information about restrictions and compatibility.
Dumping to a Bucket in the OCI Object Storage
If the osBucketName option is used, the dump is stored in the specified
OCI bucket, connection is established using the local OCI profile. The
directory structure is simulated within the object name.
The osNamespace, ociConfigFile and ociProfile options cannot be used if
the osBucketName option is set to an empty string.
The osNamespace option overrides the OCI namespace obtained based on the
tenancy ID from the local OCI profile.
Enabling dump loading using preauthenticated requests
To enable loading a dump without requiring an OCI Profile, the dump
operations can automatically generate a preauthenticated request (PAR)
for every file generated on the dump operation, this is done by enabling
the ociParManifest option.
When the ociParManifest option is enabled, a file named "@.manifest.json"
is generated, it contains the PAR for each file generated on the dump.
The manifest is updated as the dump operation progresses.
The ociParManifest option cannot be used if osBucketName is not set. The
default value of this option depends on the dump settings: if ocimds is
enabled and osBucketName is specified then it will be enabled, otherwise
it will be disabled. In any case, if the option is explicitly set to a
value, the user provided value will be used.
When creating PARs, an expiration time is required, it can be defined
through the ociParExpireTime option. If the option is not used, a
    predefined expiration time will be used equivalent to a week after the
dump operation started. The values assigned to this option should be
conformant to RFC3339.
The ociParExpireTime option cannot be used if the ociParManifest option
is not enabled.
EXCEPTIONS
ArgumentError in the following scenarios:
- If any of the input arguments contains an invalid value.
RuntimeError in the following scenarios:
- If there is no open global session.
- If creating the output directory fails.
- If creating or writing to the output file fails.
#@<OUT> util export_table help
NAME
export_table - Exports the specified table to the data dump file.
SYNTAX
util.export_table(table, outputUrl[, options])
WHERE
table: Name of the table to be exported.
outputUrl: Target file to store the data.
options: Dictionary with the export options.
DESCRIPTION
The value of table parameter should be in form of table or schema.table,
quoted using backtick characters when required. If schema is omitted, an
active schema on the global Shell session is used. If there is none, an
exception is raised.
The outputUrl specifies where the dump is going to be stored.
By default, a local file is used, and in this case outputUrl can be
prefixed with file:// scheme. If a relative path is given, the absolute
path is computed as relative to the current working directory. The parent
directory of the output file must exist. If the output file exists, it is
going to be overwritten. The output file is created with the following
access rights (on operating systems which support them): rw-r-----.
The following options are supported:
- fieldsTerminatedBy: string (default: "\t"), fieldsEnclosedBy: char
(default: ''), fieldsEscapedBy: char (default: '\'), linesTerminatedBy:
string (default: "\n") - These options have the same meaning as the
corresponding clauses for SELECT ... INTO OUTFILE. For more information
use \? SQL Syntax/SELECT, (a session is required).
- fieldsOptionallyEnclosed: bool (default: false) - Set to true if the
input values are not necessarily enclosed within quotation marks
specified by fieldsEnclosedBy option. Set to false if all fields are
quoted by character specified by fieldsEnclosedBy option.
- dialect: enum (default: "default") - Setup fields and lines options
that matches specific data file format. Can be used as base dialect and
customized with fieldsTerminatedBy, fieldsEnclosedBy,
fieldsOptionallyEnclosed, fieldsEscapedBy and linesTerminatedBy
options. Must be one of the following values: default, csv, tsv or
csv-unix.
- maxRate: string (default: "0") - Limit data read throughput to maximum
rate, measured in bytes per second per thread. Use maxRate="0" to set
no limit.
- showProgress: bool (default: true if stdout is a TTY device, false
otherwise) - Enable or disable dump progress information.
- defaultCharacterSet: string (default: "utf8mb4") - Character set used
for the dump.
- compression: string (default: "none") - Compression used when writing
the data dump files, one of: "none", "gzip", "zstd".
- osBucketName: string (default: not set) - Use specified OCI bucket for
the location of the dump.
- osNamespace: string (default: not set) - Specifies the namespace where
the bucket is located, if not given it will be obtained using the
tenancy id on the OCI configuration.
- ociConfigFile: string (default: not set) - Use the specified OCI
configuration file instead of the one in the default location.
- ociProfile: string (default: not set) - Use the specified OCI profile
instead of the default one.
Requirements
- MySQL Server 5.7 or newer is required.
- File size limit for files uploaded to the OCI bucket is 1.2 TiB.
- Columns | |
# generated by update to not change manually
import dataclasses as dt
import typing as t
from enum import Enum, Flag
from bungieapi.json import to_json
@dt.dataclass(frozen=True)
class GroupUserInfoCard:
    """User info card as exposed by the GroupV2 endpoints.

    Generated from the Bungie.net API schema (see the file header:
    "generated by update to not change manually"); field ``metadata``
    carries the upstream descriptions.
    """
    last_seen_display_name: str = dt.field(
        metadata={
            "description": "This will be the display name the clan server last saw the user as. If the account is an active cross save override, this will be the display name to use. Otherwise, this will match the displayName property."
        }
    )
    last_seen_display_name_type: "BungieMembershipType" = dt.field(
        metadata={"description": "The platform of the LastSeenDisplayName"}
    )
    applicable_membership_types: t.Sequence["BungieMembershipType"] = dt.field(
        metadata={
            "description": """The list of Membership Types indicating the platforms on which this Membership can be used.
 Not in Cross Save = its original membership type. Cross Save Primary = Any membership types it is overridding, and its original membership type Cross Save Overridden = Empty list"""
        }
    )
    bungie_global_display_name: str = dt.field(
        metadata={"description": "The bungie global display name, if set."}
    )
    cross_save_override: "BungieMembershipType" = dt.field(
        metadata={
            "description": "If there is a cross save override in effect, this value will tell you the type that is overridding this one."
        }
    )
    display_name: str = dt.field(
        metadata={
            "description": "Display Name the player has chosen for themselves. The display name is optional when the data type is used as input to a platform API."
        }
    )
    icon_path: str = dt.field(metadata={"description": "URL the Icon if available."})
    is_public: bool = dt.field(
        metadata={"description": "If True, this is a public user membership."}
    )
    membership_id: int = dt.field(
        metadata={
            "description": "Membership ID as they user is known in the Accounts service"
        }
    )
    membership_type: "BungieMembershipType" = dt.field(
        metadata={
            "description": "Type of the membership. Not necessarily the native type."
        }
    )
    supplemental_display_name: str = dt.field(
        metadata={
            "description": "A platform specific additional display name - ex: psn Real Name, bnet Unique Name, etc."
        }
    )
    bungie_global_display_name_code: t.Optional[int] = dt.field(
        default=None,
        metadata={"description": "The bungie global display name code, if set."},
    )
    def to_json(self) -> t.Mapping[str, t.Any]:
        """Serialize this card using the API's original (camel/Pascal-case)
        JSON field names; values are converted via :func:`to_json`."""
        return {
            "LastSeenDisplayName": to_json(self.last_seen_display_name),
            "LastSeenDisplayNameType": to_json(self.last_seen_display_name_type),
            "supplementalDisplayName": to_json(self.supplemental_display_name),
            "iconPath": to_json(self.icon_path),
            "crossSaveOverride": to_json(self.cross_save_override),
            "applicableMembershipTypes": to_json(self.applicable_membership_types),
            "isPublic": to_json(self.is_public),
            "membershipType": to_json(self.membership_type),
            "membershipId": to_json(self.membership_id),
            "displayName": to_json(self.display_name),
            "bungieGlobalDisplayName": to_json(self.bungie_global_display_name),
            "bungieGlobalDisplayNameCode": to_json(
                self.bungie_global_display_name_code
            ),
        }
@dt.dataclass(frozen=True)
class GroupResponse:
    """Response envelope for a single group: the group itself plus the
    requesting user's relation to it.

    Generated from the Bungie.net API schema; do not edit by hand.
    """
    alliance_status: "GroupAllianceStatus"
    allied_ids: t.Sequence[int]
    current_user_member_map: t.Mapping[str, "GroupMember"] = dt.field(
        metadata={
            "description": "This property will be populated if the authenticated user is a member of the group. Note that because of account linking, a user can sometimes be part of a clan more than once. As such, this returns the highest member type available."
        }
    )
    current_user_memberships_inactive_for_destiny: bool = dt.field(
        metadata={
            "description": "A convenience property that indicates if every membership you (the current user) have that is a part of this group are part of an account that is considered inactive - for example, overridden accounts in Cross Save."
        }
    )
    current_user_potential_member_map: t.Mapping[
        str, "GroupPotentialMember"
    ] = dt.field(
        metadata={
            "description": "This property will be populated if the authenticated user is an applicant or has an outstanding invitation to join. Note that because of account linking, a user can sometimes be part of a clan more than once."
        }
    )
    detail: "GroupV2"
    founder: "GroupMember"
    group_join_invite_count: int
    parent_group: "GroupV2"
    def to_json(self) -> t.Mapping[str, t.Any]:
        """Serialize using the API's original JSON field names; values are
        converted via :func:`to_json`."""
        return {
            "detail": to_json(self.detail),
            "founder": to_json(self.founder),
            "alliedIds": to_json(self.allied_ids),
            "parentGroup": to_json(self.parent_group),
            "allianceStatus": to_json(self.alliance_status),
            "groupJoinInviteCount": to_json(self.group_join_invite_count),
            "currentUserMembershipsInactiveForDestiny": to_json(
                self.current_user_memberships_inactive_for_destiny
            ),
            "currentUserMemberMap": to_json(self.current_user_member_map),
            "currentUserPotentialMemberMap": to_json(
                self.current_user_potential_member_map
            ),
        }
@dt.dataclass(frozen=True)
class GroupV2:
    """A group/clan entity from the GroupV2 service.

    Generated from the Bungie.net API schema; do not edit by hand.
    Date fields (``creation_date``, ``modification_date``,
    ``ban_expire_date``) are kept as strings — presumably ISO-8601
    timestamps from the API; confirm against the upstream schema.
    """
    about: str
    allow_chat: bool
    avatar_image_index: int
    avatar_path: str
    banner_path: str
    chat_security: "ChatSecuritySetting"
    clan_info: "GroupV2ClanInfoAndInvestment"
    conversation_id: int
    creation_date: str
    default_publicity: "GroupPostPublicity"
    enable_invitation_messaging_for_admins: bool
    features: "GroupFeatures"
    group_id: int
    group_type: "GroupType"
    homepage: "GroupHomepage"
    is_default_post_public: bool
    is_public: bool
    is_public_topic_admin_only: bool
    locale: str
    member_count: int
    membership_id_created: int
    membership_option: "MembershipOption"
    modification_date: str
    motto: str
    name: str
    tags: t.Sequence[str]
    theme: str
    # Only optional field; defaults to None when the group has no ban.
    ban_expire_date: t.Optional[str] = None
    def to_json(self) -> t.Mapping[str, t.Any]:
        """Serialize using the API's original camel-case JSON field names;
        values are converted via :func:`to_json`."""
        return {
            "groupId": to_json(self.group_id),
            "name": to_json(self.name),
            "groupType": to_json(self.group_type),
            "membershipIdCreated": to_json(self.membership_id_created),
            "creationDate": to_json(self.creation_date),
            "modificationDate": to_json(self.modification_date),
            "about": to_json(self.about),
            "tags": to_json(self.tags),
            "memberCount": to_json(self.member_count),
            "isPublic": to_json(self.is_public),
            "isPublicTopicAdminOnly": to_json(self.is_public_topic_admin_only),
            "motto": to_json(self.motto),
            "allowChat": to_json(self.allow_chat),
            "isDefaultPostPublic": to_json(self.is_default_post_public),
            "chatSecurity": to_json(self.chat_security),
            "locale": to_json(self.locale),
            "avatarImageIndex": to_json(self.avatar_image_index),
            "homepage": to_json(self.homepage),
            "membershipOption": to_json(self.membership_option),
            "defaultPublicity": to_json(self.default_publicity),
            "theme": to_json(self.theme),
            "bannerPath": to_json(self.banner_path),
            "avatarPath": to_json(self.avatar_path),
            "conversationId": to_json(self.conversation_id),
            "enableInvitationMessagingForAdmins": to_json(
                self.enable_invitation_messaging_for_admins
            ),
            "banExpireDate": to_json(self.ban_expire_date),
            "features": to_json(self.features),
            "clanInfo": to_json(self.clan_info),
        }
class GroupType(Enum):
    """Kind of group: GENERAL (0) or CLAN (1)."""
    GENERAL = 0
    CLAN = 1
class ChatSecuritySetting(Enum):
    """Chat security level for a group: GROUP (0) or ADMINS (1)."""
    GROUP = 0
    ADMINS = 1
class GroupHomepage(Enum):
    """Group homepage selection: WALL (0), FORUM (1) or ALLIANCE_FORUM (2)."""
    WALL = 0
    FORUM = 1
    ALLIANCE_FORUM = 2
class MembershipOption(Enum):
    """Membership policy: REVIEWED (0), OPEN (1) or CLOSED (2)."""
    REVIEWED = 0
    OPEN = 1
    CLOSED = 2
class GroupPostPublicity(Enum):
    """Post publicity level: PUBLIC (0), ALLIANCE (1) or PRIVATE (2)."""
    PUBLIC = 0
    ALLIANCE = 1
    PRIVATE = 2
@dt.dataclass(frozen=True)
class GroupFeatures:
    """Immutable record of a group's feature/permission configuration.

    ``to_json`` produces a mapping with the API's camelCase key names.
    """

    capabilities: "Capabilities"
    host_guided_game_permission_override: "HostGuidedGamesPermissionLevel" = dt.field(
        metadata={
            "description": """Minimum Member Level allowed to host guided games
            Always Allowed: Founder, Acting Founder, Admin
            Allowed Overrides: None, Member, Beginner
            Default is Member for clans, None for groups, although this means nothing for groups."""
        }
    )
    invite_permission_override: bool = dt.field(
        metadata={
            "description": """Minimum Member Level allowed to invite new members to group
            Always Allowed: Founder, Acting Founder
            True means admins have this power, false means they don't
            Default is false for clans, true for groups."""
        }
    )
    join_level: "RuntimeGroupMemberType" = dt.field(
        metadata={
            "description": """Level to join a member at when accepting an invite, application, or joining an open clan
            Default is Beginner."""
        }
    )
    maximum_members: int
    maximum_memberships_of_group_type: int = dt.field(
        metadata={
            "description": "Maximum number of groups of this type a typical membership may join. For example, a user may join about 50 General groups with their Bungie.net account. They may join one clan per Destiny membership."
        }
    )
    membership_types: t.Sequence["BungieMembershipType"]
    update_banner_permission_override: bool = dt.field(
        metadata={
            "description": """Minimum Member Level allowed to update banner
            Always Allowed: Founder, Acting Founder
            True means admins have this power, false means they don't
            Default is false for clans, true for groups."""
        }
    )
    update_culture_permission_override: bool = dt.field(
        metadata={
            "description": """Minimum Member Level allowed to update group culture
            Always Allowed: Founder, Acting Founder
            True means admins have this power, false means they don't
            Default is false for clans, true for groups."""
        }
    )

    def to_json(self) -> t.Mapping[str, t.Any]:
        """Serialize the feature configuration to a JSON-compatible mapping."""
        pairs = (
            ("maximumMembers", self.maximum_members),
            ("maximumMembershipsOfGroupType", self.maximum_memberships_of_group_type),
            ("capabilities", self.capabilities),
            ("membershipTypes", self.membership_types),
            ("invitePermissionOverride", self.invite_permission_override),
            ("updateCulturePermissionOverride", self.update_culture_permission_override),
            ("hostGuidedGamePermissionOverride", self.host_guided_game_permission_override),
            ("updateBannerPermissionOverride", self.update_banner_permission_override),
            ("joinLevel", self.join_level),
        )
        return {key: to_json(value) for key, value in pairs}
class Capabilities(Flag):
    """Bitmask of optional group capabilities.

    Members are powers of two so multiple capabilities can be combined
    with bitwise OR.
    """
    NONE = 0
    LEADERBOARDS = 1
    CALLSIGN = 2
    OPTIONAL_CONVERSATIONS = 4
    CLAN_BANNER = 8
    D2_INVESTMENT_DATA = 16
    TAGS = 32
    ALLIANCES = 64
class HostGuidedGamesPermissionLevel(Enum):
    """Used for setting the guided game permission level override (admins and
    founders can always host guided games)."""
    # Override values: NONE (0), BEGINNER (1), MEMBER (2).
    NONE = 0
    BEGINNER = 1
    MEMBER = 2
class RuntimeGroupMemberType(Enum):
    """The member levels used by all V2 Groups API.
    Individual group types use their own mappings in their native
    storage (general uses BnetDbGroupMemberType and D2 clans use
    ClanMemberLevel), but they are all translated to this in the runtime
    api. These runtime values should NEVER be stored anywhere, so the
    values can be changed as necessary.
    """
    # NOTE(review): values appear ordered by increasing privilege — do not
    # persist them (see docstring).
    NONE = 0
    BEGINNER = 1
    MEMBER = 2
    ADMIN = 3
    ACTING_FOUNDER = 4
    FOUNDER = 5
@dt.dataclass(frozen=True)
class GroupV2ClanInfo:
    """This contract contains clan-specific group information.

    It does not include any investment data.
    """

    clan_banner_data: "ClanBanner"
    clan_callsign: str

    def to_json(self) -> t.Mapping[str, t.Any]:
        """Serialize the clan info to a JSON-compatible mapping."""
        pairs = (
            ("clanCallsign", self.clan_callsign),
            ("clanBannerData", self.clan_banner_data),
        )
        return {key: to_json(value) for key, value in pairs}
@dt.dataclass(frozen=True)
class ClanBanner:
    """Immutable record of a clan banner's component/color selections."""

    decal_background_color_id: int
    decal_color_id: int
    decal_id: int
    gonfalon_color_id: int
    gonfalon_detail_color_id: int
    gonfalon_detail_id: int
    gonfalon_id: int

    def to_json(self) -> t.Mapping[str, t.Any]:
        """Serialize the banner selections to a JSON-compatible mapping."""
        pairs = (
            ("decalId", self.decal_id),
            ("decalColorId", self.decal_color_id),
            ("decalBackgroundColorId", self.decal_background_color_id),
            ("gonfalonId", self.gonfalon_id),
            ("gonfalonColorId", self.gonfalon_color_id),
            ("gonfalonDetailId", self.gonfalon_detail_id),
            ("gonfalonDetailColorId", self.gonfalon_detail_color_id),
        )
        return {key: to_json(value) for key, value in pairs}
@dt.dataclass(frozen=True)
class GroupV2ClanInfoAndInvestment:
    """The same as GroupV2ClanInfo, but includes any investment data."""

    clan_banner_data: "ClanBanner"
    clan_callsign: str
    d2_clan_progressions: t.Mapping[str, "DestinyProgression"]

    def to_json(self) -> t.Mapping[str, t.Any]:
        """Serialize clan info plus investment data to a JSON-compatible mapping."""
        pairs = (
            ("d2ClanProgressions", self.d2_clan_progressions),
            ("clanCallsign", self.clan_callsign),
            ("clanBannerData", self.clan_banner_data),
        )
        return {key: to_json(value) for key, value in pairs}
@dt.dataclass(frozen=True)
class GroupUserBase:
    """Common fields shared by group membership records."""

    bungie_net_user_info: "UserInfoCard"
    destiny_user_info: "GroupUserInfoCard"
    group_id: int
    join_date: str

    def to_json(self) -> t.Mapping[str, t.Any]:
        """Serialize the membership record to a JSON-compatible mapping."""
        pairs = (
            ("groupId", self.group_id),
            ("destinyUserInfo", self.destiny_user_info),
            ("bungieNetUserInfo", self.bungie_net_user_info),
            ("joinDate", self.join_date),
        )
        return {key: to_json(value) for key, value in pairs}
@dt.dataclass(frozen=True)
class GroupMember:
    """Immutable record of a user's membership in a group."""

    bungie_net_user_info: "UserInfoCard"
    destiny_user_info: "GroupUserInfoCard"
    group_id: int
    is_online: bool
    join_date: str
    last_online_status_change: int
    member_type: "RuntimeGroupMemberType"

    def to_json(self) -> t.Mapping[str, t.Any]:
        """Serialize the member record to a JSON-compatible mapping."""
        pairs = (
            ("memberType", self.member_type),
            ("isOnline", self.is_online),
            ("lastOnlineStatusChange", self.last_online_status_change),
            ("groupId", self.group_id),
            ("destinyUserInfo", self.destiny_user_info),
            ("bungieNetUserInfo", self.bungie_net_user_info),
            ("joinDate", self.join_date),
        )
        return {key: to_json(value) for key, value in pairs}
class GroupAllianceStatus(Enum):
    """Alliance status of a group: UNALLIED (0), PARENT (1) or CHILD (2)."""
    UNALLIED = 0
    PARENT = 1
    CHILD = 2
@dt.dataclass(frozen=True)
class GroupPotentialMember:
    """Immutable record of a user who may join a group (applicant/invitee)."""

    bungie_net_user_info: "UserInfoCard"
    destiny_user_info: "GroupUserInfoCard"
    group_id: int
    join_date: str
    potential_status: "GroupPotentialMemberStatus"

    def to_json(self) -> t.Mapping[str, t.Any]:
        """Serialize the potential-member record to a JSON-compatible mapping."""
        pairs = (
            ("potentialStatus", self.potential_status),
            ("groupId", self.group_id),
            ("destinyUserInfo", self.destiny_user_info),
            ("bungieNetUserInfo", self.bungie_net_user_info),
            ("joinDate", self.join_date),
        )
        return {key: to_json(value) for key, value in pairs}
class GroupPotentialMemberStatus(Enum):
NONE = | |
# roc should be: batch x multi_v x X x Y
roc = weighted_multi_smooth_v[:, :, n, ...]
yc = torch.sum(roc * t_weights, dim=1)
elif kernel_weighting_type == 'w_K':
# roc should be: batch x multi_v x X x Y
roc = torch.transpose(multi_smooth_v[:, :, n, ...], 0, 1)
yc = torch.sum(roc * t_weights, dim=1)
else:
raise ValueError('Unknown weighting_type: {}'.format(kernel_weighting_type))
localized_v[:, n, ...] = yc # localized_v is: batch x channels x X x Y
localized_v = localized_v.cpu().numpy()
if visualize:
norm_localized_v = (localized_v[0, 0, ...] ** 2 + localized_v[0, 1, ...] ** 2) ** 0.5
plt.clf()
plt.subplot(121)
plt.imshow(norm_localized_v)
plt.axis('image')
plt.colorbar()
plt.subplot(121)
plt.quiver(m[0,1,...],m[0,0,...])
plt.axis('equal')
plt.show()
return localized_v
def compute_map_from_v(localized_v, sz, spacing):
    """Integrate the advection equation for the given velocity field.

    Uses an RK4 integrator with 40 time steps to advect the identity map
    from t=0 to t=1 under ``localized_v``.

    Returns:
        (phi0, phi1): the identity map and the resulting deformation map.
    """
    solver_params = pars.ParameterDict()
    solver_params['number_of_time_steps'] = 40
    advection = fm.AdvectMap(sz[2:], spacing)
    velocity = AdaptVal(torch.from_numpy(localized_v))
    extra_pars = utils.combine_dict({'v': velocity}, dict())
    stepper = rk.RK4(advection.f, advection.u, extra_pars, solver_params)
    phi0 = AdaptVal(torch.from_numpy(utils.identity_map_multiN(sz, spacing)))
    phi1 = stepper.solve([phi0], 0., 1.)[0]
    return phi0, phi1
def add_texture(im_orig,texture_gaussian_smoothness=0.1,texture_magnitude=0.3):
    """Overlay smoothed random texture on an image, per integer intensity level.

    For every integer level present in ``im_orig`` a random noise field is
    drawn, Gaussian-smoothed, scaled so its maximum equals
    ``texture_magnitude``, and added to all pixels with intensity
    >= level - 0.5 (higher levels overwrite lower ones in ``im``).

    NOTE(review): relies on a module-level ``spacing`` variable for the
    smoother — confirm it is defined in this module's scope.
    NOTE(review): ``rand_noise`` is drawn with shape ``sz[2:]`` and reshaped
    to the full ``sz``, which only works if the leading dimensions have size
    one — presumably (1, 1, X, Y) images; verify against callers.
    """
    # do this separately for each integer intensity level
    levels = np.unique((np.floor(im_orig)).astype('int'))
    im = np.zeros_like(im_orig)
    for current_level in levels:
        sz = im_orig.shape
        # noise over the spatial dimensions only, then reshaped to full shape
        rand_noise = np.random.random(sz[2:]).astype('float32')-0.5
        rand_noise = rand_noise.view().reshape(sz)
        r_params = pars.ParameterDict()
        r_params['smoother']['type'] = 'gaussian'
        r_params['smoother']['gaussian_std'] = texture_gaussian_smoothness
        # smooth the raw noise to give it spatial correlation
        s_r = sf.SmootherFactory(sz[2::], spacing).create_smoother(r_params)
        rand_noise_smoothed = s_r.smooth(AdaptVal(torch.from_numpy(rand_noise))).detach().cpu().numpy()
        # normalize so the texture's peak amplitude equals texture_magnitude
        rand_noise_smoothed /= rand_noise_smoothed.max()
        rand_noise_smoothed *= texture_magnitude
        # apply to all pixels at or above this level (minus half a bin)
        c_indx = (im_orig>=current_level-0.5)
        im[c_indx] = im_orig[c_indx] + rand_noise_smoothed[c_indx]
    return im
def create_random_image_pair(weights_not_fluid,weights_fluid,weights_neutral,weight_smoothing_std,multi_gaussian_stds,
                             kernel_weighting_type,
                             randomize_momentum_on_circle,randomize_in_sectors,
                             put_weights_between_circles,
                             start_with_fluid_weight,
                             use_random_source,
                             use_fixed_source,
                             add_texture_to_image,
                             texture_gaussian_smoothness,
                             texture_magnitude,
                             nr_of_circles_to_generate,
                             circle_extent,
                             sz,spacing,
                             nr_of_angles=10,multiplier_factor=1.0,momentum_smoothing=0.05,
                             visualize=False,visualize_warped=False,print_warped_name=None,
                             publication_figures_directory=None,
                             image_pair_nr=None):
    """Create one synthetic registration pair (source/target plus ground truth).

    Pipeline as implemented below:
      1. Build a ring image with per-ring multi-Gaussian smoothing weights
         (rings alternate between the fluid and not-fluid weight vectors,
         order controlled by ``start_with_fluid_weight``); optionally smooth
         the weights with a Gaussian of std ``weight_smoothing_std``.
      2. Create an initial momentum on the rings, convert it to a localized
         velocity field, and integrate it (``compute_map_from_v``) to obtain
         the deformation map ``phi1``.
      3. Optionally texture the image, warp it to obtain the target, and, if
         ``use_random_source`` is set, repeat the momentum/warp step once so
         the returned source is itself a random deformation of the rings.
      4. Optionally visualize and/or write publication figures.

    Returns:
        (I0_source, I1_warped, weights, I0_label, I1_label, phi1, m) — the
        first six converted to numpy arrays; ``m`` is the momentum actually
        used (returned unconverted).

    NOTE(review): images are indexed as ``[0, 0, ...]`` throughout, so a
    (batch=1, channel, X, Y) layout is assumed — confirm with callers.
    """
    nr_of_rings = nr_of_circles_to_generate
    extent = circle_extent
    randomize_factor = 0.25
    randomize_radii = not use_fixed_source
    smooth_initial_momentum = True
    # create ordered set of weights: rings alternate between the fluid and
    # not-fluid weight vectors, starting as requested
    multi_gaussian_weights = []
    for r in range(nr_of_rings):
        if r%2==0:
            if start_with_fluid_weight:
                multi_gaussian_weights.append(weights_fluid)
            else:
                multi_gaussian_weights.append(weights_not_fluid)
        else:
            if start_with_fluid_weight:
                multi_gaussian_weights.append(weights_not_fluid)
            else:
                multi_gaussian_weights.append(weights_fluid)
    # ring radii (randomized unless a fixed source is requested)
    rings_at = _compute_ring_radii(extent=extent, nr_of_rings=nr_of_rings, randomize_radii=randomize_radii, randomize_factor=randomize_factor)
    weights_orig,ring_im_orig,std_im_orig = create_rings(rings_at,multi_gaussian_weights=multi_gaussian_weights,
                                                         default_multi_gaussian_weights=weights_neutral,
                                                         multi_gaussian_stds=multi_gaussian_stds,
                                                         put_weights_between_circles=put_weights_between_circles,
                                                         kernel_weighting_type=kernel_weighting_type,
                                                         sz=sz,spacing=spacing,
                                                         visualize=visualize)
    if weight_smoothing_std is not None:
        if weight_smoothing_std>0:
            s_m_params = pars.ParameterDict()
            s_m_params['smoother']['type'] = 'gaussian'
            s_m_params['smoother']['gaussian_std'] = weight_smoothing_std
            # smooth the weights
            smoother = sf.SmootherFactory(weights_orig.shape[2::], spacing).create_smoother(s_m_params)
            #weights_old = np.zeros_like(weights_orig)
            #weights_old[:] = weights_orig
            weights_orig = (smoother.smooth(AdaptVal(torch.from_numpy(weights_orig)))).detach().cpu().numpy()
            # make sure they are strictly positive
            weights_orig[weights_orig<0] = 0
    if publication_figures_directory is not None:
        plt.clf()
        plt.imshow(ring_im_orig[0,0,...],origin='lower')
        plt.axis('off')
        plt.savefig(os.path.join(publication_figures_directory,'ring_im_orig_{:d}.pdf'.format(image_pair_nr)),bbox_inches='tight',pad_inches=0)
        plt.clf()
        plt.imshow(std_im_orig,origin='lower')
        plt.axis('off')
        plt.colorbar()
        plt.savefig(os.path.join(publication_figures_directory,'std_im_orig_{:d}.pdf'.format(image_pair_nr)),bbox_inches='tight',pad_inches=0)
    # initial momentum, defined over a centered identity map
    id_c = utils.centered_identity_map_multiN(sz, spacing, dtype='float32')
    m_orig = create_momentum(ring_im_orig, centered_map=id_c, randomize_momentum_on_circle=randomize_momentum_on_circle,
                             randomize_in_sectors=randomize_in_sectors,
                             smooth_initial_momentum=smooth_initial_momentum,
                             sz=sz, spacing=spacing,
                             nr_of_angles=nr_of_angles,
                             multiplier_factor=multiplier_factor,
                             momentum_smoothing=momentum_smoothing,
                             publication_figures_directory=publication_figures_directory,
                             publication_prefix='circle_init',
                             image_pair_nr=image_pair_nr)
    localized_v_orig = compute_localized_velocity_from_momentum(m=m_orig,weights=weights_orig,multi_gaussian_stds=multi_gaussian_stds,sz=sz,spacing=spacing,kernel_weighting_type=kernel_weighting_type)
    if publication_figures_directory is not None:
        plt.clf()
        plt.imshow(ring_im_orig[0, 0, ...], origin='lower')
        subsampled_quiver(localized_v_orig[0,1,...],localized_v_orig[0,0,...],color='red', scale=1, subsample=3)
        plt.axis('image')
        plt.axis('off')
        plt.savefig(os.path.join(publication_figures_directory,'{:s}_{:d}.pdf'.format('localized_v_orig', image_pair_nr)),bbox_inches='tight',pad_inches=0)
    # integrate the velocity to obtain the ground-truth deformation map
    phi0_orig,phi1_orig = compute_map_from_v(localized_v_orig,sz,spacing)
    if add_texture_to_image:
        ring_im = add_texture(ring_im_orig,texture_gaussian_smoothness=texture_gaussian_smoothness,texture_magnitude=texture_magnitude)
        if publication_figures_directory is not None:
            plt.clf()
            plt.imshow(ring_im[0, 0, ...],origin='lower')
            plt.axis('off')
            plt.savefig(os.path.join(publication_figures_directory, 'ring_im_orig_textured_{:d}.pdf'.format(image_pair_nr)),bbox_inches='tight',pad_inches=0)
        # plt.clf()
        # plt.subplot(1,2,1)
        # plt.imshow(ring_im[0,0,...],clim=(-0.5,2.5))
        # plt.colorbar()
        # plt.subplot(1,2,2)
        # plt.imshow(ring_im_orig[0, 0, ...], clim=(-0.5, 2.5))
        # plt.colorbar()
        # plt.show()
    else:
        ring_im = ring_im_orig
    # deform image based on this map
    I0_source_orig = AdaptVal(torch.from_numpy(ring_im))
    I1_warped_orig = utils.compute_warped_image_multiNC(I0_source_orig, phi1_orig, spacing, spline_order=1)
    # define the label images (labels come from the untextured ring image)
    I0_label_orig = AdaptVal(torch.from_numpy(ring_im_orig))
    I1_label_orig = utils.get_warped_label_map(I0_label_orig, phi1_orig, spacing )
    if publication_figures_directory is not None:
        plt.clf()
        plt.imshow(I1_label_orig[0, 0, ...].detach().cpu().numpy(),origin='lower')
        plt.axis('off')
        plt.savefig(os.path.join(publication_figures_directory, 'ring_im_warped_source_{:d}.pdf'.format(image_pair_nr)),bbox_inches='tight',pad_inches=0)
    if use_random_source:
        # the initially created target will become the source
        id_c_warped_t = utils.compute_warped_image_multiNC(AdaptVal(torch.from_numpy(id_c)), phi1_orig, spacing, spline_order=1)
        id_c_warped = id_c_warped_t.detach().cpu().numpy()
        weights_warped_t = utils.compute_warped_image_multiNC(AdaptVal(torch.from_numpy(weights_orig)), phi1_orig, spacing, spline_order=1)
        weights_warped = weights_warped_t.detach().cpu().numpy()
        # make sure they are strictly positive
        weights_warped[weights_warped<0] = 0
        warped_source_im_orig = I1_label_orig.detach().cpu().numpy()
        # new momentum on the warped source (same settings as above)
        m_warped_source = create_momentum(warped_source_im_orig, centered_map=id_c_warped, randomize_momentum_on_circle=randomize_momentum_on_circle,
                                          randomize_in_sectors=randomize_in_sectors,
                                          smooth_initial_momentum=smooth_initial_momentum,
                                          sz=sz, spacing=spacing,
                                          nr_of_angles=nr_of_angles,
                                          multiplier_factor=multiplier_factor,
                                          momentum_smoothing=momentum_smoothing,
                                          publication_figures_directory=publication_figures_directory,
                                          publication_prefix='random_source',
                                          image_pair_nr=image_pair_nr)
        localized_v_warped = compute_localized_velocity_from_momentum(m=m_warped_source, weights=weights_warped,
                                                                     multi_gaussian_stds=multi_gaussian_stds, sz=sz,
                                                                     spacing=spacing,kernel_weighting_type=kernel_weighting_type)
        if publication_figures_directory is not None:
            plt.clf()
            plt.imshow(warped_source_im_orig[0, 0, ...], origin='lower')
            subsampled_quiver(localized_v_warped[0, 1, ...], localized_v_warped[0, 0, ...], color='red', scale=1,subsample=3)
            plt.axis('image')
            plt.axis('off')
            plt.savefig(os.path.join(publication_figures_directory,'{:s}_{:d}.pdf'.format('random_source_localized_v', image_pair_nr)),bbox_inches='tight',pad_inches=0)
        phi0_w, phi1_w = compute_map_from_v(localized_v_warped, sz, spacing)
        if add_texture_to_image:
            warped_source_im = add_texture(warped_source_im_orig,texture_gaussian_smoothness=texture_gaussian_smoothness,texture_magnitude=texture_magnitude)
            if publication_figures_directory is not None:
                plt.clf()
                plt.imshow(ring_im[0, 0, ...],origin='lower')
                plt.axis('off')
                plt.savefig(os.path.join(publication_figures_directory, 'random_source_im_textured_{:d}.pdf'.format(image_pair_nr)),bbox_inches='tight',pad_inches=0)
        else:
            warped_source_im = warped_source_im_orig
        # deform these images based on the new map
        # deform image based on this map
        I0_source_w = AdaptVal(torch.from_numpy(warped_source_im))
        I1_warped_w = utils.compute_warped_image_multiNC(I0_source_w, phi1_w, spacing, spline_order=1)
        # define the label images
        I0_label_w = AdaptVal(torch.from_numpy(warped_source_im_orig))
        I1_label_w = utils.get_warped_label_map(I0_label_w, phi1_w, spacing)
    # select which of the two generated pairs is returned
    if use_random_source:
        I0_source = I0_source_w
        I1_warped = I1_warped_w
        I0_label = I0_label_w
        I1_label = I1_label_w
        m = m_warped_source
        phi0 = phi0_w
        phi1 = phi1_w
        weights = weights_warped
    else:
        I0_source = I0_source_orig
        I1_warped = I1_warped_orig
        I0_label = I0_label_orig
        I1_label = I1_label_orig
        m = m_orig
        phi0 = phi0_orig
        phi1 = phi1_orig
        weights = weights_orig
    std_im = compute_overall_std(weights,multi_gaussian_stds,kernel_weighting_type=kernel_weighting_type)
    if visualize_warped:
        plt.clf()
        # plot original image, warped image, and grids
        plt.subplot(3,4,1)
        plt.imshow(I0_source[0,0,...].detach().cpu().numpy())
        plt.title('source')
        plt.subplot(3,4,2)
        plt.imshow(I1_warped[0,0,...].detach().cpu().numpy())
        plt.title('warped = target')
        plt.subplot(3,4,3)
        plt.imshow(I0_source[0,0,...].detach().cpu().numpy())
        plt.contour(phi0[0,0,...].detach().cpu().numpy(), np.linspace(-1, 1, 40), colors='r', linestyles='solid')
        plt.contour(phi0[0,1,...].detach().cpu().numpy(), np.linspace(-1, 1, 40), colors='r', linestyles='solid')
        plt.subplot(3,4,4)
        plt.imshow(I1_warped[0,0,...].detach().cpu().numpy())
        plt.contour(phi1[0,0,...].detach().cpu().numpy(), np.linspace(-1, 1, 40), colors='r', linestyles='solid')
        plt.contour(phi1[0,1,...].detach().cpu().numpy(), np.linspace(-1, 1, 40), colors='r', linestyles='solid')
        nr_of_weights = weights.shape[1]
        for cw in range(nr_of_weights):
            plt.subplot(3,4,5+cw)
            if kernel_weighting_type=='w_K_w':
                # in the w_K_w mode the squared weights are displayed
                plt.imshow(weights[0, cw, ...]**2, vmin=0.0, vmax=1.0)
            else:
                plt.imshow(weights[0, cw, ...], vmin=0.0, vmax=1.0)
            plt.title('w: std' + str(multi_gaussian_stds[cw]))
            plt.colorbar()
        plt.subplot(3,4,12)
        plt.imshow(std_im)
        plt.title('std')
        plt.colorbar()
        if print_warped_name is not None:
            plt.savefig(print_warped_name)
        else:
            plt.show()
    if publication_figures_directory is not None:
        plt.clf()
        plt.imshow(I0_source[0, 0, ...].detach().cpu().numpy(),origin='lower')
        plt.axis('image')
        plt.axis('off')
        plt.savefig(os.path.join(publication_figures_directory, '{:s}_{:d}.pdf'.format('source_image', image_pair_nr)),bbox_inches='tight',pad_inches=0)
        plt.clf()
        plt.imshow(I1_warped[0, 0, ...].detach().cpu().numpy(),origin='lower')
        plt.axis('image')
        plt.axis('off')
        plt.savefig(os.path.join(publication_figures_directory, '{:s}_{:d}.pdf'.format('target_image', image_pair_nr)),bbox_inches='tight',pad_inches=0)
        plt.clf()
        plt.imshow(I0_source[0, 0, ...].detach().cpu().numpy(),origin='lower')
        plt.contour(phi0[0, 0, ...].detach().cpu().numpy(), np.linspace(-1, 1, 40), colors='r', linestyles='solid')
        plt.contour(phi0[0, 1, ...].detach().cpu().numpy(), np.linspace(-1, 1, 40), colors='r', linestyles='solid')
        plt.axis('image')
        plt.axis('off')
        plt.savefig(os.path.join(publication_figures_directory, '{:s}_{:d}.pdf'.format('source_image_with_grid', image_pair_nr)),bbox_inches='tight',pad_inches=0)
        plt.clf()
        plt.imshow(I1_warped[0, 0, ...].detach().cpu().numpy(),origin='lower')
        plt.contour(phi1[0, 0, ...].detach().cpu().numpy(), np.linspace(-1, 1, 40), colors='r', linestyles='solid')
        plt.contour(phi1[0, 1, ...].detach().cpu().numpy(), np.linspace(-1, 1, 40), colors='r', linestyles='solid')
        plt.axis('image')
        plt.axis('off')
        plt.savefig(os.path.join(publication_figures_directory,'{:s}_{:d}.pdf'.format('target_image_with_grid', image_pair_nr)), bbox_inches='tight',pad_inches=0)
        plt.clf()
        plt.imshow(std_im,origin='lower')
        plt.colorbar()
        plt.axis('image')
        plt.axis('off')
        plt.savefig(os.path.join(publication_figures_directory, '{:s}_{:d}.pdf'.format('std_im_source', image_pair_nr)),bbox_inches='tight',pad_inches=0)
    return I0_source.detach().cpu().numpy(), I1_warped.detach().cpu().numpy(), weights, \
           I0_label.detach().cpu().numpy(), I1_label.detach().cpu().numpy(), phi1.detach().cpu().numpy(), m
def get_parameter_value(command_line_par, params, params_name, default_val, params_description):
    """Resolve a parameter from the command line or the parameter dictionary.

    If ``command_line_par`` is None the value is read from ``params`` via the
    (name, default, description) tuple-key convention; otherwise the command
    line value is stored in ``params`` under ``params_name`` and returned.
    """
    if command_line_par is not None:
        params[params_name] = command_line_par
        return command_line_par
    return params[(params_name, default_val, params_description)]
def get_parameter_value_flag(command_line_par, params, params_name, default_val, params_description):
    """Resolve a flag-style parameter from the command line or the dictionary.

    A command line value equal to ``default_val`` is treated as "not set", in
    which case the value is read from ``params`` via the (name, default,
    description) tuple-key convention; otherwise the command line value is
    stored in ``params`` under ``params_name`` and returned.
    """
    if command_line_par != default_val:
        params[params_name] = command_line_par
        return command_line_par
    return params[(params_name, default_val, params_description)]
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Creates a synthetic registration results')
parser.add_argument('--config', required=False, default=None, help='The main json configuration file that can be used to define the settings')
parser.add_argument('--output_directory', required=False, default='synthetic_example_out', help='Where the output was stored (now this will be the input directory)')
parser.add_argument('--nr_of_pairs_to_generate', required=False, default=10, type=int, help='number of image pairs to generate')
parser.add_argument('--nr_of_circles_to_generate', required=False, default=None, type=int, help='number of circles to generate in an image') #2
parser.add_argument('--circle_extent', required=False, default=None, type=float, help='Size of largest circle; image is [-0.5,0.5]^2') # 0.25
parser.add_argument('--seed', required=False, type=int, default=None, help='Sets the random seed which affects data shuffling')
parser.add_argument('--create_publication_figures', action='store_true', help='If set writes out figures illustrating the generation approach of first example')
parser.add_argument('--use_fixed_source', action='store_true', help='if set the source image is fixed; like a fixed atlas image')
parser.add_argument('--use_random_source', action='store_true', help='if set then inital source is warped randomly, otherwise it is circular')
parser.add_argument('--no_texture', action='store_true',help='if set then no texture is used, otherwise (default) texture is generated')
parser.add_argument('--texture_gaussian_smoothness', required=False, type=float, default=None, help='Gaussian standard deviation used to smooth a random image to create texture.')
parser.add_argument('--texture_magnitude', required=False, type=float, default=None, help='Magnitude of the texture')
parser.add_argument('--do_not_randomize_momentum', action='store_true', help='if set, momentum is deterministic')
parser.add_argument('--do_not_randomize_in_sectors', action='store_true', help='if set and randomize momentum is on, momentum is only randomized uniformly over circles')
parser.add_argument('--put_weights_between_circles', action='store_true', help='if set, the weights will change in-between circles, otherwise they will be colocated with the circles')
parser.add_argument('--start_with_fluid_weight', action='store_true', help='if set then the innermost circle is not fluid, otherwise it is fluid')
parser.add_argument('--weight_smoothing_std',required=False,default=0.02,type=float,help='Standard deviation to smooth the weights with; to assure sufficient regularity')
parser.add_argument('--stds', required=False,type=str, default=None, help='standard deviations for the multi-Gaussian; default=[0.01,0.05,0.1,0.2]')
parser.add_argument('--weights_not_fluid', required=False,type=str, default=None, help='weights for a non fluid circle; default=[0,0,0,1]')
parser.add_argument('--weights_fluid', required=False,type=str, default=None, help='weights for a fluid circle; default=[0.2,0.5,0.2,0.1]')
parser.add_argument('--weights_background', required=False,type=str, default=None, help='weights for the background; default=[0,0,0,1]')
parser.add_argument('--kernel_weighting_type', required=False, type=str, default=None, help='Which kernel weighting to use for integration. Specify as [w_K|w_K_w|sqrt_w_K_sqrt_w]; w_K is the default')
parser.add_argument('--nr_of_angles', required=False, default=None, type=int, help='number of angles for randomize in sector') #10
parser.add_argument('--multiplier_factor', required=False, default=None, type=float, help='value the random momentum is multiplied by') #1.0
parser.add_argument('--momentum_smoothing', required=False, default=None, type=int, help='how much the randomly generated momentum is smoothed') #0.05
parser.add_argument('--sz', required=False, type=str, default=None, help='Desired size of synthetic example; default=[128,128]')
args = parser.parse_args()
if args.seed is not None:
print('Setting the random seed to {:}'.format(args.seed))
random.seed(args.seed)
torch.manual_seed(args.seed)
params = pars.ParameterDict()
if args.config is not None:
# load the configuration
params.load_JSON(args.config)
visualize = True
visualize_warped = True
print_images = True
nr_of_pairs_to_generate = args.nr_of_pairs_to_generate
nr_of_circles_to_generate = get_parameter_value(args.nr_of_circles_to_generate, params, 'nr_of_circles_to_generate', 2, 'number of circles for | |
att_rp4_status,
create_rp2_resp, att_rp2_status,
create_rp4_resp, att_rp4_status,
deploy_rp1_rp3_resp, deploy_rp4_rp7_resp,
att_rp2_status, att_rp4_status,
]
if ('test_dcnm_srp_merged_update_existing_unauth_err' == self._testMethodName):
have_rp2_resp = self.payloads_data.get('have_rp2_resp')
have_rp4_resp = self.payloads_data.get('have_rp4_resp')
att_rp2_status = self.payloads_data.get('attach_rp2_resp')
att_rp4_status = self.payloads_data.get('attach_rp4_resp')
create_rp2_resp = self.payloads_data.get('create_rp2_resp')
create_rp4_resp = self.payloads_data.get('create_rp4_resp')
deploy_rp1_rp3_resp = self.payloads_data.get('deploy_rp1_rp3_resp')
deploy_rp4_rp7_resp = self.payloads_data.get('deploy_rp4_rp7_resp')
att_rp2_status = self.payloads_data.get('attach_rp2_resp')
att_rp4_status = self.payloads_data.get('attach_rp4_resp')
create_rp4_resp_unauth_err = self.payloads_data.get('create_rp4_resp_unauth_err')
self.run_dcnm_send.side_effect = [have_rp2_resp, have_rp4_resp,
att_rp2_status, att_rp4_status,
create_rp2_resp, att_rp2_status,
create_rp4_resp_unauth_err,
create_rp4_resp, att_rp4_status,
deploy_rp1_rp3_resp, deploy_rp4_rp7_resp,
att_rp2_status, att_rp4_status,
]
if ('test_dcnm_srp_delete_existing' == self._testMethodName):
have_rp1_resp = self.payloads_data.get('have_rp1_resp')
have_rp2_resp = self.payloads_data.get('have_rp2_resp')
have_rp3_resp = self.payloads_data.get('have_rp3_resp')
have_rp4_resp = self.payloads_data.get('have_rp4_resp')
have_rp5_resp = self.payloads_data.get('have_rp5_resp')
have_rp6_resp = self.payloads_data.get('have_rp6_resp')
have_rp7_resp = self.payloads_data.get('have_rp7_resp')
det_rp1_rp3_resp = self.payloads_data.get('detach_rp1_rp3_resp')
det_rp4_rp7_resp = self.payloads_data.get('detach_rp4_rp7_resp')
delete_rp1_resp = self.payloads_data.get('delete_rp1_resp')
delete_rp2_resp = self.payloads_data.get('delete_rp2_resp')
delete_rp3_resp = self.payloads_data.get('delete_rp3_resp')
delete_rp4_resp = self.payloads_data.get('delete_rp4_resp')
delete_rp5_resp = self.payloads_data.get('delete_rp5_resp')
delete_rp6_resp = self.payloads_data.get('delete_rp6_resp')
delete_rp7_resp = self.payloads_data.get('delete_rp7_resp')
deploy_rp1_rp3_resp = self.payloads_data.get('deploy_rp1_rp3_resp')
deploy_rp4_rp7_resp = self.payloads_data.get('deploy_rp4_rp7_resp')
dd_rp1_status = self.payloads_data.get('del_deploy_rp1_resp')
dd_rp2_status = self.payloads_data.get('del_deploy_rp2_resp')
dd_rp3_status = self.payloads_data.get('del_deploy_rp3_resp')
dd_rp4_status = self.payloads_data.get('del_deploy_rp4_resp')
dd_rp5_status = self.payloads_data.get('del_deploy_rp5_resp')
dd_rp6_status = self.payloads_data.get('del_deploy_rp6_resp')
dd_rp7_status = self.payloads_data.get('del_deploy_rp7_resp')
self.run_dcnm_send.side_effect = [have_rp1_resp, have_rp2_resp, have_rp3_resp,
have_rp4_resp, have_rp5_resp, have_rp6_resp,
have_rp7_resp,
det_rp1_rp3_resp, det_rp4_rp7_resp,
deploy_rp1_rp3_resp, deploy_rp4_rp7_resp,
dd_rp1_status, dd_rp2_status, dd_rp3_status,
dd_rp4_status, dd_rp5_status, dd_rp6_status,
dd_rp7_status,
delete_rp1_resp, delete_rp2_resp, delete_rp3_resp,
delete_rp4_resp, delete_rp5_resp, delete_rp6_resp,
delete_rp7_resp]
if ('test_dcnm_srp_delete_existing_no_config' == self._testMethodName):
serv_nodes_resp = self.payloads_data.get('serv_nodes_resp')
have_it_sn1_resp = self.payloads_data.get('have_it_sn1_resp')
have_it_sn2_resp = self.payloads_data.get('have_it_sn2_resp')
det_rp1_rp3_resp = self.payloads_data.get('detach_rp1_rp3_resp')
det_rp4_rp7_resp = self.payloads_data.get('detach_rp4_rp7_resp')
delete_rp1_resp = self.payloads_data.get('delete_rp1_resp')
delete_rp2_resp = self.payloads_data.get('delete_rp2_resp')
delete_rp3_resp = self.payloads_data.get('delete_rp3_resp')
delete_rp4_resp = self.payloads_data.get('delete_rp4_resp')
delete_rp5_resp = self.payloads_data.get('delete_rp5_resp')
delete_rp6_resp = self.payloads_data.get('delete_rp6_resp')
delete_rp7_resp = self.payloads_data.get('delete_rp7_resp')
deploy_rp1_rp3_resp = self.payloads_data.get('deploy_rp1_rp3_resp')
deploy_rp4_rp7_resp = self.payloads_data.get('deploy_rp4_rp7_resp')
dd_rp1_status = self.payloads_data.get('del_deploy_rp1_resp')
dd_rp2_status = self.payloads_data.get('del_deploy_rp2_resp')
dd_rp3_status = self.payloads_data.get('del_deploy_rp3_resp')
dd_rp4_status = self.payloads_data.get('del_deploy_rp4_resp')
dd_rp5_status = self.payloads_data.get('del_deploy_rp5_resp')
dd_rp6_status = self.payloads_data.get('del_deploy_rp6_resp')
dd_rp7_status = self.payloads_data.get('del_deploy_rp7_resp')
self.run_dcnm_send.side_effect = [
serv_nodes_resp,
have_it_sn1_resp, have_it_sn2_resp,
det_rp1_rp3_resp, det_rp4_rp7_resp,
deploy_rp1_rp3_resp, deploy_rp4_rp7_resp,
dd_rp1_status, dd_rp2_status, dd_rp3_status,
dd_rp4_status, dd_rp5_status, dd_rp6_status,
dd_rp7_status,
delete_rp1_resp, delete_rp2_resp, delete_rp3_resp,
delete_rp4_resp, delete_rp5_resp, delete_rp6_resp,
delete_rp7_resp]
if ('test_dcnm_srp_delete_existing_with_node_name' == self._testMethodName):
have_it_sn1_resp = self.payloads_data.get('have_it_sn1_resp')
have_it_sn2_resp = self.payloads_data.get('have_it_sn2_resp')
det_rp1_rp3_resp = self.payloads_data.get('detach_rp1_rp3_resp')
det_rp4_rp7_resp = self.payloads_data.get('detach_rp4_rp7_resp')
delete_rp1_resp = self.payloads_data.get('delete_rp1_resp')
delete_rp2_resp = self.payloads_data.get('delete_rp2_resp')
delete_rp3_resp = self.payloads_data.get('delete_rp3_resp')
delete_rp4_resp = self.payloads_data.get('delete_rp4_resp')
delete_rp5_resp = self.payloads_data.get('delete_rp5_resp')
delete_rp6_resp = self.payloads_data.get('delete_rp6_resp')
delete_rp7_resp = self.payloads_data.get('delete_rp7_resp')
deploy_rp1_rp3_resp = self.payloads_data.get('deploy_rp1_rp3_resp')
deploy_rp4_rp7_resp = self.payloads_data.get('deploy_rp4_rp7_resp')
dd_rp1_status = self.payloads_data.get('del_deploy_rp1_resp')
dd_rp2_status = self.payloads_data.get('del_deploy_rp2_resp')
dd_rp3_status = self.payloads_data.get('del_deploy_rp3_resp')
dd_rp4_status = self.payloads_data.get('del_deploy_rp4_resp')
dd_rp5_status = self.payloads_data.get('del_deploy_rp5_resp')
dd_rp6_status = self.payloads_data.get('del_deploy_rp6_resp')
dd_rp7_status = self.payloads_data.get('del_deploy_rp7_resp')
self.run_dcnm_send.side_effect = [
have_it_sn1_resp, have_it_sn2_resp,
det_rp1_rp3_resp, det_rp4_rp7_resp,
deploy_rp1_rp3_resp, deploy_rp4_rp7_resp,
dd_rp1_status, dd_rp2_status, dd_rp3_status,
dd_rp4_status, dd_rp5_status, dd_rp6_status,
dd_rp7_status,
delete_rp1_resp, delete_rp2_resp, delete_rp3_resp,
delete_rp4_resp, delete_rp5_resp, delete_rp6_resp,
delete_rp7_resp]
if ('test_dcnm_srp_delete_existing_unauth_err' == self._testMethodName):
have_rp1_resp = self.payloads_data.get('have_rp1_resp')
have_rp2_resp = self.payloads_data.get('have_rp2_resp')
have_rp3_resp = self.payloads_data.get('have_rp3_resp')
have_rp4_resp = self.payloads_data.get('have_rp4_resp')
have_rp5_resp = self.payloads_data.get('have_rp5_resp')
have_rp6_resp = self.payloads_data.get('have_rp6_resp')
have_rp7_resp = self.payloads_data.get('have_rp7_resp')
det_rp1_rp3_resp = self.payloads_data.get('detach_rp1_rp3_resp')
det_rp4_rp7_resp = self.payloads_data.get('detach_rp4_rp7_resp')
delete_rp1_resp = self.payloads_data.get('delete_rp1_resp')
delete_rp2_resp = self.payloads_data.get('delete_rp2_resp')
delete_rp3_resp = self.payloads_data.get('delete_rp3_resp')
delete_rp4_resp = self.payloads_data.get('delete_rp4_resp')
delete_rp5_resp = self.payloads_data.get('delete_rp5_resp')
delete_rp6_resp = self.payloads_data.get('delete_rp6_resp')
delete_rp7_resp = self.payloads_data.get('delete_rp7_resp')
deploy_rp1_rp3_resp = self.payloads_data.get('deploy_rp1_rp3_resp')
deploy_rp4_rp7_resp = self.payloads_data.get('deploy_rp4_rp7_resp')
dd_rp1_status = self.payloads_data.get('del_deploy_rp1_resp')
dd_rp2_status = self.payloads_data.get('del_deploy_rp2_resp')
dd_rp3_status = self.payloads_data.get('del_deploy_rp3_resp')
dd_rp4_status = self.payloads_data.get('del_deploy_rp4_resp')
dd_rp5_status = self.payloads_data.get('del_deploy_rp5_resp')
dd_rp6_status = self.payloads_data.get('del_deploy_rp6_resp')
dd_rp7_status = self.payloads_data.get('del_deploy_rp7_resp')
det_rp1_resp_unauth_err = self.payloads_data.get('det_rp1_resp_unauth_err')
deploy_rp4_resp_unauth_err = self.payloads_data.get('deploy_rp4_resp_unauth_err')
delete_rp7_resp_unauth_err = self.payloads_data.get('delete_rp7_resp_unauth_err')
self.run_dcnm_send.side_effect = [have_rp1_resp, have_rp2_resp, have_rp3_resp,
have_rp4_resp, have_rp5_resp, have_rp6_resp,
have_rp7_resp,
det_rp1_resp_unauth_err,
det_rp1_rp3_resp, det_rp4_rp7_resp,
deploy_rp1_rp3_resp, deploy_rp4_rp7_resp,
dd_rp1_status, dd_rp2_status, dd_rp3_status,
dd_rp4_status, dd_rp5_status, dd_rp6_status,
dd_rp7_status,
delete_rp1_resp, delete_rp2_resp, delete_rp3_resp,
delete_rp4_resp, delete_rp5_resp, delete_rp6_resp,
delete_rp7_resp_unauth_err, deploy_rp4_rp7_resp, delete_rp7_resp]
if ('test_dcnm_srp_delete_existing_and_non_existing' == self._testMethodName):
have_rp1_resp = self.payloads_data.get('have_rp1_resp')
have_rp3_resp = self.payloads_data.get('have_rp3_resp')
have_rp6_resp = self.payloads_data.get('have_rp6_resp')
have_rp7_resp = self.payloads_data.get('have_rp7_resp')
det_rp1_rp3_resp = self.payloads_data.get('detach_rp1_rp3_resp')
det_rp4_rp7_resp = self.payloads_data.get('detach_rp4_rp7_resp')
delete_rp1_resp = self.payloads_data.get('delete_rp1_resp')
delete_rp3_resp = self.payloads_data.get('delete_rp3_resp')
delete_rp6_resp = self.payloads_data.get('delete_rp6_resp')
delete_rp7_resp = self.payloads_data.get('delete_rp7_resp')
deploy_rp1_rp3_resp = self.payloads_data.get('deploy_rp1_rp3_resp')
deploy_rp4_rp7_resp = self.payloads_data.get('deploy_rp4_rp7_resp')
dd_rp1_status = self.payloads_data.get('del_deploy_rp1_resp')
dd_rp3_status = self.payloads_data.get('del_deploy_rp3_resp')
dd_rp6_status = self.payloads_data.get('del_deploy_rp6_resp')
dd_rp7_status = self.payloads_data.get('del_deploy_rp7_resp')
self.run_dcnm_send.side_effect = [have_rp1_resp, [], have_rp3_resp,
[], [], have_rp6_resp,
have_rp7_resp,
det_rp1_rp3_resp, det_rp4_rp7_resp,
deploy_rp1_rp3_resp, deploy_rp4_rp7_resp,
dd_rp1_status, dd_rp3_status,
dd_rp6_status, dd_rp7_status,
delete_rp1_resp, delete_rp3_resp,
delete_rp6_resp, delete_rp7_resp]
if ('test_dcnm_srp_delete_non_existing' == self._testMethodName):
self.run_dcnm_send.side_effect = [[], [], [], [], [], [], []]
if ('test_dcnm_srp_replace_rp1_to_rp3_non_existing' == self._testMethodName):
create_rp1_resp = self.payloads_data.get('create_rp1_resp')
create_rp2_resp = self.payloads_data.get('create_rp2_resp')
create_rp3_resp = self.payloads_data.get('create_rp3_resp')
deploy_rp1_rp3_resp = self.payloads_data.get('deploy_rp1_rp3_resp')
att_rp1_status = self.payloads_data.get('attach_rp1_resp')
att_rp2_status = self.payloads_data.get('attach_rp2_resp')
att_rp3_status = self.payloads_data.get('attach_rp3_resp')
self.run_dcnm_send.side_effect = [[], [], [],
create_rp1_resp, create_rp2_resp, create_rp3_resp,
deploy_rp1_rp3_resp,
att_rp1_status, att_rp2_status, att_rp3_status
]
if ('test_dcnm_srp_replace_rp1_to_rp3_existing' == self._testMethodName):
have_rp1_resp = self.payloads_data.get('have_rp1_resp')
have_rp2_resp = self.payloads_data.get('have_rp2_resp')
have_rp3_resp = self.payloads_data.get('have_rp3_resp')
att_rp1_status = self.payloads_data.get('attach_rp1_resp')
att_rp2_status = self.payloads_data.get('attach_rp2_resp')
att_rp3_status = self.payloads_data.get('attach_rp3_resp')
create_rp1_resp = self.payloads_data.get('create_rp1_resp')
create_rp2_resp = self.payloads_data.get('create_rp2_resp')
create_rp3_resp = self.payloads_data.get('create_rp3_resp')
deploy_rp1_rp3_resp = self.payloads_data.get('deploy_rp1_rp3_resp')
att_rp1_status = self.payloads_data.get('attach_rp1_resp')
att_rp2_status = self.payloads_data.get('attach_rp2_resp')
att_rp3_status = self.payloads_data.get('attach_rp3_resp')
self.run_dcnm_send.side_effect = [have_rp1_resp, have_rp2_resp, have_rp3_resp,
att_rp1_status, att_rp2_status, att_rp3_status,
create_rp1_resp, att_rp1_status,
create_rp2_resp, att_rp2_status,
create_rp3_resp, att_rp3_status,
deploy_rp1_rp3_resp,
att_rp1_status, att_rp2_status, att_rp3_status
]
if ('test_dcnm_srp_replace_rp1_to_rp3_existing_no_change' == self._testMethodName):
have_rp1_resp = self.payloads_data.get('have_rp1_resp')
have_rp2_resp = self.payloads_data.get('have_rp2_resp')
have_rp3_resp = self.payloads_data.get('have_rp3_resp')
att_rp1_status = self.payloads_data.get('attach_rp1_resp')
att_rp2_status = self.payloads_data.get('attach_rp2_resp')
att_rp3_status = self.payloads_data.get('attach_rp3_resp')
self.run_dcnm_send.side_effect = [have_rp1_resp, have_rp2_resp, have_rp3_resp,
att_rp1_status, att_rp2_status, att_rp3_status]
if ('test_dcnm_srp_override_rp1_rp7_with_new_peerings' == self._testMethodName):
serv_nodes_resp = self.payloads_data.get('serv_nodes_resp')
have_it_sn1_resp = self.payloads_data.get('have_it_sn1_resp')
have_it_sn2_resp = self.payloads_data.get('have_it_sn2_resp')
det_rp1_rp3_resp = self.payloads_data.get('detach_rp1_rp3_resp')
det_rp4_rp7_resp = self.payloads_data.get('detach_rp4_rp7_resp')
create_rp1_resp = self.payloads_data.get('create_rp1_resp')
create_rp4_resp = self.payloads_data.get('create_rp4_resp')
delete_rp1_resp = self.payloads_data.get('delete_rp1_resp')
delete_rp2_resp = self.payloads_data.get('delete_rp2_resp')
delete_rp3_resp = self.payloads_data.get('delete_rp3_resp')
delete_rp4_resp = self.payloads_data.get('delete_rp4_resp')
delete_rp5_resp = self.payloads_data.get('delete_rp5_resp')
delete_rp6_resp = self.payloads_data.get('delete_rp6_resp')
delete_rp7_resp = self.payloads_data.get('delete_rp7_resp')
deploy_rp1_rp3_resp = self.payloads_data.get('deploy_rp1_rp3_resp')
deploy_rp4_rp7_resp = self.payloads_data.get('deploy_rp4_rp7_resp')
dd_rp1_status = self.payloads_data.get('del_deploy_rp1_resp')
dd_rp2_status = self.payloads_data.get('del_deploy_rp2_resp')
dd_rp3_status = self.payloads_data.get('del_deploy_rp3_resp')
dd_rp4_status = self.payloads_data.get('del_deploy_rp4_resp')
dd_rp5_status = self.payloads_data.get('del_deploy_rp5_resp')
dd_rp6_status = self.payloads_data.get('del_deploy_rp6_resp')
dd_rp7_status = self.payloads_data.get('del_deploy_rp7_resp')
deploy_rp_ovr1_resp = self.payloads_data.get('deploy_rp_ovr1_resp')
deploy_rp_ovr4_resp = self.payloads_data.get('deploy_rp_ovr4_resp')
att_rp1_status = self.payloads_data.get('attach_rp1_resp')
att_rp4_status = self.payloads_data.get('attach_rp4_resp')
self.run_dcnm_send.side_effect = [[], [],
serv_nodes_resp,
have_it_sn1_resp, have_it_sn2_resp,
create_rp1_resp, create_rp4_resp,
det_rp1_rp3_resp, det_rp4_rp7_resp,
deploy_rp1_rp3_resp, deploy_rp4_rp7_resp,
dd_rp1_status, dd_rp2_status, dd_rp3_status,
dd_rp4_status, dd_rp5_status, dd_rp6_status,
dd_rp7_status,
delete_rp1_resp, delete_rp2_resp, delete_rp3_resp,
delete_rp4_resp, delete_rp5_resp, delete_rp6_resp,
delete_rp7_resp, deploy_rp_ovr1_resp, deploy_rp_ovr4_resp,
att_rp1_status, att_rp4_status
]
if ('test_dcnm_srp_override_with_existing_peering' == self._testMethodName):
serv_nodes_resp = self.payloads_data.get('serv_nodes_resp')
have_rp6_resp = self.payloads_data.get('have_rp6_resp')
have_it_sn1_resp = self.payloads_data.get('have_it_sn1_resp')
have_it_sn2_resp = self.payloads_data.get('have_it_sn2_resp')
det_rp1_rp3_resp = self.payloads_data.get('detach_rp1_rp3_resp')
det_rp4_rp7_resp = self.payloads_data.get('detach_rp4_rp7_resp')
att_rp6_status = self.payloads_data.get('attach_rp6_resp')
delete_rp1_resp = self.payloads_data.get('delete_rp1_resp')
delete_rp2_resp = self.payloads_data.get('delete_rp2_resp')
delete_rp3_resp = self.payloads_data.get('delete_rp3_resp')
delete_rp4_resp = self.payloads_data.get('delete_rp4_resp')
delete_rp5_resp = self.payloads_data.get('delete_rp5_resp')
delete_rp7_resp = self.payloads_data.get('delete_rp7_resp')
dd_rp1_status = self.payloads_data.get('del_deploy_rp1_resp')
dd_rp2_status = self.payloads_data.get('del_deploy_rp2_resp')
dd_rp3_status = self.payloads_data.get('del_deploy_rp3_resp')
dd_rp4_status = self.payloads_data.get('del_deploy_rp4_resp')
dd_rp5_status = self.payloads_data.get('del_deploy_rp5_resp')
dd_rp7_status = self.payloads_data.get('del_deploy_rp7_resp')
deploy_rp1_rp3_resp = self.payloads_data.get('deploy_rp1_rp3_resp')
deploy_rp4_rp7_resp = self.payloads_data.get('deploy_rp4_rp7_resp')
self.run_dcnm_send.side_effect = [have_rp6_resp,
serv_nodes_resp,
have_it_sn1_resp,
have_it_sn2_resp,
att_rp6_status,
det_rp1_rp3_resp, det_rp4_rp7_resp,
deploy_rp1_rp3_resp, deploy_rp4_rp7_resp,
dd_rp1_status, dd_rp2_status, dd_rp3_status,
dd_rp4_status, dd_rp5_status, dd_rp7_status,
delete_rp1_resp, delete_rp2_resp, delete_rp3_resp,
delete_rp4_resp, delete_rp5_resp, delete_rp7_resp
]
if ('test_dcnm_srp_override_with_existing_peering_updated' == self._testMethodName):
serv_nodes_resp = self.payloads_data.get('serv_nodes_resp')
have_rp6_resp = self.payloads_data.get('have_rp6_resp')
have_it_sn1_resp = self.payloads_data.get('have_it_sn1_resp')
have_it_sn2_resp = self.payloads_data.get('have_it_sn2_resp')
create_rp6_resp = self.payloads_data.get('create_rp6_resp')
att_rp6_status = self.payloads_data.get('attach_rp6_resp')
det_rp1_rp3_resp = self.payloads_data.get('detach_rp1_rp3_resp')
det_rp4_rp7_resp = self.payloads_data.get('detach_rp4_rp7_resp')
delete_rp1_resp = self.payloads_data.get('delete_rp1_resp')
delete_rp2_resp = self.payloads_data.get('delete_rp2_resp')
delete_rp3_resp = self.payloads_data.get('delete_rp3_resp')
delete_rp4_resp = self.payloads_data.get('delete_rp4_resp')
delete_rp5_resp = self.payloads_data.get('delete_rp5_resp')
delete_rp7_resp = self.payloads_data.get('delete_rp7_resp')
dd_rp1_status = self.payloads_data.get('del_deploy_rp1_resp')
dd_rp2_status = self.payloads_data.get('del_deploy_rp2_resp')
dd_rp3_status = self.payloads_data.get('del_deploy_rp3_resp')
dd_rp4_status = self.payloads_data.get('del_deploy_rp4_resp')
dd_rp5_status = self.payloads_data.get('del_deploy_rp5_resp')
dd_rp7_status = self.payloads_data.get('del_deploy_rp7_resp')
deploy_rp1_rp3_resp = self.payloads_data.get('deploy_rp1_rp3_resp')
deploy_rp4_rp7_resp = self.payloads_data.get('deploy_rp4_rp7_resp')
att_rp6_status = self.payloads_data.get('attach_rp6_resp')
self.run_dcnm_send.side_effect = [have_rp6_resp,
serv_nodes_resp,
have_it_sn1_resp,
have_it_sn2_resp,
att_rp6_status,
create_rp6_resp,
att_rp6_status,
det_rp1_rp3_resp, det_rp4_rp7_resp,
deploy_rp1_rp3_resp, deploy_rp4_rp7_resp,
dd_rp1_status, dd_rp2_status, dd_rp3_status,
dd_rp4_status, dd_rp5_status, dd_rp7_status,
delete_rp1_resp, delete_rp2_resp, delete_rp3_resp,
delete_rp4_resp, delete_rp5_resp, delete_rp7_resp,
deploy_rp4_rp7_resp,
att_rp6_status
]
if ('test_dcnm_srp_override_with_no_config' == self._testMethodName):
serv_nodes_resp = self.payloads_data.get('serv_nodes_resp')
have_it_sn1_resp = self.payloads_data.get('have_it_sn1_resp')
have_it_sn2_resp = self.payloads_data.get('have_it_sn2_resp')
det_rp1_rp3_resp = self.payloads_data.get('detach_rp1_rp3_resp')
det_rp4_rp7_resp = self.payloads_data.get('detach_rp4_rp7_resp')
delete_rp1_resp = self.payloads_data.get('delete_rp1_resp')
delete_rp2_resp = self.payloads_data.get('delete_rp2_resp')
delete_rp3_resp = self.payloads_data.get('delete_rp3_resp')
delete_rp4_resp = self.payloads_data.get('delete_rp4_resp')
delete_rp5_resp = self.payloads_data.get('delete_rp5_resp')
delete_rp6_resp = self.payloads_data.get('delete_rp6_resp')
delete_rp7_resp = self.payloads_data.get('delete_rp7_resp')
dd_rp1_status = self.payloads_data.get('del_deploy_rp1_resp')
dd_rp2_status = self.payloads_data.get('del_deploy_rp2_resp')
dd_rp3_status = self.payloads_data.get('del_deploy_rp3_resp')
dd_rp4_status = self.payloads_data.get('del_deploy_rp4_resp')
dd_rp5_status = self.payloads_data.get('del_deploy_rp5_resp')
dd_rp6_status = self.payloads_data.get('del_deploy_rp6_resp')
dd_rp7_status = self.payloads_data.get('del_deploy_rp7_resp')
deploy_rp1_rp3_resp = self.payloads_data.get('deploy_rp1_rp3_resp')
deploy_rp4_rp7_resp = self.payloads_data.get('deploy_rp4_rp7_resp')
self.run_dcnm_send.side_effect = [serv_nodes_resp,
have_it_sn1_resp, have_it_sn2_resp,
det_rp1_rp3_resp, det_rp4_rp7_resp,
deploy_rp1_rp3_resp, deploy_rp4_rp7_resp,
dd_rp1_status, dd_rp2_status, dd_rp3_status,
dd_rp4_status, dd_rp5_status, dd_rp6_status,
dd_rp7_status,
delete_rp1_resp, delete_rp2_resp, delete_rp3_resp,
delete_rp4_resp, delete_rp5_resp, delete_rp6_resp,
delete_rp7_resp
]
if ('test_dcnm_srp_query_non_existing' == self._testMethodName):
self.run_dcnm_send.side_effect = [[],[]]
if ('test_dcnm_srp_query_with_service_nodes' == self._testMethodName):
have_it_sn1_resp = self.payloads_data.get('have_it_sn1_resp')
have_it_sn2_resp = self.payloads_data.get('have_it_sn2_resp')
self.run_dcnm_send.side_effect = [have_it_sn1_resp, have_it_sn2_resp]
if ('test_dcnm_srp_query_with_peer_names' == self._testMethodName):
have_rp1_resp = self.payloads_data.get('have_rp1_resp')
have_rp2_resp = self.payloads_data.get('have_rp2_resp')
have_rp3_resp = self.payloads_data.get('have_rp3_resp')
have_rp4_resp = self.payloads_data.get('have_rp4_resp')
have_rp5_resp = self.payloads_data.get('have_rp5_resp')
have_rp6_resp = self.payloads_data.get('have_rp6_resp')
have_rp7_resp = self.payloads_data.get('have_rp7_resp')
self.run_dcnm_send.side_effect = [have_rp1_resp, have_rp2_resp, have_rp3_resp,
have_rp4_resp, have_rp5_resp, have_rp6_resp,
have_rp7_resp]
def load_fixtures(self, response=None, device=''):
    """Install per-test DCNM side effects before a test case runs.

    *response* and *device* belong to the common fixture-loading interface
    and are not used here; all service-route-peering fixtures are wired up
    by the dedicated loader.
    """
    self.load_srp_fixtures()
#################################### FIXTURES END ############################
#################################### TEST-CASES ##############################
def test_dcnm_srp_merged_new(self):
    """Merged state: create seven brand-new route peerings and deploy them."""
    # Load playbook configuration and DCNM payload fixtures.
    self.config_data = loadPlaybookData('dcnm_srp_configs')
    self.payloads_data = loadPlaybookData('dcnm_srp_payloads')
    self.playbook_config = self.config_data.get('create_rp1_rp7_config')

    set_module_args(dict(state='merged',
                         attach=True,
                         deploy=True,
                         fabric='mmudigon',
                         service_fabric='external',
                         config=self.playbook_config))
    result = self.execute_module(changed=True, failed=False)

    # All seven peerings are created and deployed; nothing else changes.
    expected_counts = {'merged': 7, 'deleted': 0, 'modified': 0,
                       'query': 0, 'deploy': 7}
    for action, count in expected_counts.items():
        self.assertEqual(len(result["diff"][0][action]), count)

    # Every create/deploy response from DCNM must indicate success.
    for resp in result["response"]:
        self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_srp_merged_new_no_opt_elems(self):
    """Merged state: create seven peerings from a config lacking optional elements."""
    # Load playbook configuration and DCNM payload fixtures.
    self.config_data = loadPlaybookData('dcnm_srp_configs')
    self.payloads_data = loadPlaybookData('dcnm_srp_payloads')
    self.playbook_config = self.config_data.get(
        'create_rp1_rp7_config_no_opt_elems')

    set_module_args(dict(state='merged',
                         attach=True,
                         deploy=True,
                         fabric='mmudigon',
                         service_fabric='external',
                         config=self.playbook_config))
    result = self.execute_module(changed=True, failed=False)

    # All seven peerings are created and deployed; nothing else changes.
    expected_counts = {'merged': 7, 'deleted': 0, 'modified': 0,
                       'query': 0, 'deploy': 7}
    for action, count in expected_counts.items():
        self.assertEqual(len(result["diff"][0][action]), count)

    # Every create/deploy response from DCNM must indicate success.
    for resp in result["response"]:
        self.assertEqual(resp["RETURN_CODE"], 200)
def test_dcnm_srp_merged_existing_no_opt_elems (self):
# load the json | |
= self.CreateBucket()
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(
['-o', option, 'ls', suri(bucket_uri)], return_stdout=True)
self.assertEqual('%s\n' % obj_uri, stdout)
if os.name == 'posix':
self.assertTrue(os.path.exists(filepath))
mode = oct(stat.S_IMODE(os.stat(filepath).st_mode))
# Assert that only user has read/write permission
self.assertEqual(oct(0o600), mode)
_Check1()
def test_one_object_with_l(self):
    """Tests listing one object with -l."""
    object_uri = self.CreateObject(contents=b'foo')
    listing = self.RunGsUtil(['ls', '-l', suri(object_uri)],
                             return_stdout=True)
    fields = listing.split()
    # First column is the object size in bytes.
    self.assertTrue(fields[0].isdigit())
    # Second column must parse as an ISO-8601 UTC timestamp; strptime
    # raises if the format is wrong.
    time.strptime(fields[1], '%Y-%m-%dT%H:%M:%SZ')
    # Third column is the object URI itself.
    self.assertEqual(fields[2], suri(object_uri))
def test_one_object_with_L(self):
    """Tests listing one object with -L."""
    obj_uri = self.CreateObject(contents=b'foo')
    # Sleep so the creation and update timestamps land in different seconds.
    time.sleep(2)
    # Touch metadata so the object gains an update time distinct from its
    # creation time; ls -L must still report the creation time.
    self.RunGsUtil(['setmeta', '-h', 'x-goog-meta-foo:bar', suri(obj_uri)])
    created_re = re.compile(
        r'^\s*Creation time:\s+(?P<time_created_val>.+)$', re.MULTILINE)
    updated_re = re.compile(
        r'^\s*Update time:\s+(?P<time_updated_val>.+)$', re.MULTILINE)
    stdout = self.RunGsUtil(['ls', '-L', suri(obj_uri)], return_stdout=True)
    created_match = created_re.search(stdout)
    updated_match = updated_re.search(stdout)
    created_text = created_match.group('time_created_val')
    self.assertIsNotNone(created_text)
    created = time.strptime(created_text, '%a, %d %b %Y %H:%M:%S %Z')
    if self.test_api == ApiSelector.XML:
        # The XML API has no concept of an update time.
        self.assertIsNone(updated_match)
    elif self.test_api == ApiSelector.JSON:
        updated_text = updated_match.group('time_updated_val')
        self.assertIsNotNone(updated_text)
        updated = time.strptime(updated_text, '%a, %d %b %Y %H:%M:%S %Z')
        self.assertGreater(updated, created)
def test_subdir(self):
    """Tests listing a bucket subdirectory."""
    bucket_uri = self.CreateBucket(test_objects=1)
    top_uri = bucket_uri.clone_replace_name('foo')
    top_uri.set_contents_from_string('baz')
    nested_uri = bucket_uri.clone_replace_name('dir/foo')
    nested_uri.set_contents_from_string('bar')

    # Use @Retry as hedge against bucket listing eventual consistency.
    @Retry(AssertionError, tries=3, timeout_secs=1)
    def _check_listing():
        # Listing the subdirectory shows only the nested object.
        stdout = self.RunGsUtil(['ls', '%s/dir' % suri(bucket_uri)],
                                return_stdout=True)
        self.assertEqual('%s\n' % suri(nested_uri), stdout)
        # Listing the top-level object shows just that object.
        stdout = self.RunGsUtil(['ls', suri(top_uri)], return_stdout=True)
        self.assertEqual('%s\n' % suri(top_uri), stdout)

    _check_listing()
def test_subdir_nocontents(self):
    """Tests listing a bucket subdirectory using -d.

    Result will display subdirectory names instead of contents. Uses a wildcard
    to show multiple matching subdirectories.
    """
    bucket_uri = self.CreateBucket(test_objects=1)
    top_uri = bucket_uri.clone_replace_name('foo')
    top_uri.set_contents_from_string('baz')
    dir1_obj1 = bucket_uri.clone_replace_name('dir/foo')
    dir1_obj1.set_contents_from_string('bar')
    dir1_obj2 = bucket_uri.clone_replace_name('dir/foo2')
    dir1_obj2.set_contents_from_string('foo')
    dir2_obj = bucket_uri.clone_replace_name('dir2/foo3')
    dir2_obj.set_contents_from_string('foo2')

    # Use @Retry as hedge against bucket listing eventual consistency.
    @Retry(AssertionError, tries=3, timeout_secs=1)
    def _check_listing():
        # With -d, the wildcard resolves to the subdirectory names only.
        stdout = self.RunGsUtil(['ls', '-d', '%s/dir*' % suri(bucket_uri)],
                                return_stdout=True)
        self.assertEqual(
            '%s/dir/\n%s/dir2/\n' % (suri(bucket_uri), suri(bucket_uri)),
            stdout)
        # A concrete object still lists normally.
        stdout = self.RunGsUtil(['ls', suri(top_uri)], return_stdout=True)
        self.assertEqual('%s\n' % suri(top_uri), stdout)

    _check_listing()
  def test_versioning(self):
    """Tests listing a versioned bucket.

    Copies the same source object twice into a versioned bucket (creating two
    versions of it) and verifies that `ls -a` shows all versions and `ls -la`
    shows version-specific URIs and metageneration info.
    """
    bucket1_uri = self.CreateBucket(test_objects=1)
    bucket2_uri = self.CreateVersionedBucket(test_objects=1)
    self.AssertNObjectsInBucket(bucket1_uri, 1, versioned=True)
    bucket_list = list(bucket1_uri.list_bucket())
    objuri = [
        bucket1_uri.clone_replace_key(key).versionless_uri
        for key in bucket_list
    ][0]
    # Two copies of the same object produce two versions in bucket2.
    self.RunGsUtil(['cp', objuri, suri(bucket2_uri)])
    self.RunGsUtil(['cp', objuri, suri(bucket2_uri)])
    # Use @Retry as hedge against bucket listing eventual consistency.
    @Retry(AssertionError, tries=3, timeout_secs=1)
    def _Check2():
      # 3 lines: bucket2's pre-existing object plus the two copied versions.
      stdout = self.RunGsUtil(['ls', '-a', suri(bucket2_uri)],
                              return_stdout=True)
      self.assertNumLines(stdout, 3)
      # Long listing includes generation ('#') suffixes and metageneration.
      stdout = self.RunGsUtil(['ls', '-la', suri(bucket2_uri)],
                              return_stdout=True)
      self.assertIn('%s#' % bucket2_uri.clone_replace_name(bucket_list[0].name),
                    stdout)
      self.assertIn('metageneration=', stdout)
    _Check2()
  def test_etag(self):
    """Tests that listing shows an object's etag only when requested (-e)."""
    bucket_uri = self.CreateBucket()
    obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
    # TODO: When testcase setup can use JSON, match against the exact JSON
    # etag.
    etag = obj_uri.get_key().etag.strip('"\'')
    # Use @Retry as hedge against bucket listing eventual consistency.
    @Retry(AssertionError, tries=3, timeout_secs=1)
    def _Check1():
      # Plain long listing (-l) must not include the etag.
      stdout = self.RunGsUtil(['ls', '-l', suri(bucket_uri)],
                              return_stdout=True)
      if self.test_api == ApiSelector.XML:
        self.assertNotIn(etag, stdout)
      else:
        self.assertNotIn('etag=', stdout)
    _Check1()
    # NOTE(review): unlike _Check1, _Check2 and _Check3 are not
    # @Retry-decorated, so they have no hedge against eventual consistency.
    def _Check2():
      # With -e the etag is included.
      stdout = self.RunGsUtil(['ls', '-le', suri(bucket_uri)],
                              return_stdout=True)
      if self.test_api == ApiSelector.XML:
        self.assertIn(etag, stdout)
      else:
        self.assertIn('etag=', stdout)
    _Check2()
    def _Check3():
      # -a (all versions) combined with -l and -e still includes the etag.
      stdout = self.RunGsUtil(['ls', '-ale', suri(bucket_uri)],
                              return_stdout=True)
      if self.test_api == ApiSelector.XML:
        self.assertIn(etag, stdout)
      else:
        self.assertIn('etag=', stdout)
    _Check3()
def test_labels(self):
"""Tests listing on a bucket with a label/tagging configuration."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
# No labels are present by default.
self.assertRegex(stdout, r'Labels:\s+None')
# Add a label and check that it shows up.
self.RunGsUtil(['label', 'ch', '-l', 'labelkey:labelvalue', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
label_regex = re.compile(r'Labels:\s+\{\s+"labelkey":\s+"labelvalue"\s+\}',
re.MULTILINE)
self.assertRegex(stdout, label_regex)
  @SkipForS3('S3 bucket configuration values are not supported via ls.')
  def test_location_constraint(self):
    """Tests listing a bucket with location constraint.

    The field should be absent from `ls -lb` output and present (with some
    value) in `ls -Lb` output.
    """
    bucket_uri = self.CreateBucket()
    bucket_suri = suri(bucket_uri)
    # No location constraint should be shown for `-lb`
    stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
    self.assertNotIn('Location constraint:', stdout)
    # Default location constraint is US
    stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
    # Default location may vary between test environments; test that some
    # non-whitespace character is present after the whitespace:
    self.assertRegex(stdout, r'Location constraint:\s+\S')
# TODO(b/135700569): Stop skipping this once this field is available to all
# projects.
  @unittest.skip('b/135700569')
  @SkipForXML('Location type not available when using the GCS XML API.')
  @SkipForS3('Location type not printed for S3 buckets.')
  def test_location_type(self):
    """Tests listing a bucket's location type.

    The field should be absent from `ls -lb` output and present (with some
    value) in `ls -Lb` output.
    """
    bucket_uri = self.CreateBucket()
    bucket_suri = suri(bucket_uri)
    # No location type should be shown for `-lb`
    stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
    self.assertNotIn('Location type:', stdout)
    # Default location type may vary between test environments; test that some
    # non-whitespace character is present after the whitespace:
    stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
    self.assertRegex(stdout, r'Location type:\s+\S')
@SkipForS3('S3 bucket configuration values are not supported via ls.')
def test_logging(self):
"""Tests listing a bucket with logging config."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No logging info
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Logging configuration', stdout)
# Logging configuration is absent by default
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Logging configuration:\t\tNone', stdout)
# Enable and check
self.RunGsUtil(['logging', 'set', 'on', '-b', bucket_suri, bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Logging configuration:\t\tPresent', stdout)
# Disable and check
self.RunGsUtil(['logging', 'set', 'off', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Logging configuration:\t\tNone', stdout)
@SkipForS3('S3 bucket configuration values are not supported via ls.')
def test_web(self):
"""Tests listing a bucket with website config."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No website configuration
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Website configuration', stdout)
# Website configuration is absent by default
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Website configuration:\t\tNone', stdout)
# Initialize and check
self.RunGsUtil(['web', 'set', '-m', 'google.com', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Website configuration:\t\tPresent', stdout)
# Clear and check
self.RunGsUtil(['web', 'set', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Website configuration:\t\tNone', stdout)
@SkipForS3('S3 bucket configuration values are not supported via ls.')
@SkipForXML('Requester Pays is not supported for the XML API.')
def test_requesterpays(self):
"""Tests listing a bucket with requester pays (billing) config."""
bucket_uri = self.CreateBucket()
bucket_suri = suri(bucket_uri)
# No requester pays configuration
stdout = self.RunGsUtil(['ls', '-lb', bucket_suri], return_stdout=True)
self.assertNotIn('Requester Pays enabled', stdout)
# Requester Pays configuration is absent by default
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Requester Pays enabled:\t\tNone', stdout)
# Initialize and check
self.RunGsUtil(['requesterpays', 'set', 'on', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Requester Pays enabled:\t\tTrue', stdout)
# Clear and check
self.RunGsUtil(['requesterpays', 'set', 'off', bucket_suri])
stdout = self.RunGsUtil(['ls', '-Lb', bucket_suri], return_stdout=True)
self.assertIn('Requester Pays enabled:\t\tFalse', stdout)
def test_list_sizes(self):
"""Tests various size listing options."""
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri, contents=b'x' * 2048)
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-l', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2048', stdout)
_Check1()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2048', stdout)
_Check2()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check3():
stdout = self.RunGsUtil(['ls', '-al', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2048', stdout)
_Check3()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check4():
stdout = self.RunGsUtil(['ls', '-lh', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2 KiB', stdout)
_Check4()
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check5():
stdout = self.RunGsUtil(['ls', '-alh', suri(bucket_uri)],
return_stdout=True)
self.assertIn('2 KiB', stdout)
_Check5()
@unittest.skipIf(IS_WINDOWS,
'Unicode handling on Windows requires mods to site-packages')
def test_list_unicode_filename(self):
"""Tests listing an object with a unicode filename."""
# Note: This test fails on Windows (command.exe). I was able to get ls to
# output Unicode filenames correctly by hacking the UniStream class code
# shown at
# http://stackoverflow.com/questions/878972/windows-cmd-encoding-change-causes-python-crash/3259271
# into the start of gslib/commands/ls.py, along with no-op flush and
# isastream functions (as an experiment). However, even with that change,
# the current test still fails, since it also needs to run that
# stdout/stderr-replacement code. That UniStream class replacement really
# needs to be added to the site-packages on Windows python.
object_name = u'Аудиоархив'
bucket_uri = self.CreateVersionedBucket()
key_uri = self.CreateObject(bucket_uri=bucket_uri,
contents=b'foo',
object_name=object_name)
self.AssertNObjectsInBucket(bucket_uri, 1, versioned=True)
stdout = self.RunGsUtil(['ls', '-ael', suri(key_uri)], | |
<filename>modin/pandas/test/test_dataframe.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import io
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from modin.pandas.series import SeriesView
from numpy.testing import assert_array_equal
import sys
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# Keep the partition count small and deterministic for the test suite.
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
# True when running under Python 2; replaces the verbose if/else assignment
# with the idiomatic single-expression form (same value).
PY2 = sys.version_info[0] < 3
# Test inter df math functions
def inter_df_math_helper(modin_df, pandas_df, op):
    """Compare modin's arithmetic method `op` against pandas.

    Exercises `op` with the frame itself, integer and float scalars, a
    misaligned second frame, and row/column-shaped arrays with an explicit
    axis. Whenever pandas raises, modin must raise the same exception type;
    otherwise the results must be equal.
    """
    def _verify(pandas_operand, modin_operand, **kwargs):
        # One case: run on pandas first, then demand matching behavior
        # (same exception type or equal result) from modin.
        try:
            expected = getattr(pandas_df, op)(pandas_operand, **kwargs)
        except Exception as err:
            with pytest.raises(type(err)):
                getattr(modin_df, op)(modin_operand, **kwargs)
        else:
            actual = getattr(modin_df, op)(modin_operand, **kwargs)
            df_equals(actual, expected)

    # Frame-with-itself and scalar operands.
    _verify(pandas_df, modin_df)
    _verify(4, 4)
    _verify(4.0, 4.0)

    # A second frame with one overlapping and one extra column.
    frame_data = {
        "{}_other".format(modin_df.columns[0]): [0, 2],
        modin_df.columns[0]: [0, 19],
        modin_df.columns[1]: [1, 1],
    }
    _verify(pandas.DataFrame(frame_data), pd.DataFrame(frame_data))

    # Array operands along each axis (same RNG call order as before).
    row_values = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
    _verify(row_values, row_values, axis=1)
    col_values = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[0]))
    _verify(col_values, col_values, axis=0)
# Each test below drives inter_df_math_helper with one DataFrame arithmetic
# method name; the helper does the modin-vs-pandas comparison.
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(data):
    """DataFrame.add matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(data):
    """DataFrame.div matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(data):
    """DataFrame.divide (alias of div) matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(data):
    """DataFrame.floordiv matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(data):
    """DataFrame.mod matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(data):
    """DataFrame.mul matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(data):
    """DataFrame.multiply (alias of mul) matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(data):
    """DataFrame.pow matches pandas (on absolute values only)."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    # TODO: Revert to others once we have an efficient way of preprocessing for positive
    # values
    try:
        pandas_df = pandas_df.abs()
    except Exception:
        pass
    else:
        modin_df = modin_df.abs()
    inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(data):
    """DataFrame.sub matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(data):
    """DataFrame.subtract (alias of sub) matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(data):
    """DataFrame.truediv matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(data):
    """DataFrame.__div__ (legacy division operator) matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    inter_df_math_helper(modin_df, pandas_df, "__div__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(modin_df, pandas_df, op):
    """Compare modin's comparison method `op` against pandas.

    Frame operands must mirror any pandas exception exactly (same type);
    scalar operands are only guarded against TypeError — any other pandas
    exception propagates, matching the original test's expectations.
    """
    def _verify_frame(pandas_operand, modin_operand):
        try:
            expected = getattr(pandas_df, op)(pandas_operand)
        except Exception as err:
            with pytest.raises(type(err)):
                getattr(modin_df, op)(modin_operand)
        else:
            df_equals(getattr(modin_df, op)(modin_operand), expected)

    def _verify_scalar(scalar):
        # Only TypeError is anticipated here; anything else propagates,
        # exactly like the original narrow except-clause.
        try:
            expected = getattr(pandas_df, op)(scalar)
        except TypeError:
            with pytest.raises(TypeError):
                getattr(modin_df, op)(scalar)
        else:
            df_equals(getattr(modin_df, op)(scalar), expected)

    _verify_frame(pandas_df, modin_df)
    _verify_scalar(4)
    _verify_scalar(4.0)

    # A second frame with one overlapping and one extra column.
    frame_data = {
        "{}_other".format(modin_df.columns[0]): [0, 2],
        modin_df.columns[0]: [0, 19],
        modin_df.columns[1]: [1, 1],
    }
    _verify_frame(pandas.DataFrame(frame_data), pd.DataFrame(frame_data))
# Each test below drives comparison_inter_ops_helper with one DataFrame
# comparison method name.
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(data):
    """DataFrame.eq matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(data):
    """DataFrame.ge matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(data):
    """DataFrame.gt matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(data):
    """DataFrame.le matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(data):
    """DataFrame.lt matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(data):
    """DataFrame.ne matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(modin_df, pandas_df, op):
    """Compare modin's reflected arithmetic method `op` against pandas.

    Only scalar operands (int and float) are exercised; any pandas
    exception must be mirrored by modin with the same type.
    """
    for scalar in (4, 4.0):
        try:
            expected = getattr(pandas_df, op)(scalar)
        except Exception as err:
            with pytest.raises(type(err)):
                getattr(modin_df, op)(scalar)
        else:
            df_equals(getattr(modin_df, op)(scalar), expected)
# Each test below drives inter_df_math_right_ops_helper with one reflected
# arithmetic method name.
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(data):
    """DataFrame.radd matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(data):
    """DataFrame.rdiv matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(data):
    """DataFrame.rfloordiv matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(data):
    """DataFrame.rmod matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(data):
    """DataFrame.rmul matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(request, data):
    """DataFrame.rpow matches pandas (skipping the 100x100 dataset)."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    # TODO: Revert to others once we have an efficient way of preprocessing for positive values
    # We need to check that negative integers are not used efficiently
    if "100x100" not in request.node.name:
        inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(data):
    """DataFrame.rsub matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(data):
    """DataFrame.rtruediv matches pandas.

    Bug fix: the helper was previously invoked with the misspelled method
    name "rtrudiv". Both pandas and modin raised AttributeError for the
    nonexistent attribute, so the exception-mirroring helper passed
    vacuously and rtruediv was never actually exercised.
    """
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(data):
    """DataFrame.__rsub__ (reflected subtraction dunder) matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(request, data):
    """DataFrame.abs matches pandas, including any raised exception type."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    try:
        expected = pandas_df.abs()
    except Exception as err:
        # pandas refused; modin must raise the same exception type.
        with pytest.raises(type(err)):
            modin_df.abs()
    else:
        df_equals(modin_df.abs(), expected)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(data):
    """Column labels gain the prefix exactly as in pandas."""
    prefix = "TEST"
    df_equals(
        pd.DataFrame(data).add_prefix(prefix).columns,
        pandas.DataFrame(data).add_prefix(prefix).columns,
    )
@pytest.mark.skip(
    reason="We do not have support to check if a UDF can only take in numeric functions"
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(request, data, testfunc):
    """DataFrame.applymap matches pandas, including raised exceptions."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    try:
        pandas_result = pandas_df.applymap(testfunc)
    except Exception as e:
        with pytest.raises(type(e)):
            modin_df.applymap(testfunc)
    else:
        modin_result = modin_df.applymap(testfunc)
        df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(request, data, testfunc):
    """Same as test_applymap, but only for numeric datasets."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    # Numeric datasets are detected by matching the parametrized test name.
    if name_contains(request.node.name, numeric_dfs):
        try:
            pandas_result = pandas_df.applymap(testfunc)
        except Exception as e:
            with pytest.raises(type(e)):
                modin_df.applymap(testfunc)
        else:
            modin_result = modin_df.applymap(testfunc)
            df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(data):
    """Column labels gain the suffix exactly as in pandas."""
    suffix = "TEST"
    df_equals(
        pd.DataFrame(data).add_suffix(suffix).columns,
        pandas.DataFrame(data).add_suffix(suffix).columns,
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(request, data):
    """DataFrame.at scalar reads, Series reads, and writes match pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    # We skip nan datasets because nan != nan
    if "nan" not in request.node.name:
        key1 = modin_df.columns[0]
        # Scalar read.
        assert modin_df.at[0, key1] == pandas_df.at[0, key1]
        # Series read via .loc then .at.
        df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
        # Write item on copies, then compare whole frames.
        modin_df_copy = modin_df.copy()
        pandas_df_copy = pandas_df.copy()
        modin_df_copy.at[1, key1] = modin_df.at[0, key1]
        pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
        df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(data):
    """Row and column axes must match pandas pairwise."""
    modin_axes = pd.DataFrame(data).axes
    pandas_axes = pandas.DataFrame(data).axes
    for modin_axis, pandas_axis in zip(modin_axes, pandas_axes):
        assert np.array_equal(modin_axis, pandas_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(data):
    """DataFrame.copy returns a new, equal frame sharing partition data.

    Fix: the original asserted `new_modin_df is not modin_df` twice
    (before and after the partition check); the duplicate is removed.
    """
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)  # noqa F841
    # pandas_df is unused but there so there won't be confusing list comprehension
    # stuff in the pytest.mark.parametrize
    new_modin_df = modin_df.copy()
    # The copy is a distinct wrapper object...
    assert new_modin_df is not modin_df
    # ...that still references the same underlying partition objects...
    assert np.array_equal(
        new_modin_df._query_compiler.data.partitions,
        modin_df._query_compiler.data.partitions,
    )
    # ...and holds equal contents.
    df_equals(new_modin_df, modin_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(data):
    """DataFrame.dtypes matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    df_equals(modin_df.dtypes, pandas_df.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ftypes(data):
    """DataFrame.ftypes (dtype + sparsity) matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    df_equals(modin_df.ftypes, pandas_df.ftypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(data, key):
    """DataFrame.get, with and without a default, matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    # Missing-key behavior without a default.
    df_equals(modin_df.get(key), pandas_df.get(key))
    # And with an explicit fallback value.
    modin_with_default = modin_df.get(key, default="default")
    pandas_with_default = pandas_df.get(key, default="default")
    df_equals(modin_with_default, pandas_with_default)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_dtype_counts(data):
    """get_dtype_counts (sorted for a stable comparison) matches pandas."""
    modin_result = pd.DataFrame(data).get_dtype_counts().sort_index()
    pandas_result = pandas.DataFrame(data).get_dtype_counts().sort_index()
    df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
    "drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(request, data, dummy_na, drop_first):
    """pd.get_dummies matches pandas.get_dummies, including exceptions."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    try:
        pandas_result = pandas.get_dummies(
            pandas_df, dummy_na=dummy_na, drop_first=drop_first
        )
    except Exception as e:
        with pytest.raises(type(e)):
            pd.get_dummies(modin_df, dummy_na=dummy_na, drop_first=drop_first)
    else:
        modin_result = pd.get_dummies(
            modin_df, dummy_na=dummy_na, drop_first=drop_first
        )
        df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_ftype_counts(data):
    """get_ftype_counts matches pandas."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    df_equals(modin_df.get_ftype_counts(), pandas_df.get_ftype_counts())
@pytest.mark.skip(
    reason="We do not have support to check if a UDF can only take in numeric functions"
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(data, axis, func):
    """DataFrame.agg matches pandas for each (func, axis) pair."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    try:
        pandas_result = pandas_df.agg(func, axis)
    except Exception as e:
        with pytest.raises(type(e)):
            modin_df.agg(func, axis)
    else:
        modin_result = modin_df.agg(func, axis)
        df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = | |
pady=7, sticky='we')
row += 1
f = Tkinter.Frame(parent)
f.grid(column=0, row=row, sticky='ew')
row += 1
l = Tkinter.Label(f, text='To cite Segger or learn more about it press the Help button', fg="blue")
l.grid(column=0, row=0, sticky='w')
dummyFrame = Tkinter.Frame(parent, relief='groove', borderwidth=1)
Tkinter.Frame(dummyFrame).pack()
dummyFrame.grid(row=row,column=0,columnspan=3, pady=3, sticky='we')
row += 1
global msg
msg = Tkinter.Label(parent, width = 20, anchor = 'w', justify = 'left', fg="red")
msg.grid(column=0, row=row, sticky='ew')
self.msg = msg
umsg ( 'Select an open density map in the field above and press Segment!' )
row += 1
vlist = VolumeViewer.volume_list()
if vlist:
self.SetMapMenu(vlist[0])
for m in regions.segmentations() :
v = m.volume_data()
if v and m.display :
self.SetCurrentSegmentation ( m )
try : self.SetMapMenu ( v )
except : pass
chimera.openModels.addRemoveHandler(self.ModelClosed, None)
if dev_menus :
self.optionsPanel.set(True)
self.shortcutsPanelShownVar.set(True)
self.toolsPanelShownVar.set(True)
    def SetColorLevel(self):
        """Color the density from regions selected by the color-level slider.

        Collects every region whose color_level is at or above the slider
        value while its parent (preg) is below it — i.e. the topmost regions
        at that level — and hands them to the segmentation's color_density.
        """
        smod = self.CurrentSegmentation()
        if smod is None:
            return
        s = self.colorLevel
        lev = int(s.value())
        regions = [r for r in smod.all_regions()
                   if hasattr(r, 'color_level') and r.color_level >= lev and
                   (r.preg is None or r.preg.color_level < lev)]
        smod.color_density(regions)
        return
        # TODO: Unused code adjusts region surface colors.
        # NOTE(review): everything below is unreachable (dead code after the
        # return above). It also appears broken: the local list `regions`
        # shadows the `regions` module whose contact_grouping /
        # connected_subsets these calls presumably expect — confirm before
        # reviving; otherwise delete.
        if not hasattr(smod, 'contact_grouping'):
            cg = smod.contact_grouping = regions.contact_grouping(smod)
            smod.region_count = len(smod.childless_regions())
        cg = smod.contact_grouping
        range = (smod.region_count - len(cg), smod.region_count)
        if s.range() != range:
            s.set_range(range[0], range[1], step = 1)
        p = max(0, smod.region_count - int(s.value()))
        cs, pairs = regions.connected_subsets(cg[:p])
        # Reset colors
        for r in smod.regions:
            sp = r.surface_piece
            if sp:
                sp.color = r.color
        # Color groups
        for rlist in cs:
            r0 = rlist[0]
            sp0 = r0.surface_piece
            if sp0:
                c = sp0.color
                for r in rlist[1:]:
                    sp = r.surface_piece
                    if sp:
                        sp.color = c
def SetColorLevelRange(self):
smod = self.CurrentSegmentation()
if smod is None:
return
clevels = [r.color_level for r in smod.all_regions()
if hasattr(r, 'color_level')]
clmin = min(clevels)
clmax = max(clevels)
cl = self.colorLevel
cl.set_range(clmin, clmax, step = 1)
cl.set_value(clmin)
def SetContactGrouping(self):
smod = self.CurrentSegmentation()
if smod is None:
return
s = self.colorLevel
lev = int(s.value())
regions = [r for r in smod.all_regions()
if hasattr(r, 'color_level') and r.color_level < lev]
smod.remove_regions(regions, update_surfaces = True)
self.RegsDispUpdate()
def ColorDensity(self):
smod = self.CurrentSegmentation()
if smod is None:
return
smod.color_density()
def Options(self) :
self.optionsPanel.set (not self.optionsPanel.get())
def Shortcuts (self) :
print "shortcuts"
self.shortcutsPanelShownVar.set ( not self.shortcutsPanelShownVar.get() )
def Tools (self) :
print "tools"
self.toolsPanelShownVar.set ( not self.toolsPanelShownVar.get() )
def Log ( self ) :
import Idle
Idle.start_shell()
def RSeg ( self ) :
import Segger.rseg_dialog
reload ( Segger.rseg_dialog )
Segger.rseg_dialog.show_dialog()
def ISeg ( self ) :
import Segger.iseg_dialog
reload ( Segger.iseg_dialog )
Segger.iseg_dialog.show_dialog()
def SSE ( self ) :
# self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )
import Segger.sse_dialog
reload ( Segger.sse_dialog )
Segger.sse_dialog.show_sse_dialog()
def SegLoop ( self ) :
# self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )
import Segger.segloop_dialog
reload ( Segger.segloop_dialog )
Segger.segloop_dialog.show_dialog()
def SegMod0 ( self ) :
# self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )
import Segger.segmod_dialog
reload ( Segger.segmod_dialog )
Segger.segmod_dialog.show_dialog()
def SegNA ( self ) :
# self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )
import Segger.segna_dialog
reload ( Segger.segna_dialog )
Segger.segna_dialog.show_dialog()
def Ar ( self ) :
# self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )
import Segger.ar_dialog
reload ( Segger.ar_dialog )
Segger.ar_dialog.show_dialog()
def Spr ( self ) :
# self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )
import Segger.spr_dialog
reload ( Segger.spr_dialog )
Segger.spr_dialog.show_dialog()
def Vr ( self ) :
# self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )
import Segger.vr_dialog
reload ( Segger.vr_dialog )
Segger.vr_dialog.show_dialog()
def CamMono ( self ) :
chimera.viewer.camera.setMode ( "mono" )
def CamSBS ( self ) :
chimera.viewer.camera.setMode ( "DTI side-by-side stereo" )
def ProMod ( self ) :
# self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )
import Segger.promod_dialog
reload ( Segger.promod_dialog )
Segger.promod_dialog.show_dialog()
def ModelZ ( self ) :
    # Show the Model-Z scoring dialog (reload for dev convenience).
    # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )
    import Segger.modelz
    reload ( Segger.modelz )
    Segger.modelz.show_dialog()
def MapQ ( self ) :
    # Show the MapQ map-quality dialog (reload for dev convenience).
    # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )
    import Segger.mapq
    reload ( Segger.mapq )
    Segger.mapq.show_dialog()
def BioMovie ( self ) :
    # Show the BioMovie dialog (reload for dev convenience).
    # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )
    import Segger.biomovie
    reload ( Segger.biomovie )
    Segger.biomovie.show_dialog()
def SWIM ( self ) :
    # Show the SWIM dialog (reload for dev convenience).
    # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )
    import Segger.SWIM
    reload ( Segger.SWIM )
    Segger.SWIM.show_dialog()
def SegMod ( self ) :
    # Show the SegMod dialog (reload for dev convenience).
    # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )
    import Segger.SegMod
    reload ( Segger.SegMod )
    Segger.SegMod.show_dialog()
def MDFF ( self ) :
    # Show the MDFF (molecular dynamics flexible fitting) dialog.
    # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )
    import Segger.mdff_dialog
    reload ( Segger.mdff_dialog )
    Segger.mdff_dialog.show_dialog()
def PiFold ( self ) :
    # Show the PiFold dialog (reload for dev convenience).
    # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )
    import Segger.PiFold
    reload ( Segger.PiFold )
    Segger.PiFold.show_dialog()
def Animate ( self ) :
    # Reopen the animation dialog from scratch: close any existing instance
    # before showing a fresh one.
    # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )
    import Segger.animate_dialog
    reload ( Segger.animate_dialog )
    Segger.animate_dialog.close_animate_dialog ()
    Segger.animate_dialog.show_dialog()
def FlexFit ( self ) :
    # Show the flexible-fitting dialog (reload for dev convenience).
    # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )
    import Segger.flexfit_dialog
    reload ( Segger.flexfit_dialog )
    Segger.flexfit_dialog.show_dialog()
def Tomolog ( self ) :
    # Show the tomolog dialog (reload for dev convenience).
    # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )
    import Segger.tomolog_dialog
    reload ( Segger.tomolog_dialog )
    Segger.tomolog_dialog.show_dialog()
def GeoSeg ( self ) :
    # Show the geometric-segmentation dialog (reload for dev convenience).
    # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )
    import Segger.geoseg_dialog
    reload ( Segger.geoseg_dialog )
    Segger.geoseg_dialog.show_dialog()
def MapCOM ( self ) :
    # Center the view on the center of mass (COM) of the segmented map,
    # then glide the camera toward it with a short eased animation.
    dmap = self.SegmentationMap()
    import axes
    pts, weights = axes.map_points ( dmap )
    if len(pts) == 0 :
        print " - no pts at this threshold?"
        return
    # Principal-axes helper also returns the center of mass.
    COM, U, S, V = axes.prAxes ( pts )
    print "com:", COM
    #chimera.viewer.camera.center = chimera.Point ( COM[0], COM[1], COM[2] )
    #xf = chimera.Xform.translation ( chimera.Vector( -COM[0], -COM[1], -COM[2] ) )
    #dmap.openState.xform = xf
    # COM is in map coordinates; transform to screen/model coordinates
    # before setting the center of rotation.
    p = chimera.Point ( COM[0], COM[1], COM[2] )
    chimera.openModels.cofr = dmap.openState.xform.apply ( p )
    moveCam = 1
    if moveCam :
        p0 = numpy.array ( chimera.viewer.camera.center )
        p1 = numpy.array ( chimera.openModels.cofr )
        for i in range (10) :
            f = float(i) / 9.0
            # Cubic Hermite blend weights (ease-in/ease-out between p0 and p1).
            f1, f2 = 2.0*f*f*f-3.0*f*f+1.0, 3*f*f-2*f*f*f
            P = p0 * f1 + p1 * f2
            chimera.viewer.camera.center = (P[0],P[1],P[2])
            print ".",
        print ""
def OpenSegmentation(self):
    # Show the open-file dialog, starting in the segmented map's directory.
    dmap = self.SegmentationMap()
    dir = os.path.dirname(dmap.data.path) if dmap else None
    import segfile
    segfile.show_open_dialog(dir, self.OpenSegFiles)
def OpenSegFiles(self, paths_and_types, open = True):
    # Load one or more segmentation files (new .seg format or old-style
    # regions files), make the last one current, and report each load.
    # Returns the list of loaded segmentation models, or None on early exit.
    from chimera import tasks, CancelOperation
    task = tasks.Task('Loading segmentation', modal = True)
    smods = []
    try:
        import segfile
        reload (segfile)
        for path, ftype in paths_and_types:
            if ftype == 'Segmentation':
                try:
                    smod = segfile.read_segmentation(path, open, task)
                except CancelOperation:
                    break
            elif ftype == 'Old regions file':
                # Old format lacks grid size/spacing, so the map must
                # already be open to supply them.
                dmap = self.SegmentationMap()
                if dmap is None:
                    from chimera.replyobj import error
                    from os.path import basename
                    error('Segmentation map must be open before opening old-style segmentation file\n\n\t%s\n\nbecause file does not contain grid size and spacing.' % basename(path))
                    return
                import regionsfile
                smod = regionsfile.ReadRegionsFile ( path, dmap )
            smods.append(smod)
        if len(smods) == 0:
            umsg ( "No segmentation was loaded." )
            return
        for smod in smods:
            smod.open_map()
        # TODO: Can't control whether marker model is opened.
        smod = smods[-1]
        self.SetCurrentSegmentation(smod)
        v = smod.volume_data()
        if v:
            self.SetMapMenu(v)
        else :
            umsg ( "Volume data not found" )
        try:
            self.RegsDispUpdate (task)
        except CancelOperation:
            pass
    finally:
        # Always close the progress task, even on cancel/error.
        task.finished()
    for s in smods:
        mname = os.path.basename(getattr(s, 'map_path', 'unknown'))
        umsg('Opened segmentation %s of map %s, grid size (%d,%d,%d)'
             % ((s.name, mname) + tuple(s.grid_size())))
    return smods
def SaveSegmentation(self):
    # Save the current segmentation to its existing path; fall back to the
    # save-as dialog when it has never been saved to disk.
    smod = self.CurrentSegmentation()
    if smod:
        map = smod.volume_data()
        if map == None :
            umsg ( "Map not found - please associate a map first" )
            return
        if hasattr(smod, 'path') and smod.path:
            import segfile
            segfile.write_segmentation(smod, smod.path)
            umsg ( "Saved" )
        else:
            self.SaveSegmentationAs()
    else :
        umsg ( "No segmentation selected" )
def SaveSegmentationAs(self):
    # Prompt for a new path; path_changed_cb renames the model afterwards.
    smod = self.CurrentSegmentation()
    if smod:
        import segfile
        segfile.show_save_dialog(smod, self.path_changed_cb)
def path_changed_cb(self, seg):
    # After a save-as, rename the segmentation to match its new file name
    # and refresh the file-name display.
    if seg is self.CurrentSegmentation():
        seg.name = os.path.basename(seg.path)
        self.regions_file.set(seg.name)
def ModelClosed(self, trigger, n, mlist):
    # Chimera trigger callback: clear menus that reference closed models.
    if self.cur_dmap in mlist:
        self.SetMapMenu(None)
    if self.cur_seg in mlist:
        self.cur_seg = None
        self.regions_file.set('')
def MapMenu ( self ) :
    # Rebuild the map-selection menu from the currently open Volume models.
    self.mb.menu.delete ( 0, 'end' ) # Clear menu
    from VolumeViewer import Volume
    mlist = OML(modelTypes = [Volume])
    for m in mlist :
        # 'm=m' binds the loop variable per menu entry (late-binding fix).
        self.mb.menu.add_radiobutton ( label="%s (%d)"%(m.name, m.id), variable=self.dmap,
                                       command=lambda m=m: self.MapSelected(m) )
def SetMapMenu (self, dmap):
    # Reflect `dmap` in the menu-button text and remember it as current.
    if dmap == None :
        self.dmap.set('')
    else :
        self.dmap.set( "%s (%d)" % (dmap.name, dmap.id) )
    self.cur_dmap = dmap
    #print "Set map menu to ", dmap.name
def MapSelected ( self, dmap ) :
self.cur_dmap = | |
list(padding[1]), list(padding[2])])
else:
x = _padding(x, padding[0], 2)
x = _padding(x, padding[1], 3)
x = _padding(x, padding[2], 4)
else:
if num_dynamic_axis > 0:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]])
else:
x = _padding(x, padding[0], 0)
x = _padding(x, padding[1], 1)
x = _padding(x, padding[2], 2)
else:
assert len(base_shape) == 5
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
x = _padding(x, padding[2], 3)
return x
def one_hot(indices, num_classes):
    # Thin wrapper over CNTK's one-hot encoding op.
    return C.one_hot(indices, num_classes)
def get_value(x):
    """Return the numpy value of `x`.

    Parameters and constants expose their value directly; any other
    tensor is evaluated via the backend's `eval` helper.
    """
    if isinstance(x, (C.variables.Parameter, C.variables.Constant)):
        return x.value
    return eval(x)
def batch_get_value(xs):
    """Return the values of several tensors at once (see `get_value`)."""
    return [x.value
            if isinstance(x, (C.variables.Parameter, C.variables.Constant))
            else eval(x)
            for x in xs]
def set_value(x, value):
    """Assign `value` to a CNTK parameter or constant.

    Scalars are broadcast to the variable's full shape; anything that is
    not a parameter/constant cannot be assigned and raises.
    """
    if not isinstance(x, (C.variables.Parameter, C.variables.Constant)):
        raise NotImplementedError
    if isinstance(value, (float, int)):
        # Broadcast a python scalar to the variable's shape.
        value = np.full(x.shape, value, dtype=floatx())
    x.value = value
def print_tensor(x, message=''):
    # Wrap `x` in a LambdaFunc user-function that prints `message` every
    # time the node is evaluated (the tensor value itself is not printed).
    return C.user_function(
        LambdaFunc(x,
                   when=lambda x: True,
                   execute=lambda x: print(message)))
def batch_set_value(tuples):
    """Assign values to many parameters from (variable, value) pairs."""
    for pair in tuples:
        variable, value = pair[0], pair[1]
        value = np.asarray(value)  # no-op when already an ndarray
        if not isinstance(variable, C.variables.Parameter):
            raise NotImplementedError
        variable.value = value
def stop_gradient(variables):
    """Block gradient flow through one tensor or a list/tuple of tensors.

    Note: for a list/tuple input this returns a lazy `map` object,
    mirroring the original behaviour.
    """
    if not isinstance(variables, (list, tuple)):
        return C.stop_gradient(variables)
    return map(C.stop_gradient, variables)
def switch(condition, then_expression, else_expression):
    """Element-wise select between two tensors based on `condition`.

    When `condition` has lower rank than the expressions it is broadcast
    by repeatedly expanding a trailing dimension and tiling it to the
    expressions' static shape.
    """
    ndim_cond = ndim(condition)
    ndim_expr = ndim(then_expression)
    if ndim_cond > ndim_expr:
        raise ValueError('Rank of condition should be less'
                         ' than or equal to rank of then and'
                         ' else expressions. ndim(condition)=' +
                         str(ndim_cond) + ', ndim(then_expression)'
                         '=' + str(ndim_expr))
    elif ndim_cond < ndim_expr:
        shape_expr = int_shape(then_expression)
        ndim_diff = ndim_expr - ndim_cond
        for i in range(ndim_diff):
            # Append one axis at a time and tile it to the target size.
            condition = expand_dims(condition)
            condition = tile(condition, shape_expr[ndim_cond + i])
    return C.element_select(condition,
                            then_expression,
                            else_expression)
def elu(x, alpha=1.):
    """Exponential linear unit with negative-part scale `alpha`."""
    activated = C.elu(x)
    if alpha == 1:
        return activated
    # C.elu already yields exp(x)-1 for x <= 0; scale that region by alpha.
    return C.element_select(C.greater(x, 0), activated, alpha * activated)
def in_top_k(predictions, targets, k):
    # classification_error returns 1 when the target is NOT in the top-k,
    # so invert it to get the "in top k" indicator.
    _targets = C.one_hot(targets, predictions.shape[-1])
    result = C.classification_error(predictions, _targets, topN=k)
    return 1 - C.reshape(result, shape=())
def conv2d_transpose(x, kernel, output_shape, strides=(1, 1),
                     padding='valid', data_format=None):
    """2D transposed convolution (deconvolution).

    `output_shape` includes the batch axis and is given in `data_format`
    layout; it is converted to CNTK's channels-first, batch-free layout
    before calling `C.convolution_transpose`.
    """
    if data_format is None:
        data_format = image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format ' + str(data_format))
    x = _preprocess_conv2d_input(x, data_format)
    kernel = _preprocess_conv2d_kernel(kernel, data_format)
    padding = _preprocess_border_mode(padding)
    strides = (1,) + strides
    # cntk output_shape does not include batch axis
    output_shape = output_shape[1:]
    # in keras2, need handle output shape in different format
    if data_format == 'channels_last':
        # (rows, cols, channels) -> (channels, rows, cols)
        shape = list(output_shape)
        shape[0] = output_shape[2]
        shape[1] = output_shape[0]
        shape[2] = output_shape[1]
        output_shape = tuple(shape)
    x = C.convolution_transpose(
        kernel,
        x,
        strides,
        auto_padding=[
            False,
            padding,
            padding],
        output_shape=output_shape)
    return _postprocess_conv2d_output(x, data_format)
def identity(x, name=None):
    """Return an alias node for `x`; the name defaults to '<x.name>_alias'."""
    alias_name = '%s_alias' % x.name if name is None else name
    return C.alias(x, name=alias_name)
def _preprocess_conv2d_input(x, data_format):
    # Convert a channels-last tensor to CNTK's channels-first layout.
    if data_format == 'channels_last':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols)
        # TF input shape: (samples, rows, cols, input_depth)
        x = C.transpose(x, (2, 0, 1))
    return x
def _preprocess_conv2d_kernel(kernel, data_format):
    # As of Keras 2.0.0, all kernels are normalized
    # on the format `(rows, cols, input_depth, depth)`,
    # independently of `data_format`.
    # CNTK expects `(depth, input_depth, rows, cols)`.
    kernel = C.transpose(kernel, (3, 2, 0, 1))
    return kernel
def _preprocess_border_mode(padding):
if padding == 'same':
padding = True
elif padding == 'valid':
padding = False
else:
raise ValueError('Invalid border mode: ' + str(padding))
return padding
def _postprocess_conv2d_output(x, data_format):
    # Convert CNTK's channels-first result back to channels-last if needed.
    if data_format == 'channels_last':
        x = C.transpose(x, (1, 2, 0))
    return x
def _preprocess_conv3d_input(x, data_format):
    # Convert a channels-last 3D tensor to CNTK's channels-first layout.
    if data_format == 'channels_last':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)
        # TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3,
        # input_depth)
        x = C.transpose(x, (3, 0, 1, 2))
    return x
def _preprocess_conv3d_kernel(kernel, dim_ordering):
    # Keras kernel layout (d1, d2, d3, input_depth, depth) ->
    # CNTK layout (depth, input_depth, d1, d2, d3).
    kernel = C.transpose(kernel, (4, 3, 0, 1, 2))
    return kernel
def _postprocess_conv3d_output(x, dim_ordering):
    # Convert CNTK's channels-first 3D result back to channels-last.
    if dim_ordering == 'channels_last':
        x = C.transpose(x, (1, 2, 3, 0))
    return x
def _get_dynamic_axis_num(x):
if hasattr(x, 'dynamic_axes'):
return len(x.dynamic_axes)
else:
return 0
def _contain_seqence_axis(x):
    # True when `x` has a sequence axis besides the batch axis.
    # NOTE(review): the name's "seqence" typo is kept for API compatibility.
    if _get_dynamic_axis_num(x) > 1:
        return x.dynamic_axes[1] == C.Axis.default_dynamic_axis()
    else:
        return False
def get_num_dynamic_axis(x):
    # Public alias of the private helper.
    return _get_dynamic_axis_num(x)
def _reduce_on_axis(x, axis, reduce_fun_name):
    # Apply a named CNTK reduction over one or more axes. Dynamic
    # (sequence) axes must use the C.sequence variant of the reduction
    # when available; static axes use the plain C.<reduce_fun_name>.
    if isinstance(axis, list):
        for a in axis:
            if isinstance(a, C.Axis) \
                    and a != C.Axis.default_batch_axis() \
                    and hasattr(C.sequence, reduce_fun_name):
                x = getattr(C.sequence, reduce_fun_name)(x, a)
            else:
                x = getattr(C, reduce_fun_name)(x, a)
    else:
        x = getattr(C, reduce_fun_name)(x, axis)
    return x
def _reshape_sequence(x, time_step):
    # Force the second (time) dimension of x's static shape to `time_step`.
    tmp_shape = list(int_shape(x))
    tmp_shape[1] = time_step
    return reshape(x, tmp_shape)
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
    """1D locally-connected layer: like conv1d but with unshared weights.

    Implemented by gathering each output position's input window,
    multiplying by that position's own kernel slice, and summing.
    """
    if data_format is None:
        data_format = image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format ' + str(data_format))
    stride = strides[0]
    kernel_shape = int_shape(kernel)
    output_length, feature_dim, filters = kernel_shape
    xs = []
    for i in range(output_length):
        # Window of the input feeding output position i.
        slice_length = slice(i * stride,
                             i * stride + kernel_size[0])
        xs.append(reshape(inputs[:, slice_length, :],
                          (-1, 1, feature_dim)))
    x_aggregate = concatenate(xs, axis=1)
    # transpose kernel to output_filters first, to apply broadcast
    weight = permute_dimensions(kernel, (2, 0, 1))
    # Shape: (batch, filters, output_length, input_length * kernel_size)
    output = x_aggregate * weight
    # Shape: (batch, filters, output_length)
    output = sum(output, axis=3)
    # Shape: (batch, output_length, filters)
    return permute_dimensions(output, (0, 2, 1))
def local_conv2d(inputs,
                 kernel,
                 kernel_size,
                 strides,
                 output_shape,
                 data_format=None):
    """2D locally-connected layer: like conv2d but with unshared weights.

    Gathers the (row, col) input patch for every output position,
    multiplies by that position's private kernel slice, sums over the
    patch, then reshapes back to an image in `data_format` layout.
    """
    if data_format is None:
        data_format = image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format ' + str(data_format))
    stride_row, stride_col = strides
    output_row, output_col = output_shape
    kernel_shape = int_shape(kernel)
    _, feature_dim, filters = kernel_shape
    xs = []
    for i in range(output_row):
        for j in range(output_col):
            slice_row = slice(i * stride_row,
                              i * stride_row + kernel_size[0])
            slice_col = slice(j * stride_col,
                              j * stride_col + kernel_size[1])
            if data_format == 'channels_first':
                xs.append(reshape(inputs[:, :, slice_row, slice_col],
                                  (-1, 1, feature_dim)))
            else:
                xs.append(reshape(inputs[:, slice_row, slice_col, :],
                                  (-1, 1, feature_dim)))
    x_aggregate = concatenate(xs, axis=1)
    # transpose kernel to put filters first
    weight = permute_dimensions(kernel, (2, 0, 1))
    # shape: batch, filters, output_length, input_length * kernel_size
    output = x_aggregate * weight
    # shape: batch, filters, output_length
    output = sum(output, axis=3)
    # shape: batch, filters, row, col
    output = reshape(output,
                     (-1, filters, output_row, output_col))
    if data_format == 'channels_last':
        # shape: batch, row, col, filters
        output = permute_dimensions(output, (0, 2, 3, 1))
    return output
def reverse(x, axes):
    # Reverse `x` along the given axis/axes using a full slice with
    # stride -1 per axis (begin/end of 0 select the whole extent).
    if isinstance(axes, int):
        axes = [axes]
    cntk_axes = _normalize_axis(axes, x)
    begin_index = [0 for _ in cntk_axes]
    end_index = [0 for _ in cntk_axes]
    strides = [-1 for _ in cntk_axes]
    return C.slice(x, cntk_axes, begin_index, end_index, strides)
def _reshape_batch(x, shape):
    # Reshape across the batch axis. CNTK >= 2.2 has a working
    # unpack_batch; earlier versions fall back to the custom user-function.
    # there is a bug in cntk 2.1's unpack_batch implementation
    if hasattr(C, 'unpack_batch') and _get_cntk_version() >= 2.2:
        const_a = C.unpack_batch(x)
        const_a = C.reshape(const_a, shape)
        return C.to_batch(const_a)
    else:
        return C.user_function(ReshapeBatch(x, shape[1:]))
def _get_cntk_version():
    """Return the installed CNTK version as a float (e.g. 2.2).

    Falls back to 2.0 with a warning when the version string cannot be
    parsed as a number.
    """
    version = C.__version__
    # Nightly/dev builds append '+' to the version string.
    if version.endswith('+'):
        version = version[:-1]
    # for hot fix, ignore all the . except the first one.
    # ('2.0.1' -> '2.01' so it parses as a float)
    if len(version) > 2 and version[1] == '.':
        version = version[:2] + version[2:].replace('.', '')
    try:
        return float(version)
    except ValueError:
        # A bare `except:` here would also swallow KeyboardInterrupt and
        # SystemExit; only a parse failure should trigger the fallback.
        warnings.warn(
            'CNTK backend warning: CNTK version not detected. '
            'Will using CNTK 2.0 GA as default.')
        return 2.0
class ReshapeBatch(C.ops.functions.UserFunction):
    """User-function that reshapes static axes while keeping the batch axis.

    Fallback for CNTK versions where `unpack_batch` is unavailable or
    buggy; operates directly on Value buffers (as_numpy=False).
    """
    def __init__(self, input, shape, name='reshape_with_batch'):
        super(ReshapeBatch, self).__init__([input], as_numpy=False, name=name)
        self.from_shape = input.shape
        self.target_shape = shape
    def infer_outputs(self):
        # Output keeps only the batch dynamic axis, with the new static shape.
        batch_axis = C.Axis.default_batch_axis()
        return [
            C.output_variable(
                self.target_shape,
                self.inputs[0].dtype,
                [batch_axis])]
    def forward(self, arguments, device=None, outputs_to_retain=None):
        # Total element count is preserved; derive the new batch size from it.
        num_element = arguments.shape()[0] * np.prod(np.asarray(self.from_shape))
        num_static_element = np.prod(np.asarray(self.target_shape))
        num_batch = int(num_element / num_static_element)
        result = arguments.data().as_shape((num_batch,) + self.target_shape)
        return None, C.cntk_py.Value(result)
    def backward(self, state, root_gradients):
        # Gradients flow back through the inverse reshape.
        grad_array_view = root_gradients.data()
        num_element = root_gradients.shape()[0] * np.prod(np.asarray(self.target_shape))
        num_static_element = np.prod(np.asarray(self.from_shape))
        num_old_batch = int(num_element / num_static_element)
        return C.cntk_py.Value(
            grad_array_view.as_shape(
                (num_old_batch,) + self.from_shape))
class ConvertToBatch(C.ops.functions.UserFunction):
    """Converts input first axis to CNTK batch axis.
    We may introduce this operation in CNTK native
    implementation later.
    # Arguments
    inputs: a cntk variable (parameter/constant)
    name: name of this node
    """
    def __init__(self, input, name='convert_to_batch'):
        super(ConvertToBatch, self).__init__([input], as_numpy=False, name=name)
    def infer_outputs(self):
        # Drop the first static axis; it becomes the batch dynamic axis.
        batch_axis = C.Axis.default_batch_axis()
        return [
            C.output_variable(
                self.inputs[0].shape[1:],
                self.inputs[0].dtype,
                [batch_axis])]
    def forward(self, arguments, device=None, outputs_to_retain=None):
        # Pure re-interpretation of the buffer; no data movement.
        return None, C.cntk_py.Value(arguments.data())
    def backward(self, state, root_gradients):
        return C.cntk_py.Value(root_gradients.data())
class ConvertToStatic(C.ops.functions.UserFunction):
"""Converts input first axis to CNTK static axis.
We may introduce this operation in CNTK native
implementation later.
# Arguments
inputs: a cntk tensor which has batch axis
batch_size: size of batch axis.
name: name of this node.
"""
def __init__(self, input, batch_size, name='convert_to_static'):
| |
i) for i in known_items])
unknown_is_not_known = claripy.And(*[self._unknown_item.key != i.key for i in known_items])
unknown_items_result = Implies(self.is_not_overfull(state), Implies(unknown_is_not_known, pred(state, self._unknown_item)))
# TODO try with just (since we don't really need the weird length=1 case)
#unknown_items_result = Implies(unknown_is_not_known, pred(state, self._unknown_item))
result = claripy.BoolS(self.meta.name + "_forall")
state.solver.add(result == claripy.And(known_items_result, unknown_items_result))
self.add_invariant_conjunction(state, pred.with_expr(lambda e, i: Implies(result, e)))
LOGEND(state)
return result
# === Merging ===
# Two-phase merging, so that we avoid spending lots of (solver) time on a merge only to realize the very next map can't be merged and the effort was wasted
def can_merge(self, others):
    # Cheap structural pre-check before the (solver-expensive) merge():
    # returns True only when this map and all `others` are close enough
    # (same meta, version, invariants, length, unknown item, mergeable
    # previous layers, and few differing known items).
    if all(utils.structural_eq(self, o) for o in others):
        return True
    assert all(o.meta.key_size == self.meta.key_size and o.meta.value_size == self.meta.value_size for o in others), "Different meta???"
    self_ver = self.version()
    if any(o.version() != self_ver for o in others):
        #print("Different versions", self, [x.version() for x in [self] + others])
        return False
    if any(not utils.structural_eq(self._invariants, o._invariants) for o in others):
        #print("Different invariants", self)
        return False
    if any(not utils.structural_eq(self._length, o._length) for o in others):
        #print("Different length", self, [x._length for x in [self] + others])
        return False
    if any(not utils.structural_eq(self._unknown_item, o._unknown_item) for o in others):
        #print("Different unknown item", self, [x._unknown_item for x in [self] + others])
        return False
    if self_ver > 0 and not self._previous.can_merge([o._previous for o in others]):
        #print("Cannot merge previous", self)
        return False
    if self_ver != 0 and any(len(o._known_items) != len(self._known_items) for o in others):
        return False
    # Heuristic cutoff: merging is quadratic-ish in differing items, so
    # give up when any other map would add too many new known items.
    max_to_add = 0
    #all_to_add = []
    for o in others:
        (_, _, to_add) = utils.structural_diff(self._known_items, o._known_items)
        max_to_add = max(max_to_add, len(to_add))
        #all_to_add.append([i.key for i in to_add])
    if max_to_add > 6:
        #print("too many items to add", self, max_to_add, all_to_add)
        return False
    #print("merging, max_to_add=", max_to_add, " #known_items=", [len(x._known_items) for x in [self] + others])
    return True
# This assumes the solvers have already been merged
def merge(self, state, others, other_states, merge_conditions):
    # Merge this map with `others` under the given merge conditions.
    # Assumes can_merge() returned True and the solvers are already merged.
    if all(utils.structural_eq(self, o) for o in others):
        return self
    if self._previous is None:
        # Base layer: reconcile known items by adding solver constraints.
        for (o, mc) in zip(others, merge_conditions[1:]):
            (only_left, both, only_right) = utils.structural_diff(self._known_items, o._known_items)
            # Items that were only here are subject to the invariant if the merge condition holds
            for i in only_left:
                state.solver.add(*[Implies(mc, inv(state, i)) for inv in self.invariant_conjunctions()])
            # Other items must be those of the other state if the merge condition holds
            for i in both + only_right:
                (v, p) = self.get(state, i.key)
                state.solver.add(Implies(mc, (v == i.value) & (p == i.present)))
        # Basic map invariants have to hold no matter what
        for (n, i) in enumerate(self._known_items):
            state.solver.add(*[Implies(i.key == oi.key, (i.value == oi.value) & (i.present == oi.present)) for oi in self._known_items[(n+1):] + [self._unknown_item]])
        state.solver.add(self.is_not_overfull(state))
        return self
    else:
        # Layered map: merge item-by-item with ITEs keyed on the merge
        # conditions, then recurse into the previous layer.
        # we know lengths are the same due to can_merge
        self._known_items = [MapItem(
            claripy.ite_cases(zip(merge_conditions, [o._known_items[i].key for o in others]), self._known_items[i].key),
            claripy.ite_cases(zip(merge_conditions, [o._known_items[i].value for o in others]), self._known_items[i].value),
            claripy.ite_cases(zip(merge_conditions, [o._known_items[i].present for o in others]), self._known_items[i].present)
        ) for i in range(len(self._known_items))]
        self._previous = self._previous.merge(state, [o._previous for o in others], other_states, merge_conditions)
        return self
# === Private API, also used by invariant inference ===
# TODO sort out what's actually private and not; verif also uses stuff...
def __init__(self, meta, length, invariants, known_items, _previous=None, _unknown_item=None, _known_items_cache=None):
    # "length" is symbolic, and may be larger than len(items) if there are items that are not exactly known
    # "invariants" is a list of conjunctions that represents unknown items: each is a lambda that takes (state, item) and returns a Boolean expression
    # "items" contains exactly known items, which do not have to obey the invariants
    self.meta = meta
    self._length = length
    self._invariants = invariants
    self._known_items = known_items
    self._previous = _previous
    if _unknown_item is None:
        # Fresh symbolic placeholder standing for "any item not known exactly".
        _unknown_item = MapItem(
            claripy.BVS(self.meta.name + "_uk", self.meta.key_size),
            claripy.BVS(self.meta.name + "_uv", self.meta.value_size),
            claripy.BoolS(self.meta.name + "_up")
        )
    self._unknown_item = _unknown_item
    # (count of previous-layer items already transformed, transformed items);
    # incremental cache used by known_items().
    self._known_items_cache = _known_items_cache or (0, [])
def version(self):
    """Return this map's version: the number of layers beneath it."""
    depth = 0
    layer = self._previous
    while layer is not None:
        depth += 1
        layer = layer._previous
    return depth
def oldest_version(self):
    """Return the base (version-0) layer of this map."""
    layer = self
    while layer._previous is not None:
        layer = layer._previous
    return layer
def invariant_conjunctions(self):
    """Return a copy of the invariant list, stored on the base layer."""
    layer = self
    while layer._previous is not None:
        layer = layer._previous
    return layer._invariants.copy()
def add_invariant_conjunction(self, state, inv):
    # Record a new invariant on the base layer, asserting it on the
    # unknown item so existing symbolic state stays consistent.
    if not isinstance(inv, MapInvariant): # TODO we really need types to avoid that sort of thing
        inv = MapInvariant.new(state, self.meta, inv)
    if self._previous is None:
        state.solver.add(inv(state, self._unknown_item))
        self._invariants.append(inv)
    else:
        self._previous.add_invariant_conjunction(state, inv)
def known_items(self, only_set=False): # only_set is used for optimizations, e.g., don't check the invariant on items from get where it holds by design
    # Return all known items visible at this layer: this layer's own items
    # plus the previous layers' items rewritten so this layer's writes win.
    def transform_previous(items):
        # Overlay this layer's values/presence on older items with the same
        # key; drop older items whose key is syntactically shadowed here.
        return [
            MapItem(
                i.key,
                claripy.ite_cases([(i.key == ki.key, ki.value) for ki in self._known_items], i.value),
                claripy.ite_cases([(i.key == ki.key, ki.present) for ki in self._known_items], i.present)
            )
            for i in items
            if all(not i.key.structurally_match(ki.key) for ki in self._known_items)
        ]
    if only_set:
        if self._previous is None:
            return []
        return self._known_items + transform_previous(self._previous.known_items(only_set=True))
    if self._previous is None:
        return self._known_items
    # Optimization: Cache known items from previous layers and only do incremental updates, in case there are many
    # 'count' increases any time a known item is added to any layer, so we can cache the result based on it
    previous_items = self._previous.known_items()
    (cached_count, cached_items) = self._known_items_cache
    if len(previous_items) != cached_count:
        extra_items = transform_previous(self._previous.known_items()[cached_count:])
        self._known_items_cache = (len(previous_items), cached_items + extra_items)
    return self._known_items + self._known_items_cache[1]
def add_item(self, item):
    """Record a known item on the oldest (base) layer of this map."""
    layer = self
    while layer._previous is not None:
        layer = layer._previous
    layer._known_items.append(item)
def with_item_layer(self, item, length_change):
    # Return a new Map with `item` recorded and the length adjusted.
    # Optimization: "Flatten" layers if it's safe to do so
    # (safe when the new key is concrete and distinct from every key in
    # this layer, so it cannot shadow anything).
    if self._previous is not None:
        if not item.key.symbolic and all(not i.key.symbolic and not i.key.structurally_match(item.key) for i in self._known_items):
            return Map(
                self.meta,
                self._length + length_change,
                self._invariants,
                self._known_items + [item],
                _previous=self._previous,
                _unknown_item=self._unknown_item,
                _known_items_cache=self._known_items_cache
            )
    return Map(
        self.meta,
        self._length + length_change,
        [], # no extra invariants, just use the ones in _previous
        [item],
        _previous=self,
        _unknown_item=self._unknown_item,
    )
def is_definitely_empty(self):
    # True only when the length is syntactically the zero bitvector
    # (no solver query involved).
    l = self.length()
    return l.structurally_match(claripy.BVV(0, l.size()))
def is_not_overfull(self, state):
    # Boolean expression: the number of distinct present known keys does
    # not exceed the map's length.
    l = self.length()
    known_items = self.known_items()
    # Optimization: If the map length is concrete and there are definitely not too many items, don't even compute the known length
    if utils.definitely_true(state.solver, len(known_items) <= l):
        return claripy.true
    #print("overfull? rip=", state.regs.rip, " minlen=", state.solver.min(l), " actlen=", len(known_items), " items=", [i.key for i in known_items])
    known_len = claripy.BVV(0, l.size())
    known_keys = []
    for item in known_items:
        # Count each key at most once, and only when the item is present.
        key_is_new = claripy.And(*[item.key != k for k in known_keys])
        known_keys.append(item.key)
        known_len = known_len + claripy.If(key_is_new & item.present, claripy.BVV(1, l.size()), claripy.BVV(0, l.size()))
    return known_len <= l
def __copy__(self):
    # A shallow copy would share mutable layer state; always deep-copy.
    return self.__deepcopy__({})
def __deepcopy__(self, memo):
    # Copy the layer chain; immutable members are shared, mutable lists
    # are shallow-copied (their elements are immutable).
    result = Map(
        self.meta, # immutable
        self._length, # immutable
        copy.copy(self._invariants), # contents are immutable
        copy.copy(self._known_items), # contents are immutable
        copy.deepcopy(self._previous, memo),
        self._unknown_item, # immutable
        self._known_items_cache # immutable
    )
    memo[id(self)] = result
    return result
def __repr__(self):
    # e.g. "<Map my_map v2>"
    return f"<Map {self.meta.name} v{self.version()}>"
def _asdict(self): # pretend we are a namedtuple so functions that expect one will work (e.g. utils.structural_eq)
    return {'meta': self.meta, '_length': self._length, '_invariants': self._invariants, '_known_items': self._known_items, '_previous': self._previous, '_unknown_item': self._unknown_item}
class GhostMapsPlugin(SimStatePlugin):
# === Public API ===
def new(self, key_size, value_size, name, _length=None, _invariants=None): # TODO new_havoced instead of _length/_invariants?
    # Allocate a fresh symbolic pointer and register a new empty map under it.
    obj = claripy.BVS(name, self.state.sizes.ptr)
    self[obj] = Map.new(self.state, key_size, value_size, name, _length=_length, _invariants=_invariants)
    return obj
def new_array(self, key_size, value_size, length, name, obj=None): # obj so we can create arrays at existing points for BPF... (in general the whole GhostMapsPlugin API is dubious)
    # Like new(), but the map is an array: fully present over [0, length).
    if obj is None:
        obj = claripy.BVS(name, self.state.sizes.ptr)
    self[obj] = Map.new_array(self.state, key_size, value_size, length, name)
    return obj
def length(self, obj):
    # Symbolic length of the map identified by `obj`.
    return self[obj].length()
def key_size(self, obj):
    # Key width in bits of the map identified by `obj`.
    return self[obj].meta.key_size
def value_size(self, obj):
    # Value width in bits of the map identified by `obj`.
    return self[obj].meta.value_size
def get(self, obj, key, conditioned_value=None, condition=claripy.true, version=None):
    # Delegate to Map.get; returns a (value, present) pair.
    return self[obj].get(self.state, key, conditioned_value=conditioned_value, condition=condition, version=version)
def set(self, obj, key, value):
    # Maps are persistent: set() returns a new Map, so rebind it.
    self[obj] = self[obj].set(self.state, key, value)
def remove(self, obj, key):
    # Maps are persistent: remove() returns a new Map, so rebind it.
    self[obj] = self[obj].remove(self.state, key)
def forall(self, obj, pred):
    # Boolean expression: `pred` holds for every item of the map.
    return self[obj].forall(self.state, pred)
# === Import and Export ===
def get_all(self):
    # Export all (object AST, Map) pairs; keys are stored as cache_keys.
    return [(obj.ast, m) for (obj, m) in self._maps.items()]
# === Private API, including for invariant inference ===
def __getitem__(self, obj):
    # Index by the AST's cache_key so structurally-equal objects alias.
    return self._maps[obj.cache_key]
def __setitem__(self, obj, map):
    # Index by the AST's cache_key so structurally-equal objects alias.
    self._maps[obj.cache_key] = map
# === Angr stuff ===
def __init__(self, _maps=None):
    """Create the plugin, optionally wrapping an existing obj->Map dict."""
    SimStatePlugin.__init__(self)
    # A mutable default ({}) would be shared by every instance created
    # without an explicit argument, silently leaking maps across states.
    self._maps = {} if _maps is None else _maps
@SimStatePlugin.memo
def copy(self, memo):
    # Deep-copy the maps so states don't share layer mutations.
    return GhostMapsPlugin(_maps={k: copy.deepcopy(v, memo) for (k, v) in self._maps.items()}) # no need to deepcopy the keys
def merge_triage(self, others):
triaged = [[self]]
for other in others:
for candidate in triaged:
if candidate[0].can_merge([other]):
candidate.append(other)
break
else:
triaged.append([other])
#print("triaged", [len(x) for | |
running from ``0`` to
``self.n_Vrepresentation()-1``. If present, the
V-representation object at the given index will be
returned. Without an argument, returns the list of all
V-representation objects.
EXAMPLES::
sage: p = polytopes.simplex(4, project=True)
sage: p.Vrepresentation(0)
A vertex at (0.7071067812, 0.4082482905, 0.2886751346, 0.2236067977)
sage: p.Vrepresentation(0) == p.Vrepresentation() [0]
True
"""
if index is None:
return self._Vrepresentation
else:
return self._Vrepresentation[index]
@cached_method
def n_Vrepresentation(self):
    """
    Return the number of objects that make up the
    V-representation of the polyhedron.

    OUTPUT:

    Integer.

    EXAMPLES::

        sage: p = polytopes.simplex(4)
        sage: p.n_Vrepresentation()
        5
        sage: p.n_Vrepresentation() == p.n_vertices() + p.n_rays() + p.n_lines()
        True
    """
    # Vertices + rays + lines; cached since the V-representation is immutable.
    return len(self.Vrepresentation())
def Vrep_generator(self):
    """
    Return an iterator over the objects of the V-representation
    (vertices, rays, and lines).

    EXAMPLES::

        sage: p = polytopes.cyclic_polytope(3,4)
        sage: vg = p.Vrep_generator()
        sage: next(vg)
        A vertex at (0, 0, 0)
        sage: next(vg)
        A vertex at (1, 1, 1)
    """
    yield from self.Vrepresentation()
def inequality_generator(self):
    """
    Return a generator for the defining inequalities of the
    polyhedron.

    OUTPUT:

    A generator of the inequality Hrepresentation objects.

    EXAMPLES::

        sage: triangle = Polyhedron(vertices=[[1,0],[0,1],[1,1]])
        sage: for v in triangle.inequality_generator(): print(v)
        An inequality (1, 1) x - 1 >= 0
        An inequality (0, -1) x + 1 >= 0
        An inequality (-1, 0) x + 1 >= 0
        sage: [ v for v in triangle.inequality_generator() ]
        [An inequality (1, 1) x - 1 >= 0,
         An inequality (0, -1) x + 1 >= 0,
         An inequality (-1, 0) x + 1 >= 0]
        sage: [ [v.A(), v.b()] for v in triangle.inequality_generator() ]
        [[(1, 1), -1], [(0, -1), 1], [(-1, 0), 1]]
    """
    # Filter the full H-representation down to inequalities (skip equations).
    for constraint in self.Hrepresentation():
        if not constraint.is_inequality():
            continue
        yield constraint
@cached_method
def inequalities(self):
    """
    Return all inequalities.

    OUTPUT:

    A tuple of inequalities.

    EXAMPLES::

        sage: p = Polyhedron(vertices = [[0,0,0],[0,0,1],[0,1,0],[1,0,0],[2,2,2]])
        sage: p.inequalities()[0:3]
        (An inequality (1, 0, 0) x + 0 >= 0,
         An inequality (0, 1, 0) x + 0 >= 0,
         An inequality (0, 0, 1) x + 0 >= 0)
        sage: p3 = Polyhedron(vertices = Permutations([1,2,3,4]))
        sage: ieqs = p3.inequalities()
        sage: ieqs[0]
        An inequality (0, 1, 1, 1) x - 6 >= 0
        sage: list(_)
        [-6, 0, 1, 1, 1]
    """
    # Materialize the generator once; cached since the polyhedron is immutable.
    return tuple(self.inequality_generator())
def inequalities_list(self):
    """
    Return a list of inequalities as coefficient lists.

    .. NOTE::

        It is recommended to use :meth:`inequalities` or
        :meth:`inequality_generator` instead to iterate over the
        list of :class:`Inequality` objects.

    EXAMPLES::

        sage: p = Polyhedron(vertices = [[0,0,0],[0,0,1],[0,1,0],[1,0,0],[2,2,2]])
        sage: p.inequalities_list()[0:3]
        [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
        sage: p3 = Polyhedron(vertices = Permutations([1,2,3,4]))
        sage: ieqs = p3.inequalities_list()
        sage: ieqs[0]
        [-6, 0, 1, 1, 1]
        sage: ieqs[-1]
        [-3, 0, 1, 0, 1]
        sage: ieqs == [list(x) for x in p3.inequality_generator()]
        True
    """
    return list(map(list, self.inequality_generator()))
def equation_generator(self):
    """
    Return a generator for the linear equations satisfied by the
    polyhedron.

    EXAMPLES::

        sage: p = polytopes.regular_polygon(8,base_ring=RDF)
        sage: p3 = Polyhedron(vertices = [x+[0] for x in p.vertices()], base_ring=RDF)
        sage: next(p3.equation_generator())
        An equation (0.0, 0.0, 1.0) x + 0.0 == 0
    """
    # Filter the full H-representation down to equations (skip inequalities).
    for constraint in self.Hrepresentation():
        if not constraint.is_equation():
            continue
        yield constraint
@cached_method
def equations(self):
    """
    Return all linear constraints of the polyhedron.

    OUTPUT:

    A tuple of equations.

    EXAMPLES::

        sage: test_p = Polyhedron(vertices = [[1,2,3,4],[2,1,3,4],[4,3,2,1],[3,4,1,2]])
        sage: test_p.equations()
        (An equation (1, 1, 1, 1) x - 10 == 0,)
    """
    # Materialize the generator once; cached since the polyhedron is immutable.
    return tuple(self.equation_generator())
def equations_list(self):
"""
Return the linear constraints of the polyhedron. As with
inequalities, each constraint is given as [b -a1 -a2 ... an]
where for variables x1, x2,..., xn, the polyhedron satisfies
the equation b = a1*x1 + a2*x2 + ... + an*xn.
.. NOTE::
It is recommended to use :meth:`equations` or
:meth:`equation_generator()` instead to iterate over the
list of
:class:`~sage.geometry.polyhedron.representation.Equation`
objects.
EXAMPLES::
sage: test_p = Polyhedron(vertices = [[1,2,3,4],[2,1,3,4],[4,3,2,1],[3,4,1,2]])
sage: test_p.equations_list()
[[-10, 1, 1, 1, 1]]
"""
return [list(eq) for eq in self.equation_generator()]
def vertices_list(self):
"""
Return a list of vertices of the polyhedron.
.. NOTE::
It is recommended to use :meth:`vertex_generator` instead to
iterate over the list of :class:`Vertex` objects.
.. WARNING::
If the polyhedron has lines, return the vertices
of the ``Vrepresentation``. However, the represented polyhedron
has no 0-dimensional faces (i.e. vertices)::
sage: P = Polyhedron(rays=[[1,0,0]],lines=[[0,1,0]])
sage: P.vertices_list()
[[0, 0, 0]]
sage: P.faces(0)
()
EXAMPLES::
sage: triangle = Polyhedron(vertices=[[1,0],[0,1],[1,1]])
sage: triangle.vertices_list()
[[0, 1], [1, 0], [1, 1]]
sage: a_simplex = Polyhedron(ieqs = [
....: [0,1,0,0,0],[0,0,1,0,0],[0,0,0,1,0],[0,0,0,0,1]
....: ], eqns = [[1,-1,-1,-1,-1]])
sage: a_simplex.vertices_list()
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
sage: a_simplex.vertices_list() == [list(v) for v in a_simplex.vertex_generator()]
True
"""
return [list(x) for x in self.vertex_generator()]
def vertex_generator(self):
"""
Return a generator for the vertices of the polyhedron.
.. WARNING::
If the polyhedron has lines, return a generator for the vertices
of the ``Vrepresentation``. However, the represented polyhedron
has no 0-dimensional faces (i.e. vertices)::
sage: P = Polyhedron(rays=[[1,0,0]],lines=[[0,1,0]])
sage: list(P.vertex_generator())
[A vertex at (0, 0, 0)]
sage: P.faces(0)
()
EXAMPLES::
sage: triangle = Polyhedron(vertices=[[1,0],[0,1],[1,1]])
sage: for v in triangle.vertex_generator(): print(v)
A vertex at (0, 1)
A vertex at (1, 0)
A vertex at (1, 1)
sage: v_gen = triangle.vertex_generator()
sage: next(v_gen) # the first vertex
A vertex at (0, 1)
sage: next(v_gen) # the second vertex
A vertex at (1, 0)
sage: next(v_gen) # the third vertex
A vertex at (1, 1)
sage: try: next(v_gen) # there are only three vertices
....: except StopIteration: print("STOP")
STOP
sage: type(v_gen)
<... 'generator'>
sage: [ v for v in triangle.vertex_generator() ]
[A vertex at (0, 1), A vertex at (1, 0), A vertex at (1, 1)]
"""
for V in self.Vrepresentation():
if V.is_vertex():
yield V
@cached_method
def vertices(self):
"""
Return all vertices of the polyhedron.
OUTPUT:
A tuple of vertices.
.. WARNING::
If the polyhedron has lines, return the vertices
of the ``Vrepresentation``. However, the represented polyhedron
has no 0-dimensional faces (i.e. vertices)::
sage: P = Polyhedron(rays=[[1,0,0]],lines=[[0,1,0]])
sage: P.vertices()
(A vertex at (0, 0, 0),)
sage: P.faces(0)
()
EXAMPLES::
sage: triangle = Polyhedron(vertices=[[1,0],[0,1],[1,1]])
sage: triangle.vertices()
(A vertex at (0, 1), A vertex at (1, 0), A vertex at (1, 1))
sage: a_simplex = Polyhedron(ieqs = [
....: [0,1,0,0,0],[0,0,1,0,0],[0,0,0,1,0],[0,0,0,0,1]
....: ], eqns = [[1,-1,-1,-1,-1]])
sage: a_simplex.vertices()
(A vertex at (1, 0, 0, 0), A vertex at (0, 1, 0, 0),
A vertex at (0, 0, 1, 0), A vertex at (0, 0, 0, 1))
"""
return tuple(self.vertex_generator())
@cached_method
def vertices_matrix(self, base_ring=None):
"""
Return the coordinates of the vertices as the columns of a matrix.
INPUT:
- ``base_ring`` -- A ring or ``None`` (default). The base ring
of the returned matrix. If not specified, the base ring of
the polyhedron is used.
OUTPUT:
A matrix over ``base_ring`` whose columns are the coordinates
of the vertices. A ``TypeError`` is raised if the coordinates
cannot be converted to ``base_ring``.
.. WARNING::
If the polyhedron has lines, return the coordinates of the vertices
of the ``Vrepresentation``. However, the represented polyhedron
has no 0-dimensional faces (i.e. vertices)::
sage: P = Polyhedron(rays=[[1,0,0]],lines=[[0,1,0]])
sage: P.vertices_matrix()
[0]
[0]
[0]
sage: P.faces(0)
()
EXAMPLES::
sage: triangle = Polyhedron(vertices=[[1,0],[0,1],[1,1]])
sage: triangle.vertices_matrix()
[0 1 1]
[1 0 1]
sage: (triangle/2).vertices_matrix()
[ 0 1/2 1/2]
[1/2 0 1/2]
sage: (triangle/2).vertices_matrix(ZZ)
Traceback (most recent call last):
...
TypeError: no conversion of this rational to integer
TESTS:
Check that :trac:`28828` is fixed::
sage: P.vertices_matrix().is_immutable()
True
"""
if base_ring is None:
base_ring = self.base_ring()
m = matrix(base_ring, self.ambient_dim(), self.n_vertices())
for i, v in enumerate(self.vertices()):
for j in range(self.ambient_dim()):
m[j, i] = v[j]
m.set_immutable()
return m
def an_affine_basis(self):
"""
Return vertices that are a basis for the affine
span of the polytope.
This basis is obtained by considering a maximal chain of faces
in the face lattice and picking for each cover relation
one vertex that is in the difference. Thus this method
is independent of the concrete realization of the polytope.
EXAMPLES::
sage: P = polytopes.cube()
sage: P.an_affine_basis()
[A vertex at (-1, -1, -1),
A vertex at (1, | |
= []
from mist.api.machines.models import Machine
all_machines = Machine.objects(cloud=self.cloud, missing_since=None)
for machine in all_machines:
if machine.extra.get('tags', {}).get('type') == 'hypervisor':
machines.append(machine)
return machines
def _list_machines__update_generic_machine_state(self, machine):
# Defaults
machine.unreachable_since = None
machine.state = config.STATES[NodeState.RUNNING.value]
# If any of the probes has succeeded, then state is running
if (
machine.ssh_probe and not machine.ssh_probe.unreachable_since or
machine.ping_probe and not machine.ping_probe.unreachable_since
):
machine.state = config.STATES[NodeState.RUNNING.value]
# If ssh probe failed, then unreachable since then
if machine.ssh_probe and machine.ssh_probe.unreachable_since:
machine.unreachable_since = machine.ssh_probe.unreachable_since
machine.state = config.STATES[NodeState.UNKNOWN.value]
# Else if ssh probe has never succeeded and ping probe failed,
# then unreachable since then
elif (not machine.ssh_probe and
machine.ping_probe and machine.ping_probe.unreachable_since):
machine.unreachable_since = machine.ping_probe.unreachable_since
machine.state = config.STATES[NodeState.UNKNOWN.value]
def _list_machines__machine_actions(self, machine, node_dict):
super(LibvirtComputeController, self)._list_machines__machine_actions(
machine, node_dict)
machine.actions.clone = True
machine.actions.undefine = False
if node_dict['state'] is NodeState.TERMINATED.value:
# In libvirt a terminated machine can be started.
machine.actions.start = True
machine.actions.undefine = True
machine.actions.rename = True
if node_dict['state'] is NodeState.RUNNING.value:
machine.actions.suspend = True
if node_dict['state'] is NodeState.SUSPENDED.value:
machine.actions.resume = True
def _list_machines__generic_machine_actions(self, machine):
super(LibvirtComputeController,
self)._list_machines__generic_machine_actions(machine)
machine.actions.rename = True
machine.actions.start = False
machine.actions.stop = False
machine.actions.destroy = False
machine.actions.reboot = False
    def _list_machines__postparse_machine(self, machine, node_dict):
        """Post-process one listed libvirt machine.

        Stores the escaped domain XML, extracts PCI passthrough (VNF)
        addresses from the XML, copies the CPU count, and links the
        machine to its hypervisor parent.

        Returns True if any stored field changed (so callers know to
        persist the machine).
        """
        updated = False
        xml_desc = node_dict['extra'].get('xml_description')
        if xml_desc:
            # Store the XML escaped; compare against the stored copy to
            # detect changes.
            escaped_xml_desc = escape(xml_desc)
            if machine.extra.get('xml_description') != escaped_xml_desc:
                machine.extra['xml_description'] = escaped_xml_desc
                updated = True
            import xml.etree.ElementTree as ET
            root = ET.fromstring(unescape(xml_desc))
            devices = root.find('devices')
            # TODO: rethink image association
            vnfs = []
            # Passed-through host devices: plain <hostdev> entries plus
            # hostdev-backed network interfaces.
            hostdevs = devices.findall('hostdev') + \
                devices.findall('interface[@type="hostdev"]')
            for hostdev in hostdevs:
                address = hostdev.find('source').find('address')
                # Render the PCI address as domain:bus:slot.function,
                # stripping the 0x prefixes.
                vnf_addr = '%s:%s:%s.%s' % (
                    address.attrib.get('domain').replace('0x', ''),
                    address.attrib.get('bus').replace('0x', ''),
                    address.attrib.get('slot').replace('0x', ''),
                    address.attrib.get('function').replace('0x', ''),
                )
                vnfs.append(vnf_addr)
            if machine.extra.get('vnfs', []) != vnfs:
                machine.extra['vnfs'] = vnfs
                updated = True
        # Number of CPUs allocated to guest.
        if 'processors' in machine.extra and \
                machine.extra.get('cpus', []) != machine.extra['processors']:
            machine.extra['cpus'] = machine.extra['processors']
            updated = True
        # set machine's parent
        hypervisor = machine.extra.get('hypervisor', '')
        if hypervisor:
            try:
                from mist.api.machines.models import Machine
                parent = Machine.objects.get(cloud=machine.cloud,
                                             name=hypervisor)
            except Machine.DoesNotExist:
                # backwards compatibility: older records key the parent by
                # machine_id with dots replaced by dashes
                hypervisor = hypervisor.replace('.', '-')
                try:
                    parent = Machine.objects.get(cloud=machine.cloud,
                                                 machine_id=hypervisor)
                except me.DoesNotExist:
                    parent = None
            if machine.parent != parent:
                machine.parent = parent
                updated = True
        return updated
def _list_machines__get_machine_extra(self, machine, node_dict):
extra = copy.copy(node_dict['extra'])
# make sure images_location is not overridden
extra.update({'images_location': machine.extra.get('images_location')})
return extra
    def _list_machines__get_size(self, node):
        # Libvirt nodes carry no standard size; sizes are built per-node
        # by _list_machines__get_custom_size instead.
        return None
def _list_machines__get_custom_size(self, node):
if not node.get('size'):
return
from mist.api.clouds.models import CloudSize
updated = False
try:
_size = CloudSize.objects.get(
cloud=self.cloud, external_id=node['size'].get('name'))
except me.DoesNotExist:
_size = CloudSize(cloud=self.cloud,
external_id=node['size'].get('name'))
updated = True
if int(_size.ram or 0) != int(node['size'].get('ram', 0)):
_size.ram = int(node['size'].get('ram'))
updated = True
if _size.cpus != node['size'].get('extra', {}).get('cpus'):
_size.cpus = node['size'].get('extra', {}).get('cpus')
updated = True
if _size.disk != int(node['size'].get('disk')):
_size.disk = int(node['size'].get('disk'))
name = ""
if _size.cpus:
name += '%s CPUs, ' % _size.cpus
if _size.ram:
name += '%dMB RAM' % _size.ram
if _size.disk:
name += f', {_size.disk}GB disk.'
if _size.name != name:
_size.name = name
updated = True
if updated:
_size.save()
return _size
def _list_machines__get_location(self, node):
return node['extra'].get('hypervisor').replace('.', '-')
    def list_sizes(self, persist=True):
        # Libvirt exposes no predefined sizes; custom sizes are derived
        # per machine instead.
        return []
def _list_locations__fetch_locations(self, persist=True):
"""
We refer to hosts (KVM hypervisors) as 'location' for
consistency purpose.
"""
from mist.api.machines.models import Machine
# FIXME: query parent for better performance
hosts = Machine.objects(cloud=self.cloud,
missing_since=None,
parent=None)
locations = [NodeLocation(id=host.machine_id,
name=host.name,
country='', driver=None,
extra=copy.deepcopy(host.extra))
for host in hosts]
return locations
def list_images_single_host(self, host):
driver = self._get_host_driver(host)
return driver.list_images(location=host.extra.get(
'images_location', {}))
async def list_images_all_hosts(self, hosts, loop):
images = [
loop.run_in_executor(None, self.list_images_single_host, host)
for host in hosts
]
return await asyncio.gather(*images)
    def _list_images__fetch_images(self, search=None):
        """Fetch images from every hypervisor host of this cloud.

        Hosts are queried concurrently on an asyncio event loop; if the
        thread's loop is closed or missing, a fresh one is installed.
        The per-host image lists are flattened into one list.
        """
        from mist.api.machines.models import Machine
        hosts = Machine.objects(cloud=self.cloud, parent=None,
                                missing_since=None)
        try:
            loop = asyncio.get_event_loop()
            if loop.is_closed():
                raise RuntimeError('loop is closed')
        except RuntimeError:
            # No usable loop in this thread: create and install one.
            asyncio.set_event_loop(asyncio.new_event_loop())
            loop = asyncio.get_event_loop()
        all_images = loop.run_until_complete(self.list_images_all_hosts(hosts,
                                                                        loop))
        # Flatten [[imgs of host 1], [imgs of host 2], ...] into one list.
        return [image for host_images in all_images for image in host_images]
    def _list_images__postparse_image(self, image, image_libcloud):
        """Record on which location(s) an image is available.

        Resolves the libcloud image's host name to a CloudLocation (first
        by name, then by the dash-for-dot external id for backwards
        compatibility) and stores the matching location ids in
        ``image.extra['locations']``.
        """
        locations = []
        if image_libcloud.extra.get('host', ''):
            host_name = image_libcloud.extra.get('host')
            from mist.api.clouds.models import CloudLocation
            try:
                host = CloudLocation.objects.get(cloud=self.cloud,
                                                 name=host_name)
                locations.append(host.id)
            except me.DoesNotExist:
                # backwards compatibility: external ids use dashes in
                # place of dots
                host_name = host_name.replace('.', '-')
                try:
                    host = CloudLocation.objects.get(cloud=self.cloud,
                                                     external_id=host_name)
                    locations.append(host.id)
                except me.DoesNotExist:
                    pass
        image.extra.update({'locations': locations})
def _get_libcloud_node(self, machine, no_fail=False):
assert self.cloud == machine.cloud
machine_type = machine.extra.get('tags', {}).get('type')
host = machine if machine_type == 'hypervisor' else machine.parent
driver = self._get_host_driver(host)
for node in driver.list_nodes():
if node.id == machine.machine_id:
return node
if no_fail:
return Node(machine.machine_id, name=machine.machine_id,
state=0, public_ips=[], private_ips=[],
driver=self.connection)
raise MachineNotFoundError(
"Machine with machine_id '%s'." % machine.machine_id
)
    def _reboot_machine(self, machine, node):
        """Reboot *node*.

        Hypervisor hosts are rebooted over SSH with ``shutdown -r now``;
        guest domains are rebooted through the libvirt driver.
        """
        hypervisor = node.extra.get('tags', {}).get('type', None)
        if hypervisor == 'hypervisor':
            # issue an ssh command for the libvirt hypervisor
            try:
                # Prefer the public address; fall back to the private one.
                hostname = node.public_ips[0] if \
                    node.public_ips else \
                    node.private_ips[0]
                command = '$(command -v sudo) shutdown -r now'
                # todo move it up
                from mist.api.methods import ssh_command
                ssh_command(self.cloud.owner, self.cloud.id,
                            node.id, hostname, command)
                return True
            except MistError as exc:
                log.error("Could not ssh machine %s", machine.name)
                raise
            except Exception as exc:
                log.exception(exc)
                # FIXME: Do not raise InternalServerError!
                raise InternalServerError(exc=exc)
        else:
            node.reboot()
def _rename_machine(self, machine, node, name):
if machine.extra.get('tags', {}).get('type') == 'hypervisor':
machine.name = name
machine.save()
from mist.api.helpers import trigger_session_update
trigger_session_update(machine.owner.id, ['clouds'])
else:
self._get_host_driver(machine).ex_rename_node(node, name)
    def remove_machine(self, machine):
        """Remove *machine* from mist (the libvirt domain is untouched).

        Deletes its key associations, marks it missing, detaches it from
        the cloud's host list when it is a hypervisor, and publishes the
        resulting machine-list patch to any listening sessions.
        """
        from mist.api.machines.models import KeyMachineAssociation
        KeyMachineAssociation.objects(machine=machine).delete()
        machine.missing_since = datetime.datetime.now()
        machine.save()
        if machine.machine_type == 'hypervisor':
            # The machine is a host; also detach it from the cloud.
            self.cloud.hosts.remove(machine.id)
            self.cloud.save()
        if amqp_owner_listening(self.cloud.owner.id):
            # Push the updated machine list to connected clients.
            old_machines = [m.as_dict() for m in
                            self.cloud.ctl.compute.list_cached_machines()]
            new_machines = self.cloud.ctl.compute.list_machines()
            self.cloud.ctl.compute.produce_and_publish_patch(
                old_machines, new_machines)
def _start_machine(self, machine, node):
driver = self._get_host_driver(machine)
return driver.ex_start_node(node)
def _stop_machine(self, machine, node):
driver = self._get_host_driver(machine)
return driver.ex_stop_node(node)
def _resume_machine(self, machine, node):
driver = self._get_host_driver(machine)
return driver.ex_resume_node(node)
def _destroy_machine(self, machine, node):
driver = self._get_host_driver(machine)
return driver.destroy_node(node)
def _suspend_machine(self, machine, node):
driver = self._get_host_driver(machine)
return driver.ex_suspend_node(node)
def _undefine_machine(self, machine, node, delete_domain_image=False):
if machine.extra.get('active'):
raise BadRequestError('Cannot undefine an active domain')
driver = self._get_host_driver(machine)
result = driver.ex_undefine_node(node)
if delete_domain_image and result:
xml_description = node.extra.get('xml_description', '')
if xml_description:
index1 = xml_description.index("source file") + 13
index2 = index1 + xml_description[index1:].index('\'')
image_path = xml_description[index1:index2]
driver._run_command("rm {}".format(image_path))
return result
def _clone_machine(self, machine, node, name, resume):
driver = self._get_host_driver(machine)
return driver.ex_clone_node(node, new_name=name)
    def _list_sizes__get_cpu(self, size):
        # The CPU count lives in the size's extra dict under 'cpu'.
        return size.extra.get('cpu')
    def _generate_plan__parse_networks(self, auth_context, networks_dict,
                                       location):
        """
        Parse network interfaces.

        - If networks_dict is empty, no network interface will be configured.
        - If only `id` or `name` is given, the interface will be
          configured by DHCP.
        - If `ip` is given, it will be statically assigned to the interface and
          optionally `gateway` and `primary` attributes will be used.

        :raises BadRequestError: missing network id/name or invalid IP.
        :raises NotFoundError: the referenced network does not exist.
        """
        from mist.api.methods import list_resources
        from libcloud.utils.networking import is_valid_ip_address
        if not networks_dict:
            return None
        ret_dict = {
            'networks': [],
        }
        networks = networks_dict.get('networks', [])
        for net in networks:
            # Either attribute may identify the network.
            network_id = net.get('id') or net.get('name')
            if not network_id:
                raise BadRequestError('network id or name is required')
            try:
                [network], _ = list_resources(auth_context, 'network',
                                              search=network_id,
                                              cloud=self.cloud.id,
                                              limit=1)
            except ValueError:
                raise NotFoundError('Network does not exist')
            nid = {
                'network_name': network.name
            }
            # Optional static configuration; addresses are validated
            # up front so plan generation fails early.
            if net.get('ip'):
                if is_valid_ip_address(net['ip']):
                    nid['ip'] = net['ip']
                else:
                    raise BadRequestError('IP given is invalid')
            if net.get('gateway'):
                if is_valid_ip_address(net['gateway']):
                    nid['gateway'] = net['gateway']
                else:
                    raise BadRequestError('Gateway IP given is invalid')
            if net.get('primary'):
                nid['primary'] = net['primary']
            ret_dict['networks'].append(nid)
        # Optional passthrough (VNF) devices are forwarded untouched.
        if networks_dict.get('vnfs'):
            ret_dict['vnfs'] = networks_dict['vnfs']
        return ret_dict
def _generate_plan__parse_disks(self, auth_context, disks_dict):
ret_dict = {
'disk_size': disks_dict.get('disk_size', 4),
}
if disks_dict.get('disk_path'):
ret_dict['disk_path'] = disks_dict.get('disk_path')
return ret_dict
def _create_machine__get_image_object(self, image):
from mist.api.images.models import CloudImage
try:
cloud_image = CloudImage.objects.get(id=image)
except me.DoesNotExist:
raise NotFoundError('Image does not exist')
return cloud_image.external_id
def _create_machine__get_size_object(self, size):
if isinstance(size, dict):
return size
from mist.api.clouds.models import CloudSize
try:
cloud_size = CloudSize.objects.get(id=size)
except me.DoesNotExist:
raise NotFoundError('Size does not exist')
return {'cpus': cloud_size.cpus, 'ram': cloud_size.ram}
def _create_machine__get_location_object(self, location):
from mist.api.clouds.models import CloudLocation
try:
cloud_location = CloudLocation.objects.get(id=location)
except me.DoesNotExist:
raise NotFoundError('Location does not exist')
return cloud_location.external_id
    def _create_machine__compute_kwargs(self, plan):
        """Translate a creation *plan* into kwargs for the libvirt driver.

        Resolves the plan's location to a host machine, injects that
        host's driver, flattens the size dict into cpu/ram, and forwards
        key, disk, network, vnf and cloud-init settings.

        :raises MachineCreationError: the host machine does not exist.
        """
        from mist.api.machines.models import Machine
        kwargs = super()._create_machine__compute_kwargs(plan)
        # The plan's location identifies the hypervisor host to deploy on.
        location_id = kwargs.pop('location')
        try:
            host = Machine.objects.get(
                cloud=self.cloud, machine_id=location_id)
        except me.DoesNotExist:
            raise MachineCreationError("The host specified does not exist")
        driver = self._get_host_driver(host)
        kwargs['driver'] = driver
        # Flatten the size dict into the cpu/ram kwargs libvirt expects.
        size = kwargs.pop('size')
        kwargs['cpu'] = size['cpus']
        kwargs['ram'] = size['ram']
        if kwargs.get('auth'):
            # The driver wants the raw public key, not the key object.
            kwargs['public_key'] = kwargs.pop('auth').public
        kwargs['disk_size'] = plan['disks'].get('disk_size')
        kwargs['disk_path'] = plan['disks'].get('disk_path')
        kwargs['networks'] = plan.get('networks', {}).get('networks', [])
        kwargs['vnfs'] = plan.get('networks', {}).get('vnfs', [])
        kwargs['cloud_init'] = plan.get('cloudinit')
        return kwargs
def _create_machine__create_node(self, kwargs):
driver = kwargs.pop('driver')
node = driver.create_node(**kwargs)
return node
class OnAppComputeController(BaseComputeController):
def _connect(self, **kwargs):
return get_driver(Provider.ONAPP)(key=self.cloud.username,
secret=self.cloud.apikey,
host=self.cloud.host,
verify=self.cloud.verify)
def _list_machines__machine_actions(self, machine, node_dict):
super(OnAppComputeController, self)._list_machines__machine_actions(
machine, node_dict)
machine.actions.resize = True
if node_dict['state'] is NodeState.RUNNING.value:
machine.actions.suspend = | |
# <gh_stars>1-10  (dataset artifact, not part of the original script)
# modified may 2011 to name components (map/ped) as RgeneticsData to align with default base_name
# otherwise downstream tools fail
# modified march 2011 to remove post execution hook
# pedigree data faker
# specifically designed for scalability testing of
# Shaun Purcell's PLINK package
# derived from <NAME>'s original suggestion
# allele frequency spectrum and random mating added
# ross lazarus me fecit january 13 2007
# copyright ross lazarus 2007
# without psyco
# generates about 10k snp genotypes in 2k subjects (666 trios) per minute or so.
# so 500k (a billion genotypes), at about 4 trios/min, will take a couple of hours to generate
# psyco makes it literally twice as quick!!
# all rights reserved except as granted under the terms of the LGPL
# see http://www.gnu.org/licenses/lgpl.html
# for a copy of the license you receive with this software
# and for your rights and obligations
# especially if you wish to modify or redistribute this code
# january 19 added random missingness inducer
# currently about 15M genos/minute without psyco, 30M/minute with
# so a billion genos should take about 40 minutes with psyco or 80 without...
# added mendel error generator jan 23 rml
import random,sys,time,os,string
from optparse import OptionParser
defbasename="RgeneticsData"
width = 500000
ALLELES = ['1','2','3','4']
prog = os.path.split(sys.argv[0])[-1]
debug = 0
"""Natural-order sorting, supporting embedded numbers.
# found at http://lists.canonical.org/pipermail/kragen-hacks/2005-October/000419.html
note test code there removed to conserve brain space
foo9bar2 < foo10bar2 < foo10bar10
"""
import random, re, sys
def natsort_key(item):
    """Return a sort key that orders embedded numbers numerically.

    Splits *item* into alternating text and number chunks.  Number
    chunks become (0, numeric value) and text chunks (1, text), so
    numbers sort before text and compare by value, giving e.g.
    foo9bar2 < foo10bar2 < foo10bar10.
    """
    # Raw string fixes the invalid '\d' escape in a plain literal.
    chunks = re.split(r'(\d+(?:\.\d+)?)', item)
    for ii in range(len(chunks)):
        if chunks[ii] and chunks[ii][0] in '0123456789':
            if '.' in chunks[ii]:
                numtype = float
            else:
                numtype = int
            # wrap in tuple with '0' to explicitly specify numbers come first
            chunks[ii] = (0, numtype(chunks[ii]))
        else:
            chunks[ii] = (1, chunks[ii])
    return (chunks, item)
def natsort(seq):
    """Return a new list with the strings of *seq* in natural order."""
    return sorted(seq, key=natsort_key)
def makeUniformMAFdist(low=0.02, high=0.5):
    """Fake a minor-allele-frequency distribution for sampling.

    Returns frequencies uniformly spaced in [low, high] at 0.01 steps.
    Uses range() instead of the Python-2-only xrange() so the function
    also works under Python 3.
    """
    MAFdistribution = []
    for i in range(int(100 * low), int(100 * high) + 1):
        freq = i / 100.0  # uniform
        MAFdistribution.append(freq)
    return MAFdistribution
def makeTriangularMAFdist(low=0.02, high=0.5, beta=5):
    """Fake a triangular MAF distribution over-representing rare alleles.

    Frequency (51-i)/100 is appended beta*i times, so the small
    frequencies (large i) dominate when sampled with random.choice.
    Uses range() instead of the Python-2-only xrange() so the function
    also works under Python 3.
    """
    MAFdistribution = []
    for i in range(int(100 * low), int(100 * high) + 1):
        freq = (51 - i) / 100.0  # large numbers of small allele freqs
        for j in range(beta * i):  # or i*i for crude exponential distribution
            MAFdistribution.append(freq)
    return MAFdistribution
def makeFbathead(rslist=[], chromlist=[], poslist=[], width=100000):
    """Build the fbat header row: chrom_pos_rs tokens joined by spaces."""
    tokens = ['%s_%s_%s' % (chromlist[i], poslist[i], rslist[i])
              for i in range(len(rslist))]
    return ' '.join(tokens)
def makeMap( width=500000, MAFdistribution=[], useGP=False):
    """make snp allele and frequency tables for consistent generation

    Returns (alleles, freqs, rslist, chromlist, poslist) for *width*
    SNPs.  Alleles are random distinct pairs from ALLELES and
    frequencies are drawn from MAFdistribution.  With useGP, real rs
    numbers and positions are pulled from a local UCSC golden-path
    MySQL mirror; otherwise synthetic rs00N ids on chromosome 1 at 1kb
    spacing are fabricated.
    """
    usegp = 1
    snpdb = 'snp126'
    hgdb = 'hg18'
    alleles = []
    freqs = []
    rslist = []
    chromlist = []
    poslist = []
    for snp in range(width):
        random.shuffle(ALLELES)
        alleles.append(ALLELES[0:2]) # need two DIFFERENT alleles!
        freqs.append(random.choice(MAFdistribution)) # more rare alleles
    if useGP:
        try:
            import MySQLdb
            genome = MySQLdb.Connect('localhost', 'hg18', 'G3gn0m3')
            curs = genome.cursor() # use default cursor
        except:
            # Fall back to synthetic ids if no local golden-path mirror.
            if debug:
                print 'cannot connect to local copy of golden path'
            usegp = 0
    if usegp and useGP: # urrrgghh getting snps into chrom offset order is complicated....
        curs.execute('use %s' % hgdb)
        print 'Collecting %d real rs numbers - this may take a while' % width
        # get a random draw of enough reasonable (hapmap) snps with frequency data
        s = '''select distinct chrom,chromEnd, name from %s where avHet > 0 and chrom not like '%%random'
group by name order by rand() limit %d''' % (snpdb,width)
        curs.execute(s)
        reslist = curs.fetchall()
        reslist = ['%s\t%09d\t%s' % (x[3:],y,z) for x,y,z in reslist] # get rid of chr
        reslist = natsort(reslist)
        for s in reslist:
            chrom,pos,rs = s.split('\t')
            rslist.append(rs)
            chromlist.append(chrom)
            poslist.append(pos)
    else:
        # Synthetic map: all SNPs on chromosome 1, 1kb apart.
        chrom = '1'
        for snp in range(width):
            pos = '%d' % (1000*snp)
            rs = 'rs00%d' % snp
            rslist.append(rs)
            chromlist.append(chrom)
            poslist.append(pos)
    return alleles,freqs, rslist, chromlist, poslist
def writeMap(fprefix = '', fpath='./', rslist=[], chromlist=[], poslist=[], width = 500000):
    """Write a plink-compatible .map file (fbat files instead carry the
    map as a header line)."""
    mapname = os.path.join(fpath, '%s.map' % (fprefix))
    rows = ['%s\t%s\t0\t%s' % (chromlist[i], rslist[i], poslist[i])
            for i in range(len(rslist))]
    rows.append('')  # trailing newline
    mapfile = open(mapname, 'w')
    mapfile.write('\n'.join(rows))
    mapfile.close()
def makeMissing(genos=None, missrate = 0.03, missval = '0'):
    """Randomly blank genotypes in place at rate *missrate*.

    genos is a list of 'a b' genotype strings, one per SNP; each entry
    is independently replaced by 'missval missval' with probability
    missrate.  The (mutated) list is also returned.

    Bug fix: the mutable default argument genos=[] was replaced by None
    so one list is not shared (and mutated) across calls.
    """
    if genos is None:
        genos = []
    nsnps = len(genos)
    for snp in range(nsnps):
        if random.random() <= missrate:
            genos[snp] = '%s %s' % (missval, missval)
    return genos
def makeTriomissing(genos=None, missrate = 0.03, missval = '0'):
    """Randomly blank genotypes across a trio in place - moth eaten
    like real data.

    genos is [dad, mum, kid], each a list of 'a b' genotype strings.
    Every genotype of every member is blanked independently with
    probability missrate; the mutated structure is also returned.

    Bug fixes: the old version nested a loop over all three members
    inside a loop over the two parents (shadowing the loop variable),
    which applied the miss rate twice per genotype; the mutable default
    argument genos=[] was also replaced by None.
    """
    if genos is None:
        genos = []
    for person in range(len(genos)):
        nsnps = len(genos[person])
        for snp in range(nsnps):
            if random.random() <= missrate:
                genos[person][snp] = '%s %s' % (missval, missval)
    return genos
def makeTriomendel(p1g=(0,0),p2g=(0,0), kiddip = (0,0)):
    """Impose a plausible mendelian error on a trio's child genotype.

    p1g and p2g are parental diplotypes (tuples of 0/1 alleles) and
    kiddip the child's diplotype; returns a child diplotype that is
    inconsistent with the parents where possible.  8 of the 9 mating
    types can yield a reasonable error; since random mating dual-het
    parents can produce any genotype we can't generate an interesting
    error for them, so the overall mendel rate will be lower than
    requested, depending on allele frequency.

    Bug fix: the Python-2-only `<>` operator was replaced by `!=` so
    the module also parses under Python 3.
    """
    if p1g[0] != p1g[1] and p2g[0] != p2g[1]: # both parents het
        return kiddip # cannot simulate a mendel error - anything is legal!
    elif (p1g[0] != p1g[1]): # p1 is het parent so p2 must be hom
        if p2g[0] == 0: # - make child p2 opposite hom for error
            kiddip = (1,1)
        else:
            kiddip = (0,0)
    elif (p2g[0] != p2g[1]): # p2 is het parent so p1 must be hom
        if p1g[0] == 0: # - make child p1 opposite hom for error
            kiddip = (1,1)
        else:
            kiddip = (0,0)
    elif (p1g[0] == p1g[1]): # p1 is hom parent and if we get here p2 must also be hom
        if p1g[0] == p2g[0]: # both parents are same hom - make child either het or opposite hom for error
            if random.random() <= 0.5:
                kiddip = (0,1)
            else:
                if p1g[0] == 0:
                    kiddip = (1,1)
                else:
                    kiddip = (0,0)
        else: # parents are opposite hom - return any hom as an error
            if random.random() <= 0.5:
                kiddip = (0,0)
            else:
                kiddip = (1,1)
    return kiddip
def makeFam(width=100, freqs={}, alleles={}, trio=1, missrate=0.03, missval='0', mendrate=0.0):
"""this family is a simple trio, constructed by random mating two random genotypes
TODO: why not generate from chromosomes - eg hapmap
set each haplotype locus according to the conditional
probability implied by the surrounding loci - eg use both neighboring pairs, triplets
and quads as observed in hapmap ceu"""
dadped = '%d 1 0 0 1 1 %s'
mumped = '%d 2 0 0 2 1 %s' # a mother is a mum where I come from :)
kidped = '%d 3 1 2 %d %d %s'
family = [] # result accumulator
sex = random.choice((1,2)) # for the kid
affected = random.choice((1,2))
genos = [[],[],[]] # dad, mum, kid - 0/1 for common,rare initially, then xform to alleles
# parent1...kidn lists of 0/1 for common,rare initially, then xformed to alleles
for snp in xrange(width):
f = freqs[snp]
for i in range(2): # do dad and mum
p = random.random()
a1 = a2 = 0
if p <= f: # a rare allele
a1 = 1
p = random.random()
if p <= f: # a rare allele
a2 = 1
if a1 > a2:
a1,a2 = a2,a1 # so ordering consistent - 00,01,11
dip = (a1,a2)
genos[i].append(dip) # tuples of 0,1
a1 = random.choice(genos[0][snp]) # dad gamete
a2 = random.choice(genos[1][snp]) # mum gamete
if a1 > a2:
a1,a2 = a2,a1 # so ordering consistent - 00,01,11
kiddip = (a1,a2) # NSFW mating!
genos[2].append(kiddip)
if mendrate > 0:
if random.random() <= mendrate:
genos[2][snp] = makeTriomendel(genos[0][snp],genos[1][snp], kiddip)
achoice = alleles[snp]
for g in genos: # now convert to alleles using allele dict
a1 = achoice[g[snp][0]] # get allele letter
a2 = achoice[g[snp][1]]
g[snp] = '%s %s' % (a1,a2)
if missrate > 0:
genos = makeTriomissing(genos=genos,missrate=missrate, missval=missval)
family.append(dadped % (trio,' '.join(genos[0]))) # create a row for | |
import numpy as np
import tensorflow as tf
class Model:
    """Base class for ODE models integrated with TensorFlow.

    Stores initial conditions, model parameters and the time grid, and
    provides helpers that integrate ``self.equations`` (supplied by
    subclasses as a callable returning a tf.stack) with
    ``tf.contrib.integrate.odeint``.

    Fixes over the previous version: the ``tf.Session`` is now closed
    via a context manager instead of being leaked, and the bogus type
    annotations (``np.array``/``tf.constant``/``tf.stack`` are callables,
    not types) were removed in favour of docstring types.
    """

    def __init__(
            self,
            initial_conditions=None,
            model_parameters=None,
            final_time=None,
            time_steps=None):
        self.initial_conditions = initial_conditions
        self.model_parameters = model_parameters
        self.final_time = final_time    # integration end time
        self.time_steps = time_steps    # number of time-grid points

    def init_converter(self, arg1):
        """Convert the initial conditions to a constant tensor.

        Parameters
        ----------
        arg1 : np.ndarray
            Initial conditions for the system of ODEs to be solved.

        Returns
        -------
        tf.Tensor
            Constant tf.float64 tensor built from *arg1*.
        """
        return tf.constant(arg1, dtype=tf.float64)

    def ode_solver(self, arg1, arg2):
        """Numerically integrate the system of ODEs.

        Parameters
        ----------
        arg1 : callable
            Right-hand side of the system (returns a tf.stack).
        arg2 : np.ndarray
            Initial conditions for the system of ODEs to be solved.

        Returns
        -------
        list
            [state tensor for each time point, odeint info dict tensor].
        """
        t = np.linspace(0, self.final_time, num=self.time_steps)
        tensor_state, tensor_info = tf.contrib.integrate.odeint(
            arg1, self.init_converter(arg2), t, full_output=True)
        return [tensor_state, tensor_info]

    def tf_session(self, arg1, arg2):
        """Evaluate the ODE solution inside a TensorFlow session.

        Parameters
        ----------
        arg1 : callable
            Right-hand side of the system (returns a tf.stack).
        arg2 : np.ndarray
            Initial conditions for the system of ODEs to be solved.

        Returns
        -------
        np.ndarray
            Transpose of the solved state array (one row per variable).
        """
        # Context manager guarantees the session is closed; the old code
        # leaked it.
        with tf.Session() as sess:
            state, info = sess.run(self.ode_solver(arg1, arg2))
        return state.T

    def solve(self):
        """Integrate ``self.equations`` from ``self.initial_conditions``,
        cache the result on ``self.solution`` and return it."""
        self.solution = self.tf_session(self.equations, self.initial_conditions)
        return self.solution
class CoupledDampedSHM(Model):
    """Coupled Damped Simple Harmonic Motion

    This system of ODEs models coupled damped simple harmonic motion, such as two carts
    on a track coupled to each other and each edge of the track by springs.
    """

    def __init__(
            self,
            initial_conditions=[0.5, 0.1, 0.1, 0.1],
            model_parameters=[0.007, 0.27, 0.027, 0.25],
            final_time=200,
            time_steps=1000):
        # NOTE(review): from their use in equations(), model_parameters
        # appear to be [damping, spring const, coupling const, mass] --
        # confirm with the model's author.
        self.initial_conditions = np.array(initial_conditions)
        self.model_parameters = model_parameters
        self.final_time = final_time
        self.time_steps = time_steps

    def equations(self, state, t):
        """Right-hand side of the coupled damped oscillator system.

        state holds (x, dx/dt, x1, dx1/dt) for the two oscillators.
        """
        x, y, x1, y1 = tf.unstack(state)
        dx = y
        dy = -(self.model_parameters[1] / self.model_parameters[3]) * x \
            + (self.model_parameters[2] / self.model_parameters[3]) * x1 \
            - (self.model_parameters[0] / self.model_parameters[3]) * y
        dx1 = y1
        dy1 = (self.model_parameters[2] / self.model_parameters[3]) * x \
            - (self.model_parameters[1] / self.model_parameters[3]) * x1 \
            - (self.model_parameters[0] / self.model_parameters[3]) * y1
        return tf.stack([dx, dy, dx1, dy1])
class DampedSHM(Model):
    """Damped simple harmonic motion.

    This system of ODEs models a single damped harmonic oscillator.
    model_parameters appear to be [damping, spring constant, mass] —
    inferred from the equations, TODO confirm.
    """

    def __init__(
            self,
            # Immutable defaults avoid the shared-mutable-default pitfall
            # of the original list defaults.
            initial_conditions=(0.1, 0.1),
            model_parameters=(0.035, 0.5, 0.2),
            final_time=50,
            time_steps=500):
        self.initial_conditions = np.array(initial_conditions)
        self.model_parameters = model_parameters
        self.final_time = final_time
        self.time_steps = time_steps

    def equations(self, state, t):
        """Return d(state)/dt for state = [x, y] (position, velocity)."""
        p = self.model_parameters
        b, k, m = p[0], p[1], p[2]
        x, y = tf.unstack(state)
        dx = y
        dy = (-b * y - k * x) / m
        return tf.stack([dx, dy])
class FitzhughNagumo(Model):
    """Fitzhugh-Nagumo neuron model.

    This system of ODEs is an implementation of the Fitzhugh-Nagumo
    model for the action potential of a point neuron.

    model_parameters appear to be [a, b, c, external current] in the
    classic FHN parameterization — inferred from the equations,
    TODO confirm.
    """

    def __init__(
            self,
            # Immutable defaults avoid the shared-mutable-default pitfall
            # of the original list defaults.
            initial_conditions=(0.01, 0.01),
            model_parameters=(0.75, 0.8, 3, -0.4),
            final_time=100,
            time_steps=500):
        self.initial_conditions = np.array(initial_conditions)
        self.model_parameters = model_parameters
        self.final_time = final_time
        self.time_steps = time_steps

    def equations(self, state, t):
        """Return d(state)/dt for state = [v, w] (voltage, recovery)."""
        p = self.model_parameters
        a, b, c, i_ext = p[0], p[1], p[2], p[3]
        v, w = tf.unstack(state)
        dv = c * (v + w - (v**3 / 3) + i_ext)
        dw = -1 / c * (v - a + b * w)
        return tf.stack([dv, dw])
class HindmarshRose(Model):
    """Hindmarsh-Rose neuron model.

    This system of ODEs is an implementation of the Hindmarsh-Rose
    model for the action potential of a point neuron.

    model_parameters appear to follow the standard HR naming
    [a, b, c, d, r, s, I, x_rest] — inferred from the equations,
    TODO confirm.
    """

    def __init__(
            self,
            # Immutable defaults avoid the shared-mutable-default pitfall
            # of the original list defaults.
            initial_conditions=(0.1, 0.1, 0.1),
            model_parameters=(1., 3., 1., 5., 0.006, 4., 1.3, -1.5),
            final_time=100,
            time_steps=1000):
        self.initial_conditions = np.array(initial_conditions)
        self.model_parameters = model_parameters
        self.final_time = final_time
        self.time_steps = time_steps

    def equations(self, state, t):
        """Return d(state)/dt for state = [x, y, z]."""
        p = self.model_parameters
        a, b, c, d = p[0], p[1], p[2], p[3]
        r, s, i_ext, x_rest = p[4], p[5], p[6], p[7]
        x, y, z = tf.unstack(state)
        dx = y - a * (x ** 3) + (b * (x ** 2)) - z + i_ext
        dy = c - d * (x ** 2) - y
        # Slow adaptation variable.
        dz = r * (s * (x - x_rest) - z)
        return tf.stack([dx, dy, dz])
class HodgkinHuxley(Model):
    """Hodgkin-Huxley neuron model.

    This system of ODEs is an implementation of the Hodgkin-Huxley
    model for the action potential of a point neuron, using the original
    HH sign convention (note the -1/C factor in di).

    model_parameters appear to be [gK, gNa, gL, vK, vNa, vL, C, I] —
    inferred from the equations, TODO confirm.
    """

    def __init__(
            self,
            # Immutable defaults avoid the shared-mutable-default pitfall
            # of the original list defaults.
            initial_conditions=(0.1, 0.1, 0.1, 0.1),
            model_parameters=(36., 120., 0.3, 12., -115., -10.613, 1., -10.),
            final_time=100,
            time_steps=1000):
        self.initial_conditions = np.array(initial_conditions)
        self.model_parameters = model_parameters
        self.final_time = final_time
        self.time_steps = time_steps

    def equations(self, state, t):
        """Return d(state)/dt for state = [i, n, m, h]."""
        p = self.model_parameters
        g_k, g_na, g_l = p[0], p[1], p[2]
        v_k, v_na, v_l = p[3], p[4], p[5]
        c_m, i_ext = p[6], p[7]
        i, n, m, h = tf.unstack(state)
        # Alpha and beta functions for channel activation functions.
        alpha_n = (0.01 * (i + 10)) / (tf.exp((i + 10) / 10) - 1)
        beta_n = 0.125 * tf.exp(i / 80)
        alpha_m = (0.1 * (i + 25)) / (tf.exp((i + 25) / 10) - 1)
        beta_m = 4 * tf.exp(i / 18)
        alpha_h = (0.07 * tf.exp(i / 20))
        beta_h = 1 / (tf.exp((i + 30) / 10) + 1)
        # Differential equations: membrane voltage plus three gates.
        di = (g_k * (n ** 4) * (i - v_k)
              + g_na * (m ** 3) * h * (i - v_na)
              + g_l * (i - v_l)
              - i_ext) * (-1 / c_m)
        dn = alpha_n * (1 - n) - beta_n * n
        dm = alpha_m * (1 - m) - beta_m * m
        dh = alpha_h * (1 - h) - beta_h * h
        return tf.stack([di, dn, dm, dh])

    def solve(self):
        """Solve the system, flipping the voltage sign for plotting.

        Returns a tuple (-i, n, m, h); the negation converts the original
        HH voltage convention to the modern one.
        """
        i, n, m, h = self.tf_session(self.equations, self.initial_conditions)
        self.solution = -1 * i, n, m, h
        return self.solution
class HIV(Model):
    """HIV dynamics.

    This system of ODEs is an implementation of a model for HIV
    dynamics in a T-cell population; state = [healthy T-cells,
    infected T-cells, free virus] — presumably, TODO confirm.
    """

    def __init__(
            self,
            # Immutable defaults avoid the shared-mutable-default pitfall
            # of the original list defaults.
            initial_conditions=(1000, 0, 1),
            model_parameters=(10., 0.02, 0.24, 2.4, 2.4e-5, 100),
            final_time=500,
            time_steps=500):
        self.initial_conditions = np.array(initial_conditions)
        self.model_parameters = model_parameters
        self.final_time = final_time
        self.time_steps = time_steps

    def equations(self, state, t):
        """Return d(state)/dt for state = [x1, x2, x3]."""
        p = self.model_parameters
        # Names inferred from standard T-cell/HIV models — confirm:
        # supply, healthy death, clearance, infected death, infection
        # rate, virion production.
        lam, d_t, c_v, delta, beta, n_prod = p[0], p[1], p[2], p[3], p[4], p[5]
        x1, x2, x3 = tf.unstack(state)
        dx1 = -d_t * x1 - beta * x1 * x3 + lam
        dx2 = -delta * x2 + beta * x1 * x3
        dx3 = n_prod * x2 - c_v * x3
        return tf.stack([dx1, dx2, dx3])
class Lorenz(Model):
    """Lorenz equations.

    This system of ODEs is an implementation of the Lorenz equations
    which model atmospheric convection.

    model_parameters are [rho, sigma, beta] in the usual Lorenz
    parameterization.
    """

    def __init__(
            self,
            # Immutable defaults avoid the shared-mutable-default pitfall
            # of the original list defaults.
            initial_conditions=(0, 2, 20),
            model_parameters=(28., 10., 8. / 3.),
            final_time=50,
            time_steps=5000):
        self.initial_conditions = np.array(initial_conditions)
        self.model_parameters = model_parameters
        self.final_time = final_time
        self.time_steps = time_steps
        # NOTE: the original set `self.state = tf.tensor()`, but tf.tensor
        # does not exist in TensorFlow 1.x (it would raise AttributeError
        # on construction) and no other model sets this attribute; the
        # state is passed to equations() by the solver instead, so the
        # line has been removed.

    def equations(self, state, t):
        """Return d(state)/dt for state = [x, y, z]."""
        p = self.model_parameters
        rho, sigma, beta = p[0], p[1], p[2]
        x, y, z = tf.unstack(state)
        dx = sigma * (y - x)
        dy = x * (rho - z) - y
        dz = x * y - beta * z
        return tf.stack([dx, dy, dz])
class MorrisLecar(Model):
    """Morris-Lecar neuron model.

    This system of ODEs is an implementation of the Morris-Lecar
    model for the action potential of a point neuron.

    model_parameters appear to be [vK, gK, vCa, gCa, vL, gL, phi, v1, v2,
    v3, v4, I] — inferred from the equations, TODO confirm.
    """

    def __init__(
            self,
            # Immutable defaults avoid the shared-mutable-default pitfall
            # of the original list defaults.
            initial_conditions=(0.01, 0.01),
            model_parameters=(-84., 8., 130., 4.4, -60., 2., 0.04, -1.2,
                              18., 2., 30., 80.),
            final_time=500,
            time_steps=1000):
        self.initial_conditions = np.array(initial_conditions)
        self.model_parameters = model_parameters
        self.final_time = final_time
        self.time_steps = time_steps

    def equations(self, state, t):
        """Return d(state)/dt for state = [v, n] (voltage, K activation)."""
        p = self.model_parameters
        v, n = tf.unstack(state)
        # Steady-state activation curves and (inverse) time constant —
        # presumably m_inf, n_inf and 1/tau_n of Morris-Lecar; confirm.
        m_inf = 0.5 * (1 + tf.tanh((v - p[7]) / p[8]))
        n_inf = 0.5 * (1 + tf.tanh((v - p[9]) / p[10]))
        tau_inv = 1 / tf.cosh((v - p[9]) / (2 * p[10]))
        dv = (-p[3] * m_inf * (v - p[2]) - p[1] * n
              * (v - p[0]) - p[5]
              * (v - p[4]) + p[11])
        dn = (p[6] * (n_inf - n)) / tau_inv
        return tf.stack([dv, dn])
class Vanderpol(Model):
"""Van der pol oscillator
This system of ODEs is an implementation of the van der pol
oscillator, a commonly used introductory system in the study
of dynamical systems.
"""
def __init__(
self,
initial_conditions=[0.01, 0.01],
model_parameters=[-0.05],
final_time=50,
time_steps=250):
self.initial_conditions = np.array(initial_conditions)
self.model_parameters = model_parameters
self.final_time = final_time
self.time_steps = time_steps
def equations(self, state, t):
x, y = | |
object
"""
if device.uuid is None:
return _FS_UNAVAIL
else:
return device.uuid
def _GetInstNic(index, cb):
"""Build function for calling another function with an instance NIC.
@type index: int
@param index: NIC index
@type cb: callable
@param cb: Callback
"""
def fn(ctx, inst):
"""Call helper function with instance NIC.
@type ctx: L{InstanceQueryData}
@type inst: L{objects.Instance}
@param inst: Instance object
"""
try:
nic = inst.nics[index]
except IndexError:
return _FS_UNAVAIL
return cb(ctx, index, nic)
return fn
def _GetInstNicNetworkName(ctx, _, nic): # pylint: disable=W0613
"""Get a NIC's Network.
@type ctx: L{InstanceQueryData}
@type nic: L{objects.NIC}
@param nic: NIC object
"""
if nic.network is None:
return _FS_UNAVAIL
else:
return ctx.networks[nic.network].name
def _GetInstNicNetwork(ctx, _, nic): # pylint: disable=W0613
"""Get a NIC's Network.
@type ctx: L{InstanceQueryData}
@type nic: L{objects.NIC}
@param nic: NIC object
"""
if nic.network is None:
return _FS_UNAVAIL
else:
return nic.network
def _GetInstNicIp(ctx, _, nic): # pylint: disable=W0613
"""Get a NIC's IP address.
@type ctx: L{InstanceQueryData}
@type nic: L{objects.NIC}
@param nic: NIC object
"""
if nic.ip is None:
return _FS_UNAVAIL
else:
return nic.ip
def _GetInstNicBridge(ctx, index, _):
  """Get a NIC's bridge (only available in bridged mode).

  @type ctx: L{InstanceQueryData}
  @type index: int
  @param index: NIC index

  """
  assert len(ctx.inst_nicparams) >= index

  nicparams = ctx.inst_nicparams[index]
  if nicparams[constants.NIC_MODE] != constants.NIC_MODE_BRIDGED:
    return _FS_UNAVAIL
  return nicparams[constants.NIC_LINK]
def _GetInstNicVLan(ctx, index, _):
  """Get a NIC's VLAN (only available in OpenvSwitch mode).

  @type ctx: L{InstanceQueryData}
  @type index: int
  @param index: NIC index

  """
  assert len(ctx.inst_nicparams) >= index

  nicparams = ctx.inst_nicparams[index]
  if nicparams[constants.NIC_MODE] != constants.NIC_MODE_OVS:
    return _FS_UNAVAIL
  return nicparams[constants.NIC_VLAN]
def _GetInstAllNicNetworkNames(ctx, inst):
"""Get all network names for an instance.
@type ctx: L{InstanceQueryData}
@type inst: L{objects.Instance}
@param inst: Instance object
"""
result = []
for nic in inst.nics:
name = None
if nic.network:
name = ctx.networks[nic.network].name
result.append(name)
assert len(result) == len(inst.nics)
return result
def _GetInstAllNicBridges(ctx, inst):
  """Get the bridge of every NIC of an instance.

  Non-bridged NICs yield C{None}.

  @type ctx: L{InstanceQueryData}
  @type inst: L{objects.Instance}
  @param inst: Instance object

  """
  assert len(ctx.inst_nicparams) == len(inst.nics)

  result = [nicp[constants.NIC_LINK]
            if nicp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED
            else None
            for nicp in ctx.inst_nicparams]

  assert len(result) == len(inst.nics)

  return result
def _GetInstAllNicVlans(ctx, inst):
  """Get the VLAN of every NIC of an instance.

  NICs not in OpenvSwitch mode yield C{None}.

  @type ctx: L{InstanceQueryData}
  @type inst: L{objects.Instance}
  @param inst: Instance object

  """
  assert len(ctx.inst_nicparams) == len(inst.nics)

  result = [nicp[constants.NIC_VLAN]
            if nicp[constants.NIC_MODE] == constants.NIC_MODE_OVS
            else None
            for nicp in ctx.inst_nicparams]

  assert len(result) == len(inst.nics)

  return result
def _GetInstNicParam(name):
"""Build function for retrieving a NIC parameter.
@type name: string
@param name: Parameter name
"""
def fn(ctx, index, _):
"""Get a NIC's bridge.
@type ctx: L{InstanceQueryData}
@type inst: L{objects.Instance}
@param inst: Instance object
@type nic: L{objects.NIC}
@param nic: NIC object
"""
assert len(ctx.inst_nicparams) >= index
return ctx.inst_nicparams[index][name]
return fn
def _GetInstanceNetworkFields():
  """Get instance fields involving network interfaces.

  @return: Tuple containing list of field definitions used as input for
    L{_PrepareFieldList} and a list of aliases

  """
  # Helpers shared by several per-NIC field definitions below.
  nic_mac_fn = lambda ctx, _, nic: nic.mac
  nic_mode_fn = _GetInstNicParam(constants.NIC_MODE)
  nic_link_fn = _GetInstNicParam(constants.NIC_LINK)

  # Each entry is (field definition, data kind, flags, retrieval function).
  fields = [
    # All NICs
    (_MakeField("nic.count", "NICs", QFT_NUMBER,
                "Number of network interfaces"),
     IQ_CONFIG, 0, lambda ctx, inst: len(inst.nics)),
    (_MakeField("nic.macs", "NIC_MACs", QFT_OTHER,
                "List containing each network interface's MAC address"),
     IQ_CONFIG, 0, lambda ctx, inst: [nic.mac for nic in inst.nics]),
    (_MakeField("nic.ips", "NIC_IPs", QFT_OTHER,
                "List containing each network interface's IP address"),
     IQ_CONFIG, 0, lambda ctx, inst: [nic.ip for nic in inst.nics]),
    (_MakeField("nic.names", "NIC_Names", QFT_OTHER,
                "List containing each network interface's name"),
     IQ_CONFIG, 0, lambda ctx, inst: [nic.name for nic in inst.nics]),
    (_MakeField("nic.uuids", "NIC_UUIDs", QFT_OTHER,
                "List containing each network interface's UUID"),
     IQ_CONFIG, 0, lambda ctx, inst: [nic.uuid for nic in inst.nics]),
    # Mode and link come from the filled NIC parameters, not the NIC object.
    (_MakeField("nic.modes", "NIC_modes", QFT_OTHER,
                "List containing each network interface's mode"), IQ_CONFIG, 0,
     lambda ctx, inst: [nicp[constants.NIC_MODE]
                        for nicp in ctx.inst_nicparams]),
    (_MakeField("nic.links", "NIC_links", QFT_OTHER,
                "List containing each network interface's link"), IQ_CONFIG, 0,
     lambda ctx, inst: [nicp[constants.NIC_LINK]
                        for nicp in ctx.inst_nicparams]),
    (_MakeField("nic.vlans", "NIC_VLANs", QFT_OTHER,
                "List containing each network interface's VLAN"),
     IQ_CONFIG, 0, _GetInstAllNicVlans),
    (_MakeField("nic.bridges", "NIC_bridges", QFT_OTHER,
                "List containing each network interface's bridge"),
     IQ_CONFIG, 0, _GetInstAllNicBridges),
    (_MakeField("nic.networks", "NIC_networks", QFT_OTHER,
                "List containing each interface's network"), IQ_CONFIG, 0,
     lambda ctx, inst: [nic.network for nic in inst.nics]),
    # Network names need IQ_NETWORKS, as the name lives on the network object.
    (_MakeField("nic.networks.names", "NIC_networks_names", QFT_OTHER,
                "List containing each interface's network"),
     IQ_NETWORKS, 0, _GetInstAllNicNetworkNames)
    ]

  # NICs by number
  for i in range(constants.MAX_NICS):
    numtext = utils.FormatOrdinal(i + 1)
    fields.extend([
      (_MakeField("nic.ip/%s" % i, "NicIP/%s" % i, QFT_TEXT,
                  "IP address of %s network interface" % numtext),
       IQ_CONFIG, 0, _GetInstNic(i, _GetInstNicIp)),
      (_MakeField("nic.mac/%s" % i, "NicMAC/%s" % i, QFT_TEXT,
                  "MAC address of %s network interface" % numtext),
       IQ_CONFIG, 0, _GetInstNic(i, nic_mac_fn)),
      (_MakeField("nic.name/%s" % i, "NicName/%s" % i, QFT_TEXT,
                  "Name address of %s network interface" % numtext),
       IQ_CONFIG, 0, _GetInstNic(i, _GetInstDeviceName)),
      (_MakeField("nic.uuid/%s" % i, "NicUUID/%s" % i, QFT_TEXT,
                  "UUID address of %s network interface" % numtext),
       IQ_CONFIG, 0, _GetInstNic(i, _GetInstDeviceUUID)),
      (_MakeField("nic.mode/%s" % i, "NicMode/%s" % i, QFT_TEXT,
                  "Mode of %s network interface" % numtext),
       IQ_CONFIG, 0, _GetInstNic(i, nic_mode_fn)),
      (_MakeField("nic.link/%s" % i, "NicLink/%s" % i, QFT_TEXT,
                  "Link of %s network interface" % numtext),
       IQ_CONFIG, 0, _GetInstNic(i, nic_link_fn)),
      (_MakeField("nic.bridge/%s" % i, "NicBridge/%s" % i, QFT_TEXT,
                  "Bridge of %s network interface" % numtext),
       IQ_CONFIG, 0, _GetInstNic(i, _GetInstNicBridge)),
      (_MakeField("nic.vlan/%s" % i, "NicVLAN/%s" % i, QFT_TEXT,
                  "VLAN of %s network interface" % numtext),
       IQ_CONFIG, 0, _GetInstNic(i, _GetInstNicVLan)),
      (_MakeField("nic.network/%s" % i, "NicNetwork/%s" % i, QFT_TEXT,
                  "Network of %s network interface" % numtext),
       IQ_CONFIG, 0, _GetInstNic(i, _GetInstNicNetwork)),
      (_MakeField("nic.network.name/%s" % i, "NicNetworkName/%s" % i, QFT_TEXT,
                  "Network name of %s network interface" % numtext),
       IQ_NETWORKS, 0, _GetInstNic(i, _GetInstNicNetworkName)),
      ])

  aliases = [
    # Legacy fields for first NIC
    ("ip", "nic.ip/0"),
    ("mac", "nic.mac/0"),
    ("bridge", "nic.bridge/0"),
    ("nic_mode", "nic.mode/0"),
    ("nic_link", "nic.link/0"),
    ("nic_network", "nic.network/0"),
    ]

  return (fields, aliases)
def _GetInstDiskUsage(ctx, inst):
"""Get disk usage for an instance.
@type ctx: L{InstanceQueryData}
@type inst: L{objects.Instance}
@param inst: Instance object
"""
usage = ctx.disk_usage[inst.uuid]
if usage is None:
usage = 0
return usage
def _GetInstanceConsole(ctx, inst):
"""Get console information for instance.
@type ctx: L{InstanceQueryData}
@type inst: L{objects.Instance}
@param inst: Instance object
"""
consinfo = ctx.console[inst.uuid]
if consinfo is None:
return _FS_UNAVAIL
return consinfo
def _GetInstanceDiskFields():
  """Get instance fields involving disks.

  @return: List of field definitions used as input for L{_PrepareFieldList}

  """
  # Each entry is (field definition, data kind, flags, retrieval function).
  fields = [
    (_MakeField("disk_usage", "DiskUsage", QFT_UNIT,
                "Total disk space used by instance on each of its nodes;"
                " this is not the disk size visible to the instance, but"
                " the usage on the node"),
     IQ_DISKUSAGE, 0, _GetInstDiskUsage),
    (_MakeField("disk.count", "Disks", QFT_NUMBER, "Number of disks"),
     IQ_CONFIG, 0, lambda ctx, inst: len(inst.disks)),
    (_MakeField("disk.sizes", "Disk_sizes", QFT_OTHER, "List of disk sizes"),
     IQ_CONFIG, 0, lambda ctx, inst: [disk.size for disk in inst.disks]),
    (_MakeField("disk.spindles", "Disk_spindles", QFT_OTHER,
                "List of disk spindles"),
     IQ_CONFIG, 0, lambda ctx, inst: [disk.spindles for disk in inst.disks]),
    (_MakeField("disk.names", "Disk_names", QFT_OTHER, "List of disk names"),
     IQ_CONFIG, 0, lambda ctx, inst: [disk.name for disk in inst.disks]),
    (_MakeField("disk.uuids", "Disk_UUIDs", QFT_OTHER, "List of disk UUIDs"),
     IQ_CONFIG, 0, lambda ctx, inst: [disk.uuid for disk in inst.disks]),
    ]

  # Disks by number
  for i in range(constants.MAX_DISKS):
    numtext = utils.FormatOrdinal(i + 1)
    fields.extend([
        (_MakeField("disk.size/%s" % i, "Disk/%s" % i, QFT_UNIT,
                    "Disk size of %s disk" % numtext),
         IQ_CONFIG, 0, _GetInstDisk(i, _GetInstDiskSize)),
        (_MakeField("disk.spindles/%s" % i, "DiskSpindles/%s" % i, QFT_NUMBER,
                    "Spindles of %s disk" % numtext),
         IQ_CONFIG, 0, _GetInstDisk(i, _GetInstDiskSpindles)),
        (_MakeField("disk.name/%s" % i, "DiskName/%s" % i, QFT_TEXT,
                    "Name of %s disk" % numtext),
         IQ_CONFIG, 0, _GetInstDisk(i, _GetInstDeviceName)),
        (_MakeField("disk.uuid/%s" % i, "DiskUUID/%s" % i, QFT_TEXT,
                    "UUID of %s disk" % numtext),
         IQ_CONFIG, 0, _GetInstDisk(i, _GetInstDeviceUUID))])

  return fields
def _GetInstanceParameterFields():
"""Get instance fields involving parameters.
@return: List of field definitions used as input for L{_PrepareFieldList}
"""
fields = [
# Filled parameters
(_MakeField("hvparams", "HypervisorParameters", QFT_OTHER,
"Hypervisor parameters (merged)"),
IQ_CONFIG, 0, lambda ctx, _: ctx.inst_hvparams),
(_MakeField("beparams", "BackendParameters", QFT_OTHER,
"Backend parameters (merged)"),
IQ_CONFIG, 0, lambda ctx, _: ctx.inst_beparams),
(_MakeField("osparams", "OpSysParameters", QFT_OTHER,
"Operating system parameters (merged)"),
IQ_CONFIG, 0, lambda ctx, _: ctx.inst_osparams),
# Unfilled parameters
(_MakeField("custom_hvparams", "CustomHypervisorParameters", QFT_OTHER,
"Custom hypervisor parameters"),
IQ_CONFIG, 0, _GetItemAttr("hvparams")),
(_MakeField("custom_beparams", "CustomBackendParameters", QFT_OTHER,
"Custom backend parameters",),
IQ_CONFIG, 0, _GetItemAttr("beparams")),
(_MakeField("custom_osparams", "CustomOpSysParameters", QFT_OTHER,
"Custom operating system parameters",),
IQ_CONFIG, 0, _GetItemAttr("osparams")),
(_MakeField("custom_nicparams", "CustomNicParameters", QFT_OTHER,
"Custom network interface parameters"),
IQ_CONFIG, 0, lambda ctx, inst: [nic.nicparams for nic in inst.nics]),
]
# HV params
def _GetInstHvParam(name):
return lambda ctx, _: ctx.inst_hvparams.get(name, _FS_UNAVAIL)
fields.extend([
(_MakeField("hv/%s" % name,
constants.HVS_PARAMETER_TITLES.get(name, "hv/%s" % name),
_VTToQFT[kind], "The \"%s\" hypervisor parameter" % name),
IQ_CONFIG, 0, | |
used by other tools.
print(','.join(
toolchain.FilterToolchains(toolchains, 'default', True).keys() +
toolchain.FilterToolchains(toolchains, 'default', False).keys()))
def GeneratePathWrapper(root, wrappath, path):
  """Generate a shell script that executes another program by full path

  Since we can't symlink a wrapped ELF (see GenerateLdsoWrapper) because the
  argv[0] won't be pointing to the correct path, generate a shell script that
  just executes another program with its full path.

  Args:
    root: The root tree to generate scripts inside of
    wrappath: The full path (inside |root|) to create the wrapper
    path: The target program which this wrapper will execute
  """
  wrapper = """#!/bin/sh
base=$(realpath "$0")
basedir=${base%%/*}
exec "${basedir}/%(relroot)s%(path)s" "$@"
""" % {
      'path': path,
      'relroot': os.path.relpath('/', os.path.dirname(wrappath)),
  }
  root_wrapper = root + wrappath
  # Replace an existing symlink in place; otherwise make sure the
  # parent directory exists before writing.
  if os.path.islink(root_wrapper):
    os.unlink(root_wrapper)
  else:
    osutils.SafeMakedirs(os.path.dirname(root_wrapper))
  osutils.WriteFile(root_wrapper, wrapper)
  os.chmod(root_wrapper, 0o755)
def FixClangXXWrapper(root, path):
  """Fix wrapper shell scripts and symlinks for invoking clang++

  In a typical installation, clang++ symlinks to clang, which symlinks to the
  elf executable. The executable distinguishes between clang and clang++ based
  on argv[0].

  When invoked through the LdsoWrapper, argv[0] always contains the path to the
  executable elf file, making clang/clang++ invocations indistinguishable.

  This function detects if the elf executable being wrapped is clang-X.Y, and
  fixes wrappers/symlinks as necessary so that clang++ will work correctly.

  The calling sequence now becomes:
  -) clang++ invocation turns into clang++-3.9 (which is a copy of clang-3.9,
     the Ldsowrapper).
  -) clang++-3.9 uses the Ldso to invoke clang++-3.9.elf, which is a symlink
     to the original clang-3.9 elf.
  -) The difference this time is that inside the elf file execution, $0 is
     set as .../usr/bin/clang++-3.9.elf, which contains 'clang++' in the name.

  Args:
    root: The root tree to generate scripts / symlinks inside of
    path: The target elf for which LdsoWrapper was created
  """
  # Guard clause: only versioned clang binaries need fixing up.
  if not re.match(r'/usr/bin/clang-\d+\.\d+$', path):
    return

  logging.info('fixing clang++ invocation for %s', path)
  clangdir = os.path.dirname(root + path)
  clang = os.path.basename(path)
  clangxx = clang.replace('clang', 'clang++')

  # Create a symlink clang++-X.Y.elf to point to clang-X.Y.elf
  os.symlink(clang + '.elf', os.path.join(clangdir, clangxx + '.elf'))

  # Create a hardlink clang++-X.Y pointing to clang-X.Y
  os.link(os.path.join(clangdir, clang), os.path.join(clangdir, clangxx))

  # Adjust the clang++ symlink to point to clang++-X.Y
  os.unlink(os.path.join(clangdir, 'clang++'))
  os.symlink(clangxx, os.path.join(clangdir, 'clang++'))
def FileIsCrosSdkElf(elf):
  """Determine if |elf| is an ELF that we execute in the cros_sdk

  We don't need this to be perfect, just quick.  It makes sure the ELF
  is a 64bit LSB x86_64 ELF.  That is the native type of cros_sdk.

  Args:
    elf: The file to check

  Returns:
    True if we think |elf| is a native ELF
  """
  # Open in binary mode: ELF headers are raw bytes, and the original
  # text-mode open would attempt (and possibly fail) character decoding
  # under Python 3.
  with open(elf, 'rb') as f:
    data = f.read(20)
  # Check the magic number, EI_CLASS (64bit), EI_DATA (little endian), and
  # e_machine (x86_64).  Compare one-byte slices rather than indexing single
  # positions so the checks work on both Python 2 (str) and Python 3 (bytes,
  # where indexing yields an int).
  return (data[0:4] == b'\x7fELF' and
          data[4:5] == b'\x02' and
          data[5:6] == b'\x01' and
          data[18:19] == b'\x3e')
def IsPathPackagable(ptype, path):
  """Should the specified file be included in a toolchain package?

  We only need to handle files as we'll create dirs as we need them.

  Further, trim files that won't be useful:
   - non-english translations (.mo) since it'd require env vars
   - debug files since these are for the host compiler itself
   - info/man pages as they're big, and docs are online, and the
     native docs should work fine for the most part (`man gcc`)

  Args:
    ptype: A string describing the path type (i.e. 'file' or 'dir' or 'sym')
    path: The full path to inspect

  Returns:
    True if we want to include this path in the package
  """
  # Guard-clause form: each rejected category returns early.
  if ptype in ('dir',):
    return False
  if path.startswith('/usr/lib/debug/'):
    return False
  if os.path.splitext(path)[1] == '.mo':
    return False
  if '/man/' in path or '/info/' in path:
    return False
  return True
def ReadlinkRoot(path, root):
  """Like os.readlink(), but relative to a |root|

  Args:
    path: The symlink to read
    root: The path to use for resolving absolute symlinks

  Returns:
    A fully resolved symlink path
  """
  resolved = path
  # Keep following links until we land on a non-symlink inside |root|.
  while os.path.islink(root + resolved):
    target = os.readlink(root + resolved)
    resolved = os.path.join(os.path.dirname(resolved), target)
  return resolved
def _GetFilesForTarget(target, root='/'):
  """Locate all the files to package for |target|

  This does not cover ELF dependencies.

  Args:
    target: The toolchain target name
    root: The root path to pull all packages from

  Returns:
    A tuple of a set of all packable paths, and a set of all paths which
    are also native ELFs
  """
  paths = set()
  elfs = set()

  # Find all the files owned by the packages for this target.
  for pkg in GetTargetPackages(target):

    # Ignore packages that are part of the target sysroot.
    if pkg in ('kernel', 'libc'):
      continue

    # Skip Go compiler from redistributable packages.
    # The "go" executable has GOROOT=/usr/lib/go/${CTARGET} hardcoded
    # into it. Due to this, the toolchain cannot be unpacked anywhere
    # else and be readily useful. To enable packaging Go, we need to:
    # -) Tweak the wrappers/environment to override GOROOT
    #    automatically based on the unpack location.
    # -) Make sure the ELF dependency checking and wrapping logic
    #    below skips the Go toolchain executables and libraries.
    # -) Make sure the packaging process maintains the relative
    #    timestamps of precompiled standard library packages.
    #    (see dev-lang/go ebuild for details).
    if pkg == 'ex_go':
      continue

    atom = GetPortagePackage(target, pkg)
    category, package_name = atom.split('/')
    version = GetInstalledPackageVersions(atom, root=root)[0]
    logging.info('packaging %s-%s', atom, version)

    # pylint: disable=E1101
    dblink = portage.dblink(category, '%s-%s' % (package_name, version),
                            myroot=root, settings=portage.settings)
    contents = dblink.getcontents()
    for obj, entry in contents.items():
      if not IsPathPackagable(entry[0], obj):
        continue

      # For native ELFs, we need to pull in their dependencies too.
      if entry[0] == 'obj' and FileIsCrosSdkElf(obj):
        elfs.add(obj)
      paths.add(obj)

  return paths, elfs
def _BuildInitialPackageRoot(output_dir, paths, elfs, ldpaths,
                             path_rewrite_func=lambda x: x, root='/'):
  """Link in all packable files and their runtime dependencies

  This also wraps up executable ELFs with helper scripts.

  Args:
    output_dir: The output directory to store files
    paths: All the files to include
    elfs: All the files which are ELFs (a subset of |paths|)
    ldpaths: A dict of static ldpath information
    path_rewrite_func: User callback to rewrite paths in output_dir
    root: The root path to pull all packages/files from
  """
  # Link in all the files.
  sym_paths = []
  for path in paths:
    new_path = path_rewrite_func(path)
    dst = output_dir + new_path
    osutils.SafeMakedirs(os.path.dirname(dst))

    # Is this a symlink which we have to rewrite or wrap?
    # Delay wrap check until after we have created all paths.
    src = root + path
    if os.path.islink(src):
      tgt = os.readlink(src)
      if os.path.sep in tgt:
        # Record the fully-resolved target so we can decide below whether
        # this symlink points at an ELF that needs a wrapper script.
        sym_paths.append((new_path, lddtree.normpath(ReadlinkRoot(src, root))))

        # Rewrite absolute links to relative and then generate the symlink
        # ourselves.  All other symlinks can be hardlinked below.
        if tgt[0] == '/':
          tgt = os.path.relpath(tgt, os.path.dirname(new_path))
        os.symlink(tgt, dst)
        continue

    os.link(src, dst)

  # Now see if any of the symlinks need to be wrapped.
  for sym, tgt in sym_paths:
    if tgt in elfs:
      GeneratePathWrapper(output_dir, sym, tgt)

  # Locate all the dependencies for all the ELFs.  Stick them all in the
  # top level "lib" dir to make the wrapper simpler.  This exact path does
  # not matter since we execute ldso directly, and we tell the ldso the
  # exact path to search for its libraries.
  libdir = os.path.join(output_dir, 'lib')
  osutils.SafeMakedirs(libdir)
  donelibs = set()
  for elf in elfs:
    e = lddtree.ParseELF(elf, root=root, ldpaths=ldpaths)
    interp = e['interp']
    if interp:
      # Generate a wrapper if it is executable.
      interp = os.path.join('/lib', os.path.basename(interp))
      lddtree.GenerateLdsoWrapper(output_dir, path_rewrite_func(elf), interp,
                                  libpaths=e['rpath'] + e['runpath'])
      FixClangXXWrapper(output_dir, path_rewrite_func(elf))

    # Note: iteritems() means this file targets Python 2.
    for lib, lib_data in e['libs'].iteritems():
      if lib in donelibs:
        continue

      src = path = lib_data['path']
      if path is None:
        logging.warning('%s: could not locate %s', elf, lib)
        continue
      donelibs.add(lib)

      # Needed libs are the SONAME, but that is usually a symlink, not a
      # real file.  So link in the target rather than the symlink itself.
      # We have to walk all the possible symlinks (SONAME could point to a
      # symlink which points to a symlink), and we have to handle absolute
      # ourselves (since we have a "root" argument).
      dst = os.path.join(libdir, os.path.basename(path))
      src = ReadlinkRoot(src, root)

      os.link(root + src, dst)
def _EnvdGetVar(envd, var):
"""Given a Gentoo env.d file, extract a var from it
Args:
envd: The env.d file to load (may be a glob path)
var: The var to extract
Returns:
The value of |var|
| |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and / or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Add-on registration metadata read by Blender's add-on system.
bl_info = {
    "name": "Simple Curve",
    "author": "<NAME> (cwolf3d)",
    "version": (1, 6, 1),
    "blender": (2, 80, 0),
    "location": "View3D > Add > Curve",
    "description": "Adds Simple Curve",
    "warning": "",
    "wiki_url": "https://wiki.blender.org/index.php/Extensions:2.6/"
                "Py/Scripts/Curve/Simple_curves",
    "category": "Add Curve"}
# ------------------------------------------------------------
import bpy
from bpy_extras import object_utils
from bpy.types import (
Operator,
Menu,
Panel,
PropertyGroup,
)
from bpy.props import (
BoolProperty,
EnumProperty,
FloatProperty,
FloatVectorProperty,
IntProperty,
StringProperty,
PointerProperty,
)
from mathutils import (
Vector,
Matrix,
)
from math import (
sin, asin, sqrt,
acos, cos, pi,
radians, tan,
hypot,
)
# from bpy_extras.object_utils import *
# ------------------------------------------------------------
# Point:
def SimplePoint():
    """Return the single control point of a 'Point' curve (the origin)."""
    return [[0.0, 0.0, 0.0]]
# ------------------------------------------------------------
# Line:
def SimpleLine(c1=(0.0, 0.0, 0.0), c2=(2.0, 2.0, 2.0)):
    """Return the two points of a line curve.

    The line starts at the origin and extends by the vector c2 - c1.

    Args:
        c1: first coordinate triple (default origin).
        c2: second coordinate triple.

    Returns:
        List of two [x, y, z] points.
    """
    # Tuples (not lists) as defaults: Python evaluates default arguments
    # once, so the original shared mutable list defaults could be mutated
    # by callers and corrupt every later call. Vector() accepts tuples.
    c3 = Vector(c2) - Vector(c1)
    return [[0.0, 0.0, 0.0], [c3[0], c3[1], c3[2]]]
# ------------------------------------------------------------
# Angle:
def SimpleAngle(length=1.0, angle=45.0):
    """Return three points forming an angle at the origin.

    Two arms of equal *length*: one along +X, one rotated by *angle*
    degrees counter-clockwise.
    """
    theta = radians(angle)
    return [
        [length, 0.0, 0.0],
        [0.0, 0.0, 0.0],
        [length * cos(theta), length * sin(theta), 0.0],
    ]
# ------------------------------------------------------------
# Distance:
def SimpleDistance(length=1.0, center=True):
    """Return the two endpoints of a straight segment of *length*.

    If *center* is true the segment is centered on the origin, otherwise
    it starts at the origin and runs along +X.
    """
    if center:
        half = length / 2
        return [[-half, 0.0, 0.0], [half, 0.0, 0.0]]
    return [[0.0, 0.0, 0.0], [length, 0.0, 0.0]]
# ------------------------------------------------------------
# Circle:
def SimpleCircle(sides=4, radius=1.0):
    """Return *sides* points evenly spaced on a circle of *radius*.

    The first point is always (radius, 0, 0); a zero radius yields just
    that single (origin) point.
    """
    step = radians(360) / sides
    points = [[radius, 0, 0]]
    if radius != 0:
        for k in range(1, sides):
            t = step * k
            points.append([cos(t) * radius, sin(t) * radius, 0])
    return points
# ------------------------------------------------------------
# Ellipse:
def SimpleEllipse(a=2.0, b=1.0):
    """Return the four axis-extreme points of an ellipse.

    *a* is the semi-axis along X, *b* the semi-axis along Y.
    """
    return [
        [a, 0.0, 0.0],
        [0.0, b, 0.0],
        [-a, 0.0, 0.0],
        [0.0, -b, 0.0],
    ]
# ------------------------------------------------------------
# Arc:
def SimpleArc(sides=0, radius=1.0, startangle=0.0, endangle=45.0):
    """Return points along a circular arc.

    The arc runs from *startangle* to *endangle* (degrees) at *radius*,
    subdivided into sides + 1 segments.
    """
    start = radians(startangle)
    end = radians(endangle)
    segments = sides + 1
    step = (end - start) / segments
    points = [[cos(start) * radius, sin(start) * radius, 0]]
    for k in range(1, segments):
        t = step * k + start
        points.append([cos(t) * radius, sin(t) * radius, 0])
    points.append([cos(end) * radius, sin(end) * radius, 0])
    return points
# ------------------------------------------------------------
# Sector:
def SimpleSector(sides=0, radius=1.0, startangle=0.0, endangle=45.0):
    """Return points of a pie-slice sector.

    Starts at the origin (the apex), then follows the arc from
    *startangle* to *endangle* (degrees) at *radius*, subdivided into
    sides + 1 segments.
    """
    start = radians(startangle)
    end = radians(endangle)
    segments = sides + 1
    step = (end - start) / segments
    points = [[0, 0, 0], [cos(start) * radius, sin(start) * radius, 0]]
    for k in range(1, segments):
        t = step * k + start
        points.append([cos(t) * radius, sin(t) * radius, 0])
    points.append([cos(end) * radius, sin(end) * radius, 0])
    return points
# ------------------------------------------------------------
# Segment:
def SimpleSegment(sides=0, a=2.0, b=1.0, startangle=0.0, endangle=45.0):
    """Return points of an annular segment (ring slice).

    Traces the outer arc (radius *a*) from *startangle* to *endangle*
    (degrees), then the inner arc (radius *b*) back from end to start.
    Each arc is subdivided into sides + 1 segments.
    """
    start = radians(startangle)
    end = radians(endangle)
    segments = sides + 1
    step = (end - start) / segments

    points = []
    # Outer arc, forwards.
    points.append([cos(start) * a, sin(start) * a, 0])
    for k in range(1, segments):
        t = step * k + start
        points.append([cos(t) * a, sin(t) * a, 0])
    points.append([cos(end) * a, sin(end) * a, 0])
    # Inner arc, traced back from end to start.
    points.append([cos(end) * b, sin(end) * b, 0])
    for k in range(segments - 1, 0, -1):
        t = step * k + start
        points.append([cos(t) * b, sin(t) * b, 0])
    points.append([cos(start) * b, sin(start) * b, 0])
    return points
# ------------------------------------------------------------
# Rectangle:
def SimpleRectangle(width=2.0, length=2.0, rounded=0.0, center=True):
    """Return rectangle outline vertices as [x, y, 0.0] triples.

    With rounded != 0 each corner is cut at distance rounded / 2, giving
    8 vertices instead of 4.  center=True puts the origin in the middle,
    otherwise the rectangle sits in the first quadrant.
    """
    r = rounded / 2
    if center:
        x, y = width / 2, length / 2
        if rounded != 0.0:
            # chamfered corners, clockwise from the top-left cut
            newpoints = [
                [-x + r, y, 0.0], [x - r, y, 0.0],
                [x, y - r, 0.0], [x, -y + r, 0.0],
                [x - r, -y, 0.0], [-x + r, -y, 0.0],
                [-x, -y + r, 0.0], [-x, y - r, 0.0],
            ]
        else:
            newpoints = [[-x, y, 0.0], [x, y, 0.0], [x, -y, 0.0], [-x, -y, 0.0]]
    else:
        x, y = width, length
        if rounded != 0.0:
            newpoints = [
                [r, y, 0.0], [x - r, y, 0.0],
                [x, y - r, 0.0], [x, r, 0.0],
                [x - r, 0.0, 0.0], [r, 0.0, 0.0],
                [0.0, r, 0.0], [0.0, y - r, 0.0],
            ]
        else:
            newpoints = [[0.0, 0.0, 0.0], [0.0, y, 0.0], [x, y, 0.0], [x, 0.0, 0.0]]
    return newpoints
# ------------------------------------------------------------
# Rhomb:
def SimpleRhomb(width=2.0, length=2.0, center=True):
    """Return the four vertices of a rhombus as [x, y, 0.0] triples.

    center=True centres the rhombus on the origin; otherwise its bottom
    vertex sits on the x axis with the shape in the first quadrant.
    """
    x, y = width / 2, length / 2
    if center:
        return [[-x, 0.0, 0.0], [0.0, y, 0.0], [x, 0.0, 0.0], [0.0, -y, 0.0]]
    # bottom, left, top, right — offset so the bounding box starts at the origin
    return [[x, 0.0, 0.0], [0.0, y, 0.0], [x, length, 0.0], [width, y, 0.0]]
# ------------------------------------------------------------
# Polygon:
def SimplePolygon(sides=3, radius=1.0):
    """Return the vertices of a regular polygon as [x, y, 0.0] triples.

    The first vertex lies on the +Y axis and the winding is clockwise
    (x from sin, y from cos).
    """
    step = radians(360.0) / sides
    return [[sin(step * j) * radius, cos(step * j) * radius, 0.0]
            for j in range(sides)]
# ------------------------------------------------------------
# Polygon_ab:
def SimplePolygon_ab(sides=3, a=2.0, b=1.0):
    """Return the vertices of a polygon inscribed in an ellipse.

    Like SimplePolygon but with separate x (a) and y (b) radii; the first
    vertex lies on the +Y axis.
    """
    step = radians(360.0) / sides
    return [[sin(step * j) * a, cos(step * j) * b, 0.0]
            for j in range(sides)]
# ------------------------------------------------------------
# Trapezoid:
def SimpleTrapezoid(a=2.0, b=1.0, h=1.0, center=True):
    """Return the four vertices of an isosceles trapezoid.

    a is the bottom width, b the top width, h the height; center=True
    centres the shape on the origin, otherwise the bottom-left corner is
    at the origin.
    """
    half_a = a / 2
    half_b = b / 2
    half_h = h / 2
    if center:
        return [[-half_a, -half_h, 0.0], [-half_b, half_h, 0.0],
                [half_b, half_h, 0.0], [half_a, -half_h, 0.0]]
    return [[0.0, 0.0, 0.0], [half_a - half_b, h, 0.0],
            [half_a + half_b, h, 0.0], [a, 0.0, 0.0]]
# ------------------------------------------------------------
# get array of vertcoordinates according to splinetype
def vertsToPoints(Verts, splineType):
    """Flatten [x, y, z] vertex triples into the flat coordinate array a
    Blender spline expects for the given spline type.

    BEZIER points are plain 3-vectors; POLY and NURBS points carry a
    trailing weight (w=1 for NURBS, w=0 for POLY), making them 4-vectors.
    """
    vertArray = []
    if splineType == 'BEZIER':
        # bezier output (V3): coordinates only
        for vert in Verts:
            vertArray.extend(vert)
    else:
        # non-bezier output (V4): coordinates plus per-point weight
        weight = 1 if splineType == 'NURBS' else 0
        for vert in Verts:
            vertArray.extend(vert)
            vertArray.append(weight)
    return vertArray
# ------------------------------------------------------------
# Main Function
def main(context, self, use_enter_edit_mode):
# output splineType 'POLY' 'NURBS' 'BEZIER'
splineType = self.outputType
sides = abs(int((self.Simple_endangle - self.Simple_startangle) / 90))
# get verts
if self.Simple_Type == 'Point':
verts = SimplePoint()
if self.Simple_Type == 'Line':
verts = SimpleLine(self.location, self.Simple_endlocation)
if self.Simple_Type == 'Distance':
verts = SimpleDistance(self.Simple_length, self.Simple_center)
if self.Simple_Type == 'Angle':
verts = SimpleAngle(self.Simple_length, self.Simple_angle)
if self.Simple_Type == 'Circle':
if self.Simple_sides < 4:
self.Simple_sides = 4
if self.Simple_radius == 0:
return {'FINISHED'}
verts = SimpleCircle(self.Simple_sides, self.Simple_radius)
if self.Simple_Type == 'Ellipse':
verts = SimpleEllipse(self.Simple_a, self.Simple_b)
if self.Simple_Type == 'Arc':
if self.Simple_sides < sides:
self.Simple_sides = sides
if self.Simple_radius == 0:
return {'FINISHED'}
verts = SimpleArc(
self.Simple_sides, self.Simple_radius,
self.Simple_startangle, self.Simple_endangle
)
if self.Simple_Type == 'Sector':
if self.Simple_sides < sides:
self.Simple_sides = sides
if self.Simple_radius == 0:
return {'FINISHED'}
verts = SimpleSector(
self.Simple_sides, self.Simple_radius,
self.Simple_startangle, self.Simple_endangle
)
if self.Simple_Type == 'Segment':
if self.Simple_sides < sides:
self.Simple_sides = sides
if self.Simple_a == 0 or self.Simple_b == 0 or self.Simple_a == self.Simple_b:
return {'FINISHED'}
if self.Simple_a > self.Simple_b:
verts = SimpleSegment(
self.Simple_sides, self.Simple_a, self.Simple_b,
self.Simple_startangle, self.Simple_endangle
)
if self.Simple_a < self.Simple_b:
verts = SimpleSegment(
self.Simple_sides, self.Simple_b, self.Simple_a,
self.Simple_startangle, self.Simple_endangle
)
if self.Simple_Type == 'Rectangle':
verts = SimpleRectangle(
self.Simple_width, self.Simple_length,
| |
<gh_stars>10-100
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
class idex (Exchange):
    def describe(self):
        """Return the static description of the exchange.

        Pure metadata (capabilities, endpoints, error mapping) merged over
        the base Exchange description via deep_extend(); no network I/O
        happens here.
        """
        return self.deep_extend(super(idex, self).describe(), {
            'id': 'idex',
            'name': 'IDEX',
            'countries': ['US'],
            'rateLimit': 1500,
            'certified': True,
            # orders are signed locally with a wallet key, hence web3 is required
            'requiresWeb3': True,
            'has': {
                'fetchOrderBook': True,
                'fetchTicker': True,
                'fetchTickers': True,
                'fetchMarkets': True,
                'fetchBalance': True,
                'createOrder': True,
                'cancelOrder': True,
                'fetchTransactions': True,
                'fetchTrades': False,
                'fetchMyTrades': True,
                'withdraw': True,
                'fetchOHLCV': False,
            },
            'timeframes': {
                '1m': 'M1',
                '3m': 'M3',
                '5m': 'M5',
                '15m': 'M15',
                '30m': 'M30',  # default
                '1h': 'H1',
                '4h': 'H4',
                '1d': 'D1',
                '1w': 'D7',
                '1M': '1M',
            },
            'urls': {
                'test': 'https://api.idex.market',
                'logo': 'https://user-images.githubusercontent.com/1294454/63693236-3415e380-c81c-11e9-8600-ba1634f1407d.jpg',
                'api': 'https://api.idex.market',
                'www': 'https://idex.market',
                'doc': [
                    'https://github.com/AuroraDAO/idex-api-docs',
                ],
            },
            # idex exposes everything over POST, including public endpoints
            'api': {
                'public': {
                    'post': [
                        'returnTicker',
                        'returnCurrenciesWithPairs',  # undocumented
                        'returnCurrencies',
                        'return24Volume',
                        'returnBalances',
                        'returnCompleteBalances',  # shows amount in orders as well as total
                        'returnDepositsWithdrawals',
                        'returnOpenOrders',
                        'returnOrderBook',
                        'returnOrderStatus',
                        'returnOrderTrades',
                        'returnTradeHistory',
                        'returnTradeHistoryMeta',  # not documented
                        'returnContractAddress',
                        'returnNextNonce',
                    ],
                },
                'private': {
                    'post': [
                        'order',
                        'cancel',
                        'trade',
                        'withdraw',
                    ],
                },
            },
            'options': {
                'contractAddress': None,  # 0x2a0c0DBEcC7E4D658f48E01e3fA353F44050c208
                'orderNonce': None,
            },
            # maps exact server error strings to unified exception classes
            'exceptions': {
                'Invalid order signature. Please try again.': AuthenticationError,
                'You have insufficient funds to match self order. If you believe self is a mistake please refresh and try again.': InsufficientFunds,
                'Order no longer available.': InvalidOrder,
            },
            # trading uses a wallet address + private key instead of apiKey/secret
            'requiredCredentials': {
                'walletAddress': True,
                'privateKey': True,
                'apiKey': False,
                'secret': False,
            },
        })
async def fetch_markets(self, params={}):
# idex does not have an endpoint for markets
# instead we generate the markets from the endpoint for currencies
request = {
'includeDelisted': True,
}
markets = await self.publicPostReturnCurrenciesWithPairs(self.extend(request, params))
currenciesById = {}
currencies = markets['tokens']
for i in range(0, len(currencies)):
currency = currencies[i]
currenciesById[currency['symbol']] = currency
result = []
limits = {
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}
quotes = markets['pairs']
keys = list(quotes.keys())
for i in range(0, len(keys)):
quoteId = keys[i]
bases = quotes[quoteId]
quote = self.safe_currency_code(quoteId)
quoteCurrency = currenciesById[quoteId]
for j in range(0, len(bases)):
baseId = bases[j]
id = quoteId + '_' + baseId
base = self.safe_currency_code(baseId)
symbol = base + '/' + quote
baseCurrency = currenciesById[baseId]
baseAddress = baseCurrency['address']
quoteAddress = quoteCurrency['address']
precision = {
'price': self.safe_integer(quoteCurrency, 'decimals'),
'amount': self.safe_integer(baseCurrency, 'decimals'),
}
result.append({
'symbol': symbol,
'precision': precision,
'base': base,
'quote': quote,
'baseId': baseAddress,
'quoteId': quoteAddress,
'limits': limits,
'id': id,
'info': baseCurrency,
'tierBased': False,
})
return result
def parse_ticker(self, ticker, market=None):
#
# {
# last: '0.0016550916',
# high: 'N/A',
# low: 'N/A',
# lowestAsk: '0.0016743368',
# highestBid: '0.001163726270773897',
# percentChange: '0',
# baseVolume: '0',
# quoteVolume: '0'
# }
#
symbol = None
if market:
symbol = market['symbol']
baseVolume = self.safe_float(ticker, 'baseVolume')
quoteVolume = self.safe_float(ticker, 'quoteVolume')
last = self.safe_float(ticker, 'last')
percentage = self.safe_float(ticker, 'percentChange')
return {
'symbol': symbol,
'timestamp': None,
'datetime': None,
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'highestBid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'lowestAsk'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': percentage,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicPostReturnTicker(params)
# {ETH_BOUNCY:
# {last: '0.000000004000088005',
# high: 'N/A',
# low: 'N/A',
# lowestAsk: '0.00000000599885995',
# highestBid: '0.000000001400500103',
# percentChange: '0',
# baseVolume: '0',
# quoteVolume: '0'},
# ETH_NBAI:
# {last: '0.0000032',
# high: 'N/A',
# low: 'N/A',
# lowestAsk: '0.000004000199999502',
# highestBid: '0.0000016002',
# percentChange: '0',
# baseVolume: '0',
# quoteVolume: '0'},}
ids = list(response.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
symbol = None
market = None
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
else:
quoteId, baseId = id.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
market = {'symbol': symbol}
ticker = response[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicPostReturnTicker(self.extend(request, params))
# {last: '0.0016550916',
# high: 'N/A',
# low: 'N/A',
# lowestAsk: '0.0016743368',
# highestBid: '0.001163726270773897',
# percentChange: '0',
# baseVolume: '0',
# quoteVolume: '0'}
return self.parse_ticker(response, market)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
id = market['quote'] + '_' + market['base']
request = {
'market': id,
'count': 100, # the default will only return one trade
}
if limit is not None:
request['count'] = limit
response = await self.publicPostReturnOrderBook(self.extend(request, params))
#
# {
# "asks": [
# {
# "price": "0.001675282799999",
# "amount": "206.163978911921061732",
# "total": "0.345382967850497906",
# "orderHash": "0xfdf12c124a6a7fa4a8e1866b324da888c8e1b3ad209f5050d3a23df3397a5cb7",
# "params": {
# "tokenBuy": "0x0000000000000000000000000000000000000000",
# "buySymbol": "ETH",
# "buyPrecision": 18,
# "amountBuy": "345382967850497906",
# "tokenSell": "0xb98d4c97425d9908e66e53a6fdf673acca0be986",
# "sellSymbol": "ABT",
# "sellPrecision": 18,
# "amountSell": "206163978911921061732",
# "expires": 10000,
# "nonce": 13489307413,
# "user": "0x9e8ef79316a4a79bbf55a5f9c16b3e068fff65c6"
# }
# }
# ],
# "bids": [
# {
# "price": "0.001161865193232242",
# "amount": "854.393661648355",
# "total": "0.992690256787469029",
# "orderHash": "0x2f2baaf982085e4096f9e23e376214885fa74b2939497968e92222716fc2c86d",
# "params": {
# "tokenBuy": "0xb98d4c97425d9908e66e53a6fdf673acca0be986",
# "buySymbol": "ABT",
# "buyPrecision": 18,
# "amountBuy": "854393661648355000000",
# "tokenSell": "0x0000000000000000000000000000000000000000",
# "sellSymbol": "ETH",
# "sellPrecision": 18,
# "amountSell": "992690256787469029",
# "expires": 10000,
# "nonce": 18155189676,
# "user": "0xb631284dd7b74a846af5b37766ceb1f85d53eca4"
# }
# }
# ]
# }
#
return self.parse_order_book(response, None, 'bids', 'asks', 'price', 'amount')
def parse_bid_ask(self, bidAsk, priceKey=0, amountKey=1):
price = self.safe_float(bidAsk, priceKey)
amount = self.safe_float(bidAsk, amountKey)
info = bidAsk
return [price, amount, info]
async def fetch_balance(self, params={}):
request = {
'address': self.walletAddress,
}
response = await self.publicPostReturnCompleteBalances(self.extend(request, params))
#
# {
# ETH: {available: '0.0167', onOrders: '0.1533'}
# }
#
result = {
'info': response,
}
keys = list(response.keys())
for i in range(0, len(keys)):
currency = keys[i]
balance = response[currency]
code = self.safe_currency_code(currency)
result[code] = {
'free': self.safe_float(balance, 'available'),
'used': self.safe_float(balance, 'onOrders'),
}
return self.parse_balance(result)
    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place an order on idex.

        Limit orders are hashed and signed locally with the wallet's
        private key and submitted to the /order endpoint.  Market orders
        do not create a new order: they fill an existing one, so the
        caller must pass a raw order structure (with 'orderHash' and
        'params') taken from fetchOrderBook()['bids'][0][2],
        fetchOrder()['info'], or fetchOpenOrders()[0]['info']; that
        structure is countersigned and sent to /trade.
        """
        self.check_required_dependencies()
        await self.load_markets()
        market = self.market(symbol)
        if type == 'limit':
            expires = 100000
            contractAddress = await self.get_contract_address()
            tokenBuy = None
            tokenSell = None
            amountBuy = None
            amountSell = None
            # order cost in the quote currency; price is required for limit orders
            quoteAmount = float(price) * float(amount)
            # amounts are converted to integer token units using each token's decimals
            if side == 'buy':
                tokenBuy = market['baseId']
                tokenSell = market['quoteId']
                amountBuy = self.toWei(amount, 'ether', market['precision']['amount'])
                amountSell = self.toWei(quoteAmount, 'ether', 18)
            else:
                tokenBuy = market['quoteId']
                tokenSell = market['baseId']
                amountBuy = self.toWei(quoteAmount, 'ether', 18)
                amountSell = self.toWei(amount, 'ether', market['precision']['amount'])
            nonce = await self.get_nonce()
            # exact field set and order matter: this dict is hashed and signed
            orderToHash = {
                'contractAddress': contractAddress,
                'tokenBuy': tokenBuy,
                'amountBuy': amountBuy,
                'tokenSell': tokenSell,
                'amountSell': amountSell,
                'expires': expires,
                'nonce': nonce,
                'address': self.walletAddress,
            }
            orderHash = self.get_idex_create_order_hash(orderToHash)
            signature = self.signMessage(orderHash, self.privateKey)
            request = {
                'tokenBuy': tokenBuy,
                'amountBuy': amountBuy,
                'tokenSell': tokenSell,
                'amountSell': amountSell,
                'address': self.walletAddress,
                'nonce': nonce,
                'expires': expires,
            }
            response = await self.privatePostOrder(self.extend(request, signature))  # self.extend(request, params) will cause invalid signature
            # {orderNumber: 1562323021,
            #   orderHash:
            #    '0x31c42154a8421425a18d076df400d9ec1ef64d5251285384a71ba3c0ab31beb4',
            #   timestamp: 1564041428,
            #   price: '0.00073',
            #   amount: '210',
            #   total: '0.1533',
            #   type: 'buy',
            #   params:
            #    {tokenBuy: '<KEY>',
            #      buyPrecision: 18,
            #      amountBuy: '210000000000000000000',
            #      tokenSell: '0x0000000000000000000000000000000000000000',
            #      sellPrecision: 18,
            #      amountSell: '153300000000000000',
            #      expires: 100000,
            #      nonce: 1,
            #      user: '0x0ab991497116f7f5532a4c2f4f7b1784488628e1'} }
            return self.parse_order(response, market)
        elif type == 'market':
            if not('orderHash' in list(params.keys())):
                raise ArgumentsRequired(self.id + ' market order requires an order structure such as that in fetchOrderBook()[\'bids\'][0][2], fetchOrder()[\'info\'], or fetchOpenOrders()[0][\'info\']')
            # {price: '0.000132247803328924',
            #   amount: '19980',
            #   total: '2.6423111105119',
            #   orderHash:
            #    '0x5fb3452b3d13fc013585b51c91c43a0fbe4298c211243763c49437848c274749',
            #   params:
            #    {tokenBuy: '0x0000000000000000000000000000000000000000',
            #      buySymbol: 'ETH',
            #      buyPrecision: 18,
            #      amountBuy: '2642311110511900000',
            #      tokenSell: '<KEY>',
            #      sellSymbol: 'IDEX',
            #      sellPrecision: 18,
            #      amountSell: '19980000000000000000000',
            #      expires: 10000,
            #      nonce: 1564656561510,
            #      user: '0xc3f8304270e49b8e8197bfcfd8567b83d9e4479b'} }
            # countersign the maker's order with our own wallet
            orderToSign = {
                'orderHash': params['orderHash'],
                'amount': params['params']['amountBuy'],
                'address': params['params']['user'],
                'nonce': params['params']['nonce'],
            }
            orderHash = self.get_idex_market_order_hash(orderToSign)
            signature = self.signMessage(orderHash, self.privateKey)
            signedOrder = self.extend(orderToSign, signature)
            # address/nonce are replaced with the taker's after signing
            signedOrder['address'] = self.walletAddress
            signedOrder['nonce'] = await self.get_nonce()
            # [{
            #   "amount": "0.07",
            #   "date": "2017-10-13 16:25:36",
            #   "total": "0.49",
            #   "market": "ETH_DVIP",
            #   "type": "buy",
            #   "price": "7",
            #   "orderHash": "0xcfe4018c59e50e0e1964c979e6213ce5eb8c751cbc98a44251eb48a0985adc52",
            #   "uuid": "250d51a0-b033-11e7-9984-a9ab79bb8f35"
            # }]
            response = await self.privatePostTrade(signedOrder)
            return self.parse_orders(response, market)
async def get_nonce(self):
if self.options['orderNonce'] | |
import smart_imports
smart_imports.all()
TIME_TO_LVL_DELTA = float(7)  # time difference between gaining two adjacent levels
TIME_TO_LVL_MULTIPLIER = float(1.02)  # experience multiplier, raised to the power of the level
INITIAL_HP = int(500)  # hero's initial health
HP_PER_LVL = int(50)  # health bonus per level
MOB_HP_MULTIPLIER = float(0.25)  # what fraction of the hero's health an average mob's health is
BOSS_HP_MULTIPLIER = float(0.5)  # what fraction of the hero's health a boss's health is
TURN_DELTA = int(10)  # delay of one turn, in seconds
TURNS_IN_HOUR = float(60.0 * 60 / TURN_DELTA)  # number of turns in one hour
POWER_PER_LVL = int(1)  # the hero's "pure" power (i.e. without artifacts)
EQUIP_SLOTS_NUMBER = int(11)  # number of equipment slots
# the artifact acquisition rate is taken to be the rate of obtaining them from loot;
# other sources (purchases, quests) are treated as fluctuations
ARTIFACTS_LOOT_PER_DAY = float(2.0)  # number of new artifacts per real-world day
ARTIFACT_FOR_QUEST_PROBABILITY = float(0.2)  # probability of receiving an artifact as a quest reward
# Shares of loot and artifacts in the hero's income.  Quest rewards are counted as artifacts.
INCOME_LOOT_FRACTION = float(0.6)
INCOME_ARTIFACTS_FRACTION = float(1.0 - INCOME_LOOT_FRACTION)
# magic number — the expected number of quests a hero completes per day
EXPECTED_QUESTS_IN_DAY = float(2.0)
# artifact breakages per day, chosen so that in the ideal case the whole equipment set is renewed in 3 weeks
ARTIFACTS_BREAKING_SPEED = float(EQUIP_SLOTS_NUMBER / (3 * 7.0))
EQUIPMENT_BREAK_FRACTION = float(0.5)  # fraction of equipped artifacts that may break
NORMAL_SLOT_REPAIR_PRIORITY = float(1.0)  # repair priority of a regular slot
SPECIAL_SLOT_REPAIR_PRIORITY = float(2.0)  # repair priority of a preferred slot
EXP_PER_HOUR = int(10)  # experience per hour
EXP_PER_QUEST_FRACTION = float(0.33)  # spread of experience granted per quest
COMPANIONS_BONUS_EXP_FRACTION = float(0.2)  # share of bonus experience companions may contribute
# taking possible abilities into account (i.e. with the right abilities a premium
# account's experience rate is assumed to be 1.0)
EXP_FOR_PREMIUM_ACCOUNT = float(1.0)  # experience modifier for premium accounts
EXP_FOR_NORMAL_ACCOUNT = float(0.66)  # experience modifier for regular accounts
# TODO: bring EXP_FOR_PREMIUM_ACCOUNT to 1.0 (the difference with a regular account should be 50%)
# cannot be done right now — it would be too obvious to players
HERO_MOVE_SPEED = float(0.1)  # hero's base speed, distance per turn
BATTLE_LENGTH = int(16)  # turns — average length of one battle (number of actions per battle)
INTERVAL_BETWEEN_BATTLES = int(3)  # turns — time between two battles
BATTLES_BEFORE_HEAL = int(8)  # number of battles in a continuous battle chain
MOVE_TURNS_IN_ACTION_CYCLE = INTERVAL_BETWEEN_BATTLES * BATTLES_BEFORE_HEAL
DISTANCE_IN_ACTION_CYCLE = HERO_MOVE_SPEED * MOVE_TURNS_IN_ACTION_CYCLE
HEAL_TIME_FRACTION = float(0.2)  # fraction of the battle-chain time that a full heal of the hero takes
HEAL_STEP_FRACTION = float(0.2)  # spread of regeneration per turn
HEALTH_IN_SETTLEMENT_TO_START_HEAL_FRACTION = float(0.33)  # if the hero's health is below this fraction and he is in a town, he starts healing
HEALTH_IN_MOVE_TO_START_HEAL_FRACTION = float(2 * (1.0 / BATTLES_BEFORE_HEAL))  # if the hero's health is below this fraction and he is travelling, he starts healing
TURNS_TO_IDLE = int(6)  # turns per level the hero idles in the corresponding action
TURNS_TO_RESURRECT = int(TURNS_TO_IDLE * 3)  # turns per level required for resurrection
GET_LOOT_PROBABILITY = float(0.50)  # probability of getting loot after a battle when no artifact dropped
# probabilities of the different loot grades
EPIC_ARTIFACT_PROBABILITY = float(0.005)
RARE_ARTIFACT_PROBABILITY = float(0.05)
NORMAL_ARTIFACT_PROBABILITY = float(1 - RARE_ARTIFACT_PROBABILITY - EPIC_ARTIFACT_PROBABILITY)
NORMAL_LOOT_COST = float(1)  # loot cost per unit of level
MAX_BAG_SIZE = int(12)  # maximum size of the hero's bag
BAG_SIZE_TO_SELL_LOOT_FRACTION = float(0.33)  # bag fill fraction after which the hero starts selling items
# relative sizes of the various kinds of spending
BASE_EXPERIENCE_FOR_MONEY_SPEND = int(24 * EXP_PER_HOUR * 0.4)
EXPERIENCE_DELTA_FOR_MONEY_SPEND = float(0.5)
POWER_TO_LVL = float(EQUIP_SLOTS_NUMBER)  # expected power bonus per hero level
# Artifact power varies from -ItemPowerDelta to +ItemPowerDelta.
# The slot count is used as the base, so in theory no two items need to share the same power.
# To avoid imbalance, the delta must shrink at low levels so that the difference
# between item level and delta stays at least one.
ARTIFACT_POWER_DELTA = float(0.2)  # delta by which an artifact's power may vary
ARTIFACT_BETTER_MIN_POWER_DELTA = int(5)  # minimum delta by which a better artifact's power may vary (for the shop)
# turns — length of the continuous battle chain before stopping to heal
BATTLES_LINE_LENGTH = int(BATTLES_BEFORE_HEAL * (BATTLE_LENGTH + INTERVAL_BETWEEN_BATTLES) - INTERVAL_BETWEEN_BATTLES)
# number of battles per turn within a stretch of continuous battles
BATTLES_PER_TURN = float(1.0 / (INTERVAL_BETWEEN_BATTLES + 1))
WHILD_BATTLES_PER_TURN_BONUS = float(0.05)
# maximum allowed value of the battle probability (original comment said "per hour";
# NOTE(review): the name suggests per turn — confirm against usage)
MAX_BATTLES_PER_TURN = float(0.9)
COMPANIONS_DEFENDS_IN_BATTLE = float(1.5)  # average number of times an average companion "defends" the hero per battle
COMPANIONS_HEAL_FRACTION = float(0.05)  # share of companion-care actions (at average companion health) among all hero actions
HEAL_LENGTH = int(math.floor(BATTLES_LINE_LENGTH * HEAL_TIME_FRACTION))  # turns — duration of the hero's healing
ACTIONS_CYCLE_LENGTH = int(math.ceil((BATTLES_LINE_LENGTH + HEAL_LENGTH) / (1 - COMPANIONS_HEAL_FRACTION)))  # turns — length of one "game cycle": battle chain + heal
MOVE_TURNS_IN_HOUR = MOVE_TURNS_IN_ACTION_CYCLE * (ACTIONS_CYCLE_LENGTH * TURN_DELTA / float(60 * 60))
# approximate number of battles per hour of game time
BATTLES_PER_HOUR = TURNS_IN_HOUR * (float(BATTLES_BEFORE_HEAL) / ACTIONS_CYCLE_LENGTH)
# probability of an artifact dropping from a mob (i.e. of getting an artifact after a battle)
ARTIFACTS_PER_BATTLE = float(ARTIFACTS_LOOT_PER_DAY / (BATTLES_PER_HOUR * 24))
# probability of breaking an artifact after a battle
ARTIFACTS_BREAKS_PER_BATTLE = float(ARTIFACTS_BREAKING_SPEED / (BATTLES_PER_HOUR * 24))
ARTIFACT_FROM_PREFERED_SLOT_PROBABILITY = float(0.25)  # probability of choosing an artifact from the preferred slot for purchase/upgrade
ARTIFACT_INTEGRITY_DAMAGE_PER_BATTLE = int(1)  # artifact integrity lost per battle
ARTIFACT_INTEGRITY_DAMAGE_FOR_FAVORITE_ITEM = float(0.5)  # integrity-damage modifier for the hero's favourite item
_INTEGRITY_LOST_IN_DAY = BATTLES_PER_HOUR * 24 * ARTIFACT_INTEGRITY_DAMAGE_PER_BATTLE
ARTIFACT_RARE_MAX_INTEGRITY_MULTIPLIER = float(1.5)  # maximum-integrity multiplier for rare artifacts
ARTIFACT_EPIC_MAX_INTEGRITY_MULTIPLIER = float(2)  # maximum-integrity multiplier for epic artifacts
ARTIFACT_MAX_INTEGRITY_DELTA = float(0.25)  # spread of the allowed maximum integrity
ARTIFACT_MAX_INTEGRITY = int(round(_INTEGRITY_LOST_IN_DAY * 30, -3))  # maximum integrity of a regular artifact
ARTIFACT_SHARP_MAX_INTEGRITY_LOST_FRACTION = float(0.04)  # fraction of maximum integrity lost when sharpening
ARTIFACT_INTEGRITY_SAFE_BARRIER = float(0.2)  # fraction of maximum integrity; an artifact cannot break while its integrity is within this fraction of the maximum
ARTIFACT_BREAK_POWER_FRACTIONS = (float(0.2), float(0.3))  # how much power an artifact may lose in one breakage
ARTIFACT_BREAK_INTEGRITY_FRACTIONS = (float(0.1), float(0.2))  # how much integrity an artifact may lose in one breakage
PREFERED_MOB_LOOT_PROBABILITY_MULTIPLIER = float(2)  # loot probability multiplier for the hero's preferred prey
DAMAGE_TO_HERO_PER_HIT_FRACTION = float(1.0 / (BATTLES_BEFORE_HEAL * (BATTLE_LENGTH / 2 - COMPANIONS_DEFENDS_IN_BATTLE)))  # fraction of total damage dealt to the hero per hit
DAMAGE_TO_MOB_PER_HIT_FRACTION = float(1.0 / (BATTLE_LENGTH / 2))  # fraction of total damage dealt to a mob per hit
DAMAGE_DELTA = float(0.2)  # damage spread [1-DAMAGE_DELTA, 1+DAMAGE_DELTA]
DAMAGE_CRIT_MULTIPLIER = float(2.0)  # damage multiplier on a critical hit
# hence the following mob parameters suggest themselves:
# - health, as a fraction of the average mob's — the more, the longer the mob lives
# - initiative, relative to the hero's — the more, the more often the mob acts
# - damage, as a fraction of the average mob's — the more, the harder it hits
# - damage spread, as a fraction of the average — a purely decorative parameter, since average damage is dealt in the end
# since all parameters are fractions, mob difficulty can be computed as hp * initiative * damage = 1 for an average mob
# a mob with every parameter raised by 10% has difficulty 1.1^3 ~ 1.33
# accordingly, with a 0.1 step per parameter scaled from 0.5 to 1.5, we get 11^3 parameter variants (and behaviours)
# mob difficulty then ranges from 0.5^3 to 1.5^3 ~ (0.125, 3.375)
#
# this raises the problem of keeping hero levelling uniform across territories — fully fair conditions would require equal mob difficulty everywhere;
# the alternative is to scale the experience a mob grants by its difficulty; that seems both more logical from the player's viewpoint and simpler to implement, so that is what we do
#
# computing other loot and gold: income/expense
# we assume that when the hero does not win an artifact, he has a chance to win loot
# loot is divided into common, rare and very rare
# loot is the main income source; the gold from selling it is a function of level and rarity — i.e. three functions of level
# loot, like mobs, is organized as a list sorted by the level at which it becomes available, which lets new content be unlocked for players and prices be spread out
##########################
# miscellaneous "stray" constants
##########################
DESTINY_POINT_IN_LEVELS = int(5)  # grant an ability point every this many levels
SPEND_MONEY_FOR_HEAL_HEALTH_FRACTION = float(0.75)  # the hero spends money on healing when his health is below this fraction
##########################
# angel parameters
##########################
ANGEL_ENERGY_REGENERATION_TIME = float(0.5)  # regenerate once every this many hours
ANGEL_ENERGY_REGENERATION_AMAUNT = int(1)  # how much energy is restored each time
ANGEL_ENERGY_REGENERATION_PERIOD = int(ANGEL_ENERGY_REGENERATION_TIME * TURNS_IN_HOUR)  # once every this many turns
ANGEL_ENERGY_IN_DAY = int(24.0 / ANGEL_ENERGY_REGENERATION_TIME * ANGEL_ENERGY_REGENERATION_AMAUNT)
ANGEL_ENERGY_REGENERATION_LENGTH = int(3)  # turns it takes to regenerate one unit of energy
# energy must fully regenerate over a day; a new regeneration event should appear every 2 hours
##########################
# angel abilities
##########################
ANGEL_HELP_COST = int(4)
ANGEL_ARENA_COST = int(1)
ANGEL_ARENA_QUIT_COST = int(0)
ANGEL_DROP_ITEM_COST = int(1)
ANGEL_HELP_HEAL_FRACTION = (float(0.25), float(0.5))  # (min, max) fraction of HP that will be healed
ANGEL_HELP_TELEPORT_DISTANCE = float(1.0) # расстяние на которое происходит | |
"decreased",
"bose",
"roc",
"blockbuster",
"smog",
"wildfire",
"man.",
"e.t.",
"amethyst",
"collided",
"walk-in",
"prussia",
"invoices",
"mir",
"infidel",
"rogelio",
"wart",
"forceful",
"lieu",
"iodine",
"decreed",
"hairline",
"'hello",
"theatres",
"wreak",
"karina",
"limelight",
"duffel",
"calais",
"gandalf",
"hassling",
"mau",
"mj",
"clout",
"shafts",
"adoptive",
"expires",
"lakers",
"scruffy",
"parmesan",
"consultants",
"tunisia",
"flagship",
"hasten",
"stead",
"teamed",
"ofthem",
"my-my",
"backfired",
"autographed",
"deportation",
"vida",
"pumpkins",
"oda",
"coveted",
"vive",
"objected",
"conniving",
"sicko",
"take-off",
"astute",
"ridiculed",
"priesthood",
"mid-tempo",
"heretics",
"tranquil",
"bonner",
"'ra",
"flunk",
"assembling",
"wallow",
"whoring",
"'sup",
"stapler",
"hep",
"imbalance",
"tins",
"gp",
"bulldozer",
"hormonal",
"towering",
"full-blown",
"catwoman",
"rhubarb",
"gertie",
"bla",
"workmen",
"leviathan",
"safekeeping",
"faxed",
"imprison",
"triage",
"conjugal",
"yorkers",
"incognito",
"jenner",
"mindset",
"blended",
"akash",
"sparkles",
"laverne",
"aurelie",
"croaking",
"allez",
"'e",
"specified",
"crusty",
"overlap",
"luscious",
"gladstone",
"anthropology",
"hermione",
"jacking",
"roost",
"spouses",
"greyhound",
"ecuador",
"particulars",
"unforeseen",
"cleverly",
"gordy",
"placenta",
"unspoken",
"beamed",
"coachman",
"knave",
"foyer",
"escalate",
"tiki",
"palette",
"embarked",
"zion",
"serenade",
"miser",
"uday",
"inhabit",
"inhibitions",
"stills",
"rant",
"dainty",
"daley",
"trask",
"martín",
"chop-chop",
"worshipping",
"rarest",
"black-and-white",
"wiedersehen",
"augusto",
"batty",
"participant",
"raph",
"cheon",
"far-fetched",
"senpai",
"nike",
"billboards",
"lis",
"sabre",
"manchuria",
"implication",
"deejay",
"scholarships",
"disobeying",
"migraines",
"urinate",
"downer",
"dinars",
"holli",
"leblanc",
"rizzoli",
"spoiler",
"foxtrot",
"renato",
"keats",
"sous",
"superstitions",
"coordinating",
"wisest",
"atwood",
"woodstock",
"dutchy",
"diners",
"inadmissible",
"cally",
"displaying",
"radioactivity",
"connoisseur",
"barbeque",
"mixes",
"scrotum",
"smoother",
"craigslist",
"avalon",
"mutations",
"baja",
"roddy",
"vidal",
"degraded",
"horseshoe",
"pathways",
"sailboat",
"devereaux",
"birdy",
"coincidentally",
"ofa",
"lund",
"mamie",
"all-night",
"dazed",
"overwhelm",
"tahoe",
"automobiles",
"huxley",
"breakers",
"conform",
"comanche",
"miserably",
"deactivated",
"'en",
"defies",
"fazio",
"ingested",
"watchdog",
"addie",
"prawn",
"hells",
"delinquents",
"heathrow",
"tot",
"kayo",
"koichi",
"loosely",
"constitute",
"haynes",
"keane",
"nannies",
"acupuncture",
"displeased",
"runny",
"blot",
"reduces",
"eyeing",
"cossacks",
"stumped",
"h-he",
"royals",
"heo",
"clamps",
"dede",
"canvassing",
"exited",
"exalted",
"gander",
"necktie",
"hordes",
"vomits",
"post-traumatic",
"i1",
"c.i.",
"calhoun",
"conceivable",
"ibiza",
"entangled",
"moshe",
"misread",
"vaccines",
"sexes",
"conjunction",
"scheduling",
"undressing",
"twelfth",
"gash",
"grubby",
"sao",
"chien",
"shimmering",
"holders",
"nautical",
"confucius",
"pushover",
"enlarge",
"dawkins",
"neurologist",
"mangled",
"wag",
"urinal",
"malfunctioning",
"intolerant",
"clerical",
"privates",
"reconciled",
"tackled",
"pesticides",
"euphoria",
"retake",
"jessup",
"bluebird",
"cutting-edge",
"apostle",
"temperance",
"calligraphy",
"brotherly",
"solstice",
"shagged",
"bowled",
"bastille",
"ignited",
"therein",
"camaro",
"nationalist",
"eoraha",
"documentaries",
"storyteller",
"clingy",
"splinters",
"insulation",
"shakira",
"outhouse",
"byrd",
"asuka",
"scones",
"subscribe",
"swirl",
"beavers",
"rip-off",
"tingle",
"jordi",
"gymnasium",
"frisk",
"eren",
"whoo-hoo-hoo",
"paola",
"appliance",
"reprimand",
"yuu",
"gopher",
"marcella",
"cruelly",
"draught",
"buzzed",
"taint",
"bystander",
"lp",
"consummate",
"vigilance",
"hing",
"raptor",
"astro",
"twenty-seven",
"nothing.",
"countenance",
"choreographer",
"obliterated",
"life-",
"massively",
"indict",
"sachs",
"busan",
"zhong",
"cashing",
"neutralized",
"befriend",
"tatsuya",
"empowered",
"appendicitis",
"outlive",
"ph",
"australians",
"indianapolis",
"jails",
"leary",
"interacting",
"foretold",
"syracuse",
"condos",
"dialogues",
"dimples",
"astral",
"pus",
"airship",
"bulkhead",
"sic",
"manoeuvre",
"ake",
"illuminate",
"pimples",
"admin",
"fife",
"this-this",
"mea",
"c-section",
"constituents",
"renal",
"topside",
"beyoncé",
"contingent",
"vane",
"u-boat",
"clasp",
"bagpipes",
"precarious",
"ur",
"delicately",
"yul",
"hard-earned",
"burkhalter",
"harpoon",
"relentlessly",
"photographing",
"rei",
"equilibrium",
"flimsy",
"vocalist",
"torments",
"pimping",
"phases",
"wesen",
"teeming",
"wrestlers",
"antiquities",
"burglaries",
"baddest",
"flicks",
"joni",
"jetson",
"chipping",
"yearly",
"linc",
"nami",
"wilkinson",
"canisters",
"silvio",
"tsai",
"plumbers",
"workforce",
"frostbite",
"kath",
"mes",
"twenty-six",
"selim",
"corona",
"'let",
"mortgages",
"ejected",
"recount",
"stirling",
"sparta",
"vo",
"desist",
"'because",
"ciro",
"rune",
"alleyway",
"doggett",
"sutra",
"thirty-two",
"jb",
"dermot",
"disliked",
"dissect",
"physique",
"'nor",
"fowl",
"showbiz",
"afoot",
"t0",
"engagements",
"stifling",
"milner",
"who-",
"irreversible",
"haters",
"abetting",
"hague",
"remarry",
"cassio",
"v.i.p.",
"sixes",
"five-year-old",
"hobbit",
"manon",
"aram",
"offences",
"stacking",
"feral",
"fide",
"reportedly",
"chrysler",
"coherent",
"coated",
"gambit",
"look.",
"laborers",
"selecting",
"newsreel",
"chimpanzees",
"commander-in-chief",
"peddling",
"catapult",
"sedatives",
"shoppers",
"formations",
"questionnaire",
"beverages",
"josiah",
"retches",
"top-notch",
"mistrust",
"lill",
"chowder",
"lavinia",
"kimber",
"brokers",
"varieties",
"fennel",
"leann",
"curd",
"delenn",
"gushing",
"saheb",
"jerked",
"neo",
"cossack",
"expeditions",
"coldest",
"nosebleed",
"helplessness",
"ft",
"satchel",
"doo-doo",
"layman",
"butthole",
"cuddling",
"policewoman",
"instability",
"ceilings",
"yuka",
"ketamine",
"pippa",
"flanagan",
"off-duty",
"mathis",
"hodge",
"knοw",
"pryor",
"goethe",
"haywire",
"firstborn",
"lestrade",
"gory",
"kanye",
"hypnotize",
"airing",
"peruvian",
"eyre",
"bundles",
"socialize",
"'which",
"machine-gun",
"rallies",
"rouse",
"luckier",
"hildy",
"attaché",
"citation",
"handsomely",
"faceless",
"grownup",
"finely",
"nitwit",
"beckham",
"periscope",
"shimmy",
"two-faced",
"kaleido",
"kaori",
"nt",
"lovemaking",
"sutures",
"beet",
"hatter",
"fraudulent",
"grange",
"οn",
"andrés",
"minh",
"laceration",
"venison",
"hoshi",
"reinforcement",
"flak",
"shrew",
"homeworld",
"toothpick",
"vasco",
"taunt",
"alina",
"ascent",
"frobisher",
"flirty",
"hoodlum",
"destitute",
"inclination",
"abrams",
"wankers",
"shielding",
"cited",
"arf",
"immortals",
"dispersed",
"deliverance",
"intellectually",
"noooo",
"creamed",
"appétit",
"unwelcome",
"piracy",
"wal-mart",
"heats",
"milena",
"adapting",
"geeta",
"pretense",
"nifty",
"amplified",
"ninny",
"seam",
"cheeseburgers",
"'come",
"gramophone",
"expo",
"consumes",
"necessities",
"hartford",
"deux",
"acquiring",
"socialists",
"southampton",
"hou",
"fairbanks",
"katja",
"fairest",
"broader",
"rhinoceros",
"once-in-a-lifetime",
"coincidental",
"guinevere",
"offends",
"cloned",
"brant",
"kellogg",
"sandhya",
"radcliffe",
"illustrate",
"sorely",
"internationally",
"ambient",
"snatching",
"duce",
"crematorium",
"podcast",
"partition",
"slaying",
"leopards",
"puma",
"donaldson",
"blimp",
"misfortunes",
"chillin",
"sheryl",
"prickly",
"high-powered",
"anxiously",
"pestilence",
"unruly",
"papaya",
"trenton",
"lal",
"troublemakers",
"yagyuu",
"municipality",
"bebop",
"holodeck",
"gnarly",
"hanukkah",
"skimming",
"stuntman",
"thom",
"saucers",
"snapshot",
"jurassic",
"boom-boom",
"mobster",
"craftsman",
"khloe",
"cissy",
"ismail",
"lexus",
"fastened",
"santi",
"longtime",
"sander",
"looming",
"requisition",
"tenacious",
"unpacked",
"reps",
"shorthand",
"plait",
"contented",
"centurion",
"realising",
"padded",
"saddled",
"octave",
"bewildered",
"pandey",
"koko",
"entice",
"chau",
"playmate",
"integrate",
"nao",
"dead-end",
"dissertation",
"i`ll",
"sasuke",
"abolish",
"lightening",
"tanned",
"outreach",
"norah",
"eaters",
"fatalities",
"thickness",
"censor",
"oxy",
"cabal",
"pee-pee",
"nostradamus",
"skippy",
"soaps",
"crass",
"einar",
"ten-year-old",
"glaring",
"professionalism",
"baber",
"cale",
"mgm",
"gnomes",
"proposes",
"nicht",
"aquaman",
"desiree",
"osiris",
"alana",
"snagged",
"strays",
"collaborators",
"sabbatical",
"industrialist",
"shana",
"craftsmanship",
"remedies",
"antibiotic",
"bard",
"gutters",
"motels",
"burlesque",
"gaelic",
"acorn",
"maitre",
"eavesdrop",
"arcadia",
"nomad",
"sweeten",
"exclamation",
"five-minute",
"calamari",
"thomson",
"compares",
"golfer",
"alberta",
"manger",
"ensuring",
"self-pity",
"blackness",
"subterranean",
"fryer",
"seniority",
"cheaters",
"rectify",
"is-is",
"snotty",
"shauna",
"newfound",
"iook",
"experimentation",
"cucumbers",
"jagged",
"gilly",
"unbalanced",
"databases",
"protons",
"rasputin",
"ovaries",
"spatial",
"nhs",
"metaphorically",
"aaaaah",
"no-show",
"decepticons",
"sardine",
"tarek",
"milos",
"eruptions",
"x-men",
"heartily",
"tweed",
"relaxes",
"heaviest",
"clogs",
"chaudhary",
"rallo",
"av",
"masseur",
"joxer",
"baptiste",
"goldstein",
"soph",
"hideo",
"freshwater",
"quarantined",
"erosion",
"partied",
"mite",
"leyla",
"marigold",
"itjust",
"point-blank",
"ewan",
"hairpin",
"ice-cold",
"knucklehead",
"mopping",
"montenegro",
"hanky",
"rousseau",
"gangway",
"payal",
"darken",
"quiche",
"clairvoyant",
"pearly",
"ballast",
"fuck-up",
"configuration",
"oversized",
"subpoenaed",
"trapper",
"doh",
"cosby",
"compartments",
"admires",
"vamp",
"viggo",
"reactionary",
"idly",
"pout",
"approximate",
"walkie",
"playstation",
"compiled",
"sprang",
"mg",
"ama",
"magnolia",
"commissions",
"angered",
"two-hour",
"pero",
"stoke",
"primed",
"bragg",
"unearthed",
"ingram",
"hortense",
"heartbeats",
"three-dimensional",
"conflicting",
"tami",
"frontline",
"vita",
"approves",
"utilize",
"stuffs",
"habib",
"lnspector",
"gutless",
"transplants",
"ahab",
"kasey",
"bolsheviks",
"ventured",
"snowstorm",
"headset",
"grovel",
"appreciative",
"urchin",
"tata",
"litre",
"balu",
"hostiles",
"newsletter",
"berk",
"subordinates",
"mansions",
"rayyan",
"informs",
"pistachio",
"gilda",
"gleaming",
"yaah",
"relinquish",
"railroads",
"standin",
"fondly",
"crewman",
"finnegan",
"forger",
"ans",
"contradictions",
"accuses",
"kissinger",
"poppins",
"three-quarters",
"truckers",
"outcasts",
"ducts",
"sae",
"starfish",
"nix",
"kudos",
"baboons",
"terrorizing",
"pubes",
"nοt",
"reversing",
"midgets",
"snooker",
"parades",
"one-third",
"jansen",
"shindig",
"mahir",
"susanne",
"daydream",
"southfork",
"chibs",
"whittaker",
"jerk-off",
"luau",
"styling",
"repairman",
"schubert",
"surgically",
"sped",
"crikey",
"analysts",
"simplify",
"gangsta",
"michal",
"agonizing",
"bibi",
"inscribed",
"induction",
"fetched",
"carmel",
"congratulated",
"apex",
"deserters",
"koi",
"xia",
"fibres",
"composing",
"malia",
"terrorized",
"comedies",
"systematic",
"engulfed",
"cosimo",
"hourly",
"congresswoman",
"rummy",
"flinch",
"mongolian",
"unforgiving",
"receptive",
"neurosurgeon",
"twenty-eight",
"miyuki",
"operas",
"programmer",
"ankara",
"luxuries",
"revoke",
"abortions",
"narration",
"liquids",
"multinational",
"goddard",
"robo",
"toddy",
"garter",
"oppressive",
"procure",
"counterattack",
"kiwi",
"pail",
"sacha",
"gymnast",
"glenda",
"rages",
"check-in",
"scaffolding",
"hearth",
"wharton",
"bots",
"sigmund",
"judaism",
"cmdr",
"couture",
"viewpoint",
"toffee",
"spud",
"'scuse",
"helsing",
"decaying",
"ranges",
"cellphones",
"caterina",
"blossoming",
"hacks",
"anakin",
"transforms",
"booths",
"thirds",
"infallible",
"gaurav",
"nobunaga",
"incarceration",
"mumble",
"conveyor",
"cobwebs",
"godsend",
"c.e.o.",
"avert",
"problemo",
"airmen",
"jammer",
"ulrich",
"highlighted",
"mma",
"real-estate",
"faa",
"frye",
"intubate",
"salinger",
"instantaneous",
"bettina",
"stowaway",
"baggy",
"arguably",
"paternal",
"boardroom",
"flaunt",
"kimball",
"gradient",
"retrace",
"stockholders",
"proton",
"tt",
"isnt",
"mallard",
"reincarnated",
"planks",
"enforcer",
"trumpeting",
"crouch",
"foie",
"whacking",
"thane",
"rowena",
"escalating",
"spender",
"brim",
"duster",
"thumps",
"ideological",
"entree",
"bangladesh",
"atrocity",
"preached",
"hpd",
"tre",
"chats",
"sans",
"nimble",
"halftime",
"supremacy",
"warlords",
"prism",
"soulful",
"sundance",
"intrepid",
"whoopee",
"putz",
"rigsby",
"regenerate",
"mystique",
"maxed",
"life-changing",
"janus",
"blending",
"jumpin",
"b.s.",
"rhett",
"scarring",
"swish",
"juniper",
"ginseng",
"ligature",
"cramping",
"reflections",
"bodega",
"vesuvius",
"assad",
"pegs",
"aaargh",
"mongols",
"therapists",
"sampling",
"fairchild",
"methadone",
"stereotypes",
"corned",
"gluten",
"prose",
"eep",
"rainfall",
"anklets",
"callers",
"angrier",
"attractions",
"jacko",
"jud",
"photon",
"commoner",
"inquiring",
"implemented",
"-what",
"gustave",
"worry.",
"regionals",
"feride",
"aoi",
"centimetres",
"justifies",
"chernobyl",
"montage",
"rumba",
"hilt",
"minna",
"norse",
"pelé",
"lib",
"luxembourg",
"athos",
"azul",
"corrigan",
"reb",
"abuela",
"peeps",
"hammerhead",
"eraser",
"trinkets",
"patented",
"kook",
"renfield",
"monstrosity",
"strides",
"veritable",
"nomads",
"exempt",
"sonata",
"fuentes",
"averted",
"ozzie",
"spheres",
"sauerkraut",
"pawnshop",
"wildcat",
"differential",
"degradation",
"bourgeoisie",
"coils",
"endearing",
"weakening",
"rong",
"trimmed",
"slaughtering",
"freestyle",
"ts",
"dayton",
"oh-ho-ho",
"dismantled",
"svu",
"fray",
"forefront",
"khaled",
"uv",
"indiscreet",
"tarmac",
"fer",
"red-hot",
"doable",
"probing",
"groupies",
"jumpers",
"bookkeeper",
"siegel",
"mangoes",
"granada",
"yoshiko",
"sachiko",
"pheromones",
"expedite",
"apparition",
"hallmark",
"shareholder",
"furnish",
"meringue",
"kinks",
"'from",
"subaru",
"criminally",
"caw",
"raccoons",
"nο",
"lute",
"battled",
"mozzarella",
"nasser",
"scooped",
"correcting",
"economical",
"holm",
"antisocial",
"negativity",
"rotted",
"vaudeville",
"docile",
"constructing",
"escalated",
"co-operation",
"benjy",
"remanded",
"tether",
"daresay",
"poignant",
"loafers",
"stahl",
"miso",
"rubin",
"humanly",
"relieving",
"irena",
"dalia",
| |
MONOSPACE CAPITAL F
1D676 MATHEMATICAL MONOSPACE CAPITAL G
1D677 MATHEMATICAL MONOSPACE CAPITAL H
1D678 MATHEMATICAL MONOSPACE CAPITAL I
1D679 MATHEMATICAL MONOSPACE CAPITAL J
1D67A MATHEMATICAL MONOSPACE CAPITAL K
1D67B MATHEMATICAL MONOSPACE CAPITAL L
1D67C MATHEMATICAL MONOSPACE CAPITAL M
1D67D MATHEMATICAL MONOSPACE CAPITAL N
1D67E MATHEMATICAL MONOSPACE CAPITAL O
1D67F MATHEMATICAL MONOSPACE CAPITAL P
1D680 MATHEMATICAL MONOSPACE CAPITAL Q
1D681 MATHEMATICAL MONOSPACE CAPITAL R
1D682 MATHEMATICAL MONOSPACE CAPITAL S
1D683 MATHEMATICAL MONOSPACE CAPITAL T
1D684 MATHEMATICAL MONOSPACE CAPITAL U
1D685 MATHEMATICAL MONOSPACE CAPITAL V
1D686 MATHEMATICAL MONOSPACE CAPITAL W
1D687 MATHEMATICAL MONOSPACE CAPITAL X
1D688 MATHEMATICAL MONOSPACE CAPITAL Y
1D689 MATHEMATICAL MONOSPACE CAPITAL Z
1D68A MATHEMATICAL MONOSPACE SMALL A
1D68B MATHEMATICAL MONOSPACE SMALL B
1D68C MATHEMATICAL MONOSPACE SMALL C
1D68D MATHEMATICAL MONOSPACE SMALL D
1D68E MATHEMATICAL MONOSPACE SMALL E
1D68F MATHEMATICAL MONOSPACE SMALL F
1D690 MATHEMATICAL MONOSPACE SMALL G
1D691 MATHEMATICAL MONOSPACE SMALL H
1D692 MATHEMATICAL MONOSPACE SMALL I
1D693 MATHEMATICAL MONOSPACE SMALL J
1D694 MATHEMATICAL MONOSPACE SMALL K
1D695 MATHEMATICAL MONOSPACE SMALL L
1D696 MATHEMATICAL MONOSPACE SMALL M
1D697 MATHEMATICAL MONOSPACE SMALL N
1D698 MATHEMATICAL MONOSPACE SMALL O
1D699 MATHEMATICAL MONOSPACE SMALL P
1D69A MATHEMATICAL MONOSPACE SMALL Q
1D69B MATHEMATICAL MONOSPACE SMALL R
1D69C MATHEMATICAL MONOSPACE SMALL S
1D69D MATHEMATICAL MONOSPACE SMALL T
1D69E MATHEMATICAL MONOSPACE SMALL U
1D69F MATHEMATICAL MONOSPACE SMALL V
1D6A0 MATHEMATICAL MONOSPACE SMALL W
1D6A1 MATHEMATICAL MONOSPACE SMALL X
1D6A2 MATHEMATICAL MONOSPACE SMALL Y
1D6A3 MATHEMATICAL MONOSPACE SMALL Z
1D6A4 MATHEMATICAL ITALIC SMALL DOTLESS I
1D6A5 MATHEMATICAL ITALIC SMALL DOTLESS J
1D6A8 MATHEMATICAL BOLD CAPITAL ALPHA
1D6A9 MATHEMATICAL BOLD CAPITAL BETA
1D6AA MATHEMATICAL BOLD CAPITAL GAMMA
1D6AB MATHEMATICAL BOLD CAPITAL DELTA
1D6AC MATHEMATICAL BOLD CAPITAL EPSILON
1D6AD MATHEMATICAL BOLD CAPITAL ZETA
1D6AE MATHEMATICAL BOLD CAPITAL ETA
1D6AF MATHEMATICAL BOLD CAPITAL THETA
1D6B0 MATHEMATICAL BOLD CAPITAL IOTA
1D6B1 MATHEMATICAL BOLD CAPITAL KAPPA
1D6B2 MATHEMATICAL BOLD CAPITAL LAMDA
1D6B3 MATHEMATICAL BOLD CAPITAL MU
1D6B4 MATHEMATICAL BOLD CAPITAL NU
1D6B5 MATHEMATICAL BOLD CAPITAL XI
1D6B6 MATHEMATICAL BOLD CAPITAL OMICRON
1D6B7 MATHEMATICAL BOLD CAPITAL PI
1D6B8 MATHEMATICAL BOLD CAPITAL RHO
1D6B9 MATHEMATICAL BOLD CAPITAL THETA SYMBOL
1D6BA MATHEMATICAL BOLD CAPITAL SIGMA
1D6BB MATHEMATICAL BOLD CAPITAL TAU
1D6BC MATHEMATICAL BOLD CAPITAL UPSILON
1D6BD MATHEMATICAL BOLD CAPITAL PHI
1D6BE MATHEMATICAL BOLD CAPITAL CHI
1D6BF MATHEMATICAL BOLD CAPITAL PSI
1D6C0 MATHEMATICAL BOLD CAPITAL OMEGA
1D6C1 MATHEMATICAL BOLD NABLA
1D6C2 MATHEMATICAL BOLD SMALL ALPHA
1D6C3 MATHEMATICAL BOLD SMALL BETA
1D6C4 MATHEMATICAL BOLD SMALL GAMMA
1D6C5 MATHEMATICAL BOLD SMALL DELTA
1D6C6 MATHEMATICAL BOLD SMALL EPSILON
1D6C7 MATHEMATICAL BOLD SMALL ZETA
1D6C8 MATHEMATICAL BOLD SMALL ETA
1D6C9 MATHEMATICAL BOLD SMALL THETA
1D6CA MATHEMATICAL BOLD SMALL IOTA
1D6CB MATHEMATICAL BOLD SMALL KAPPA
1D6CC MATHEMATICAL BOLD SMALL LAMDA
1D6CD MATHEMATICAL BOLD SMALL MU
1D6CE MATHEMATICAL BOLD SMALL NU
1D6CF MATHEMATICAL BOLD SMALL XI
1D6D0 MATHEMATICAL BOLD SMALL OMICRON
1D6D1 MATHEMATICAL BOLD SMALL PI
1D6D2 MATHEMATICAL BOLD SMALL RHO
1D6D3 MATHEMATICAL BOLD SMALL FINAL SIGMA
1D6D4 MATHEMATICAL BOLD SMALL SIGMA
1D6D5 MATHEMATICAL BOLD SMALL TAU
1D6D6 MATHEMATICAL BOLD SMALL UPSILON
1D6D7 MATHEMATICAL BOLD SMALL PHI
1D6D8 MATHEMATICAL BOLD SMALL CHI
1D6D9 MATHEMATICAL BOLD SMALL PSI
1D6DA MATHEMATICAL BOLD SMALL OMEGA
1D6DB MATHEMATICAL BOLD PARTIAL DIFFERENTIAL
1D6DC MATHEMATICAL BOLD EPSILON SYMBOL
1D6DD MATHEMATICAL BOLD THETA SYMBOL
1D6DE MATHEMATICAL BOLD KAPPA SYMBOL
1D6DF MATHEMATICAL BOLD PHI SYMBOL
1D6E0 MATHEMATICAL BOLD RHO SYMBOL
1D6E1 MATHEMATICAL BOLD PI SYMBOL
1D6E2 MATHEMATICAL ITALIC CAPITAL ALPHA
1D6E3 MATHEMATICAL ITALIC CAPITAL BETA
1D6E4 MATHEMATICAL ITALIC CAPITAL GAMMA
1D6E5 MATHEMATICAL ITALIC CAPITAL DELTA
1D6E6 MATHEMATICAL ITALIC CAPITAL EPSILON
1D6E7 MATHEMATICAL ITALIC CAPITAL ZETA
1D6E8 MATHEMATICAL ITALIC CAPITAL ETA
1D6E9 MATHEMATICAL ITALIC CAPITAL THETA
1D6EA MATHEMATICAL ITALIC CAPITAL IOTA
1D6EB MATHEMATICAL ITALIC CAPITAL KAPPA
1D6EC MATHEMATICAL ITALIC CAPITAL LAMDA
1D6ED MATHEMATICAL ITALIC CAPITAL MU
1D6EE MATHEMATICAL ITALIC CAPITAL NU
1D6EF MATHEMATICAL ITALIC CAPITAL XI
1D6F0 MATHEMATICAL ITALIC CAPITAL OMICRON
1D6F1 MATHEMATICAL ITALIC CAPITAL PI
1D6F2 MATHEMATICAL ITALIC CAPITAL RHO
1D6F3 MATHEMATICAL ITALIC CAPITAL THETA SYMBOL
1D6F4 MATHEMATICAL ITALIC CAPITAL SIGMA
1D6F5 MATHEMATICAL ITALIC CAPITAL TAU
1D6F6 MATHEMATICAL ITALIC CAPITAL UPSILON
1D6F7 MATHEMATICAL ITALIC CAPITAL PHI
1D6F8 MATHEMATICAL ITALIC CAPITAL CHI
1D6F9 MATHEMATICAL ITALIC CAPITAL PSI
1D6FA MATHEMATICAL ITALIC CAPITAL OMEGA
1D6FB MATHEMATICAL ITALIC NABLA
1D6FC MATHEMATICAL ITALIC SMALL ALPHA
1D6FD MATHEMATICAL ITALIC SMALL BETA
1D6FE MATHEMATICAL ITALIC SMALL GAMMA
1D6FF MATHEMATICAL ITALIC SMALL DELTA
1D700 MATHEMATICAL ITALIC SMALL EPSILON
1D701 MATHEMATICAL ITALIC SMALL ZETA
1D702 MATHEMATICAL ITALIC SMALL ETA
1D703 MATHEMATICAL ITALIC SMALL THETA
1D704 MATHEMATICAL ITALIC SMALL IOTA
1D705 MATHEMATICAL ITALIC SMALL KAPPA
1D706 MATHEMATICAL ITALIC SMALL LAMDA
1D707 MATHEMATICAL ITALIC SMALL MU
1D708 MATHEMATICAL ITALIC SMALL NU
1D709 MATHEMATICAL ITALIC SMALL XI
1D70A MATHEMATICAL ITALIC SMALL OMICRON
1D70B MATHEMATICAL ITALIC SMALL PI
1D70C MATHEMATICAL ITALIC SMALL RHO
1D70D MATHEMATICAL ITALIC SMALL FINAL SIGMA
1D70E MATHEMATICAL ITALIC SMALL SIGMA
1D70F MATHEMATICAL ITALIC SMALL TAU
1D710 MATHEMATICAL ITALIC SMALL UPSILON
1D711 MATHEMATICAL ITALIC SMALL PHI
1D712 MATHEMATICAL ITALIC SMALL CHI
1D713 MATHEMATICAL ITALIC SMALL PSI
1D714 MATHEMATICAL ITALIC SMALL OMEGA
1D715 MATHEMATICAL ITALIC PARTIAL DIFFERENTIAL
1D716 MATHEMATICAL ITALIC EPSILON SYMBOL
1D717 MATHEMATICAL ITALIC THETA SYMBOL
1D718 MATHEMATICAL ITALIC KAPPA SYMBOL
1D719 MATHEMATICAL ITALIC PHI SYMBOL
1D71A MATHEMATICAL ITALIC RHO SYMBOL
1D71B MATHEMATICAL ITALIC PI SYMBOL
1D71C MATHEMATICAL BOLD ITALIC CAPITAL ALPHA
1D71D MATHEMATICAL BOLD ITALIC CAPITAL BETA
1D71E MATHEMATICAL BOLD ITALIC CAPITAL GAMMA
1D71F MATHEMATICAL BOLD ITALIC CAPITAL DELTA
1D720 MATHEMATICAL BOLD ITALIC CAPITAL EPSILON
1D721 MATHEMATICAL BOLD ITALIC CAPITAL ZETA
1D722 MATHEMATICAL BOLD ITALIC CAPITAL ETA
1D723 MATHEMATICAL BOLD ITALIC CAPITAL THETA
1D724 MATHEMATICAL BOLD ITALIC CAPITAL IOTA
1D725 MATHEMATICAL BOLD ITALIC CAPITAL KAPPA
1D726 MATHEMATICAL BOLD ITALIC CAPITAL LAMDA
1D727 MATHEMATICAL BOLD ITALIC CAPITAL MU
1D728 MATHEMATICAL BOLD ITALIC CAPITAL NU
1D729 MATHEMATICAL BOLD ITALIC CAPITAL XI
1D72A MATHEMATICAL BOLD ITALIC CAPITAL OMICRON
1D72B MATHEMATICAL BOLD ITALIC CAPITAL PI
1D72C MATHEMATICAL BOLD ITALIC CAPITAL RHO
1D72D MATHEMATICAL BOLD ITALIC CAPITAL THETA SYMBOL
1D72E MATHEMATICAL BOLD ITALIC CAPITAL SIGMA
1D72F MATHEMATICAL BOLD ITALIC CAPITAL TAU
1D730 MATHEMATICAL BOLD ITALIC CAPITAL UPSILON
1D731 MATHEMATICAL BOLD ITALIC CAPITAL PHI
1D732 MATHEMATICAL BOLD ITALIC CAPITAL CHI
1D733 MATHEMATICAL BOLD ITALIC CAPITAL PSI
1D734 MATHEMATICAL BOLD ITALIC CAPITAL OMEGA
1D735 MATHEMATICAL BOLD ITALIC NABLA
1D736 MATHEMATICAL BOLD ITALIC SMALL ALPHA
1D737 MATHEMATICAL BOLD ITALIC SMALL BETA
1D738 MATHEMATICAL BOLD ITALIC SMALL GAMMA
1D739 MATHEMATICAL BOLD ITALIC SMALL DELTA
1D73A MATHEMATICAL BOLD ITALIC SMALL EPSILON
1D73B MATHEMATICAL BOLD ITALIC SMALL ZETA
1D73C MATHEMATICAL BOLD ITALIC SMALL ETA
1D73D MATHEMATICAL BOLD ITALIC SMALL THETA
1D73E MATHEMATICAL BOLD ITALIC SMALL IOTA
1D73F MATHEMATICAL BOLD ITALIC SMALL KAPPA
1D740 MATHEMATICAL BOLD ITALIC SMALL LAMDA
1D741 MATHEMATICAL BOLD ITALIC SMALL MU
1D742 MATHEMATICAL BOLD ITALIC SMALL NU
1D743 MATHEMATICAL BOLD ITALIC SMALL XI
1D744 MATHEMATICAL BOLD ITALIC SMALL OMICRON
1D745 MATHEMATICAL BOLD ITALIC SMALL PI
1D746 MATHEMATICAL BOLD ITALIC SMALL RHO
1D747 MATHEMATICAL BOLD ITALIC SMALL FINAL SIGMA
1D748 MATHEMATICAL BOLD ITALIC SMALL SIGMA
1D749 MATHEMATICAL BOLD ITALIC SMALL TAU
1D74A MATHEMATICAL BOLD ITALIC SMALL UPSILON
1D74B MATHEMATICAL BOLD ITALIC SMALL PHI
1D74C MATHEMATICAL BOLD ITALIC SMALL CHI
1D74D MATHEMATICAL BOLD ITALIC SMALL PSI
1D74E MATHEMATICAL BOLD ITALIC SMALL OMEGA
1D74F MATHEMATICAL BOLD ITALIC PARTIAL DIFFERENTIAL
1D750 MATHEMATICAL BOLD ITALIC EPSILON SYMBOL
1D751 MATHEMATICAL BOLD ITALIC THETA SYMBOL
1D752 MATHEMATICAL BOLD ITALIC KAPPA SYMBOL
1D753 MATHEMATICAL BOLD ITALIC PHI SYMBOL
1D754 MATHEMATICAL BOLD ITALIC RHO SYMBOL
1D755 MATHEMATICAL BOLD ITALIC PI SYMBOL
1D756 MATHEMATICAL SANS-SERIF BOLD CAPITAL ALPHA
1D757 MATHEMATICAL SANS-SERIF BOLD CAPITAL BETA
1D758 MATHEMATICAL SANS-SERIF BOLD CAPITAL GAMMA
1D759 MATHEMATICAL SANS-SERIF BOLD CAPITAL DELTA
1D75A MATHEMATICAL SANS-SERIF BOLD CAPITAL EPSILON
1D75B MATHEMATICAL SANS-SERIF BOLD CAPITAL ZETA
1D75C MATHEMATICAL SANS-SERIF BOLD CAPITAL ETA
1D75D MATHEMATICAL SANS-SERIF BOLD CAPITAL THETA
1D75E MATHEMATICAL SANS-SERIF BOLD CAPITAL IOTA
1D75F MATHEMATICAL SANS-SERIF BOLD CAPITAL KAPPA
1D760 MATHEMATICAL SANS-SERIF BOLD CAPITAL LAMDA
1D761 MATHEMATICAL SANS-SERIF BOLD CAPITAL MU
1D762 MATHEMATICAL SANS-SERIF BOLD CAPITAL NU
1D763 MATHEMATICAL SANS-SERIF BOLD CAPITAL XI
1D764 MATHEMATICAL SANS-SERIF BOLD CAPITAL OMICRON
1D765 MATHEMATICAL SANS-SERIF BOLD CAPITAL PI
1D766 MATHEMATICAL SANS-SERIF BOLD CAPITAL RHO
1D767 MATHEMATICAL SANS-SERIF BOLD CAPITAL THETA SYMBOL
1D768 MATHEMATICAL SANS-SERIF BOLD CAPITAL SIGMA
1D769 MATHEMATICAL SANS-SERIF BOLD CAPITAL TAU
1D76A MATHEMATICAL SANS-SERIF BOLD CAPITAL UPSILON
1D76B MATHEMATICAL SANS-SERIF BOLD CAPITAL PHI
1D76C MATHEMATICAL SANS-SERIF BOLD CAPITAL CHI
1D76D MATHEMATICAL SANS-SERIF BOLD CAPITAL PSI
1D76E MATHEMATICAL SANS-SERIF BOLD CAPITAL OMEGA
1D76F MATHEMATICAL SANS-SERIF BOLD NABLA
1D770 MATHEMATICAL SANS-SERIF BOLD SMALL ALPHA
1D771 MATHEMATICAL SANS-SERIF BOLD SMALL BETA
1D772 MATHEMATICAL SANS-SERIF BOLD SMALL GAMMA
1D773 MATHEMATICAL SANS-SERIF BOLD SMALL DELTA
1D774 MATHEMATICAL SANS-SERIF BOLD SMALL EPSILON
1D775 MATHEMATICAL SANS-SERIF BOLD SMALL ZETA
1D776 MATHEMATICAL SANS-SERIF BOLD SMALL ETA
1D777 MATHEMATICAL SANS-SERIF BOLD SMALL THETA
1D778 MATHEMATICAL SANS-SERIF BOLD SMALL IOTA
1D779 MATHEMATICAL SANS-SERIF BOLD SMALL KAPPA
1D77A MATHEMATICAL SANS-SERIF BOLD SMALL LAMDA
1D77B MATHEMATICAL SANS-SERIF BOLD SMALL MU
1D77C MATHEMATICAL SANS-SERIF BOLD SMALL NU
1D77D MATHEMATICAL SANS-SERIF BOLD SMALL XI
1D77E MATHEMATICAL SANS-SERIF BOLD SMALL OMICRON
1D77F MATHEMATICAL SANS-SERIF BOLD SMALL PI
1D780 MATHEMATICAL SANS-SERIF BOLD SMALL RHO
1D781 MATHEMATICAL SANS-SERIF BOLD SMALL FINAL SIGMA
1D782 MATHEMATICAL SANS-SERIF BOLD SMALL SIGMA
1D783 MATHEMATICAL SANS-SERIF BOLD SMALL TAU
1D784 MATHEMATICAL SANS-SERIF BOLD SMALL UPSILON
1D785 MATHEMATICAL SANS-SERIF BOLD SMALL PHI
1D786 MATHEMATICAL SANS-SERIF BOLD SMALL CHI
1D787 MATHEMATICAL SANS-SERIF BOLD SMALL PSI
1D788 MATHEMATICAL SANS-SERIF BOLD SMALL OMEGA
1D789 MATHEMATICAL SANS-SERIF BOLD PARTIAL DIFFERENTIAL
1D78A MATHEMATICAL SANS-SERIF BOLD EPSILON SYMBOL
1D78B MATHEMATICAL SANS-SERIF BOLD THETA SYMBOL
1D78C MATHEMATICAL SANS-SERIF BOLD KAPPA SYMBOL
1D78D MATHEMATICAL SANS-SERIF BOLD PHI SYMBOL
1D78E MATHEMATICAL SANS-SERIF BOLD RHO SYMBOL
1D78F MATHEMATICAL SANS-SERIF BOLD PI SYMBOL
1D790 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL ALPHA
1D791 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL BETA
1D792 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL GAMMA
1D793 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL DELTA
1D794 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL EPSILON
1D795 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL ZETA
1D796 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL ETA
1D797 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL THETA
1D798 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL IOTA
1D799 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL KAPPA
1D79A MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL LAMDA
1D79B MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL MU
1D79C MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL NU
1D79D MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL XI
1D79E MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL OMICRON
1D79F MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL PI
1D7A0 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL RHO
1D7A1 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL THETA SYMBOL
1D7A2 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL SIGMA
1D7A3 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL TAU
1D7A4 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL UPSILON
1D7A5 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL PHI
1D7A6 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL CHI
1D7A7 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL PSI
1D7A8 MATHEMATICAL SANS-SERIF BOLD ITALIC CAPITAL OMEGA
1D7A9 MATHEMATICAL SANS-SERIF BOLD ITALIC NABLA
1D7AA MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL ALPHA
1D7AB MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL BETA
1D7AC MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL GAMMA
1D7AD MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL DELTA
1D7AE MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL EPSILON
1D7AF MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL ZETA
1D7B0 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL ETA
1D7B1 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL THETA
1D7B2 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL IOTA
1D7B3 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL KAPPA
1D7B4 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL LAMDA
1D7B5 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL MU
1D7B6 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL NU
1D7B7 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL XI
1D7B8 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL OMICRON
1D7B9 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL PI
1D7BA MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL RHO
1D7BB MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL FINAL SIGMA
1D7BC MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL SIGMA
1D7BD MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL TAU
1D7BE MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL UPSILON
1D7BF MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL PHI
1D7C0 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL CHI
1D7C1 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL PSI
1D7C2 MATHEMATICAL SANS-SERIF BOLD ITALIC SMALL OMEGA
1D7C3 MATHEMATICAL SANS-SERIF BOLD ITALIC PARTIAL DIFFERENTIAL
1D7C4 MATHEMATICAL SANS-SERIF BOLD ITALIC EPSILON SYMBOL
1D7C5 MATHEMATICAL SANS-SERIF BOLD ITALIC THETA SYMBOL
1D7C6 MATHEMATICAL SANS-SERIF BOLD ITALIC KAPPA SYMBOL
1D7C7 MATHEMATICAL SANS-SERIF BOLD ITALIC PHI SYMBOL
1D7C8 MATHEMATICAL SANS-SERIF BOLD ITALIC RHO SYMBOL
1D7C9 MATHEMATICAL SANS-SERIF BOLD ITALIC PI SYMBOL
1D7CA MATHEMATICAL BOLD CAPITAL DIGAMMA
1D7CB MATHEMATICAL BOLD SMALL DIGAMMA
1D7CE MATHEMATICAL BOLD DIGIT ZERO
1D7CF MATHEMATICAL BOLD DIGIT ONE
1D7D0 MATHEMATICAL BOLD DIGIT TWO
1D7D1 MATHEMATICAL BOLD DIGIT THREE
1D7D2 MATHEMATICAL BOLD DIGIT FOUR
1D7D3 MATHEMATICAL BOLD DIGIT FIVE
1D7D4 MATHEMATICAL | |
from twitchio.ext import commands
import twitchio as tw
import config as cf
import inflect
import random as rd
import re
from alphagram import alphagram
from api import predict
from calculator import equity, evaluate
from cipher import cipher
import dictionary
from difflib import SequenceMatcher
from ety import origins
from pager import paginate, truncate
# Map of custom command name -> path of a text file of canned replies;
# event_message opens the file and sends one random line.
custom_commands = cf.custom_commands()
# Inflection engine used to pluralise 'result' in paginated replies.
engine = inflect.engine()
class TwitchBot(commands.Bot):
def __init__(self, config=cf.config()):
super().__init__(api_token=config.api_token, token=config.irc_token,
client_id=config.client_id, nick=config.nick, prefix='!',
initial_channels=config.channels.keys())
self.config = config
def run(self):
dictionary.open_files()
super().run()
async def event_ready(self):
print(f'Wordsmith 0.25 by <NAME> | {self.nick}')
    async def event_message(self, ctx):
        """Route every incoming chat message.

        Order matters: spam is handled first (timeout + quote), then
        file-backed custom commands, and only then the regular command
        dispatcher.
        """
        # Skip authorless system events and the bot's own echoed messages.
        if ctx.author and not ctx.author.name == self.nick:
            print(ctx.content)
            # Fuzzy-match a known follow-bot advert; the 0.8 ratio tolerates
            # small wording/domain variations of the same spam template.
            if SequenceMatcher(None, "Wanna become famous? Buy followers, primes and views on example*com (example . com)!", ctx.content).ratio() >= 0.8:
                if not ctx.author.is_mod:
                    # Moderators are spared the timeout but still get the quote.
                    message = f'/timeout {ctx.author.name}'
                    print(len(message))
                    await ctx.channel.send(message)
                message = '"If fame is only to come after death, I am in no hurry for it." - Martial'
                print(len(message))
                await ctx.channel.send(message)
            elif len(ctx.content) > 1 and ctx.content[0] == '!':
                if ctx.content[1:] in custom_commands.keys():
                    # Custom command: reply with a random line from its file.
                    with open(custom_commands[ctx.content[1:]], 'r') as f:
                        messages = list(f)
                    message = rd.choice(messages).strip()
                    print(len(message))
                    await ctx.channel.send(message)
                else:
                    # Fall through to the decorated @commands.command handlers.
                    await self.handle_commands(ctx)
@commands.command(name='predict')
async def predict(self, ctx, opponent):
if ctx.author.name == ctx.channel.name or ctx.author.is_mod:
msg = predict(self.config, ctx.channel.name, opponent)
else:
msg = f'Command can only be used by {ctx.channel.name} or moderators'
print(len(msg))
await ctx.send(msg)
@commands.command(name='check')
async def check(self, ctx, *words):
if words and len(words) > 0:
lexicon = self.config.channels[ctx.channel.name]['lexicon']
results = []
for word in words:
offensive, word, entry = dictionary.check(word.upper(), self.config.channels[ctx.channel.name]['lexicon'])
if not offensive:
results.append('%s%s is valid VoteYea' % (dictionary.decorate(word, entry, lexicon, '')) if entry else ('%s* not found VoteNay' % word))
msg = truncate(' ', results)
print(len(msg))
await ctx.send(msg)
@commands.command(name='common')
async def common(self, ctx, *words):
if words and len(words) > 0:
lexicon = self.config.channels[ctx.channel.name]['lexicon']
results = []
for word in words:
offensive, word, entry = dictionary.check(word.upper(), self.config.channels[ctx.channel.name]['lexicon'])
if not offensive:
msg = ('%s%s' % dictionary.decorate(word, entry, lexicon, '')) if entry else ('%s*' % word)
results.append((msg + ' is common VoteYea') if dictionary.common(word.lower()) else (msg + ' not common VoteNay'))
msg = truncate(' ', results)
print(len(msg))
await ctx.send(msg)
@commands.command(name='wordnik')
async def wordnik(self, ctx, *words):
if words and len(words) > 0:
lexicon = self.config.channels[ctx.channel.name]['lexicon']
results = []
for word in words:
offensive, word, entry = dictionary.check(word.upper(), self.config.channels[ctx.channel.name]['lexicon'])
if not offensive:
msg = ('%s%s' % dictionary.decorate(word, entry, lexicon, '')) if entry else ('%s*' % word)
results.append((msg + ' is open-source VoteYea') if dictionary.wordnik(word.lower()) else (msg + ' not open-source VoteNay'))
msg = truncate(' ', results)
print(len(msg))
await ctx.send(msg)
@commands.command(name='yawl')
async def yawl(self, ctx, *words):
if words and len(words) > 0:
lexicon = self.config.channels[ctx.channel.name]['lexicon']
results = []
for word in words:
offensive, word, entry = dictionary.check(word.upper(), self.config.channels[ctx.channel.name]['lexicon'])
if not offensive:
msg = ('%s%s' % dictionary.decorate(word, entry, lexicon, '')) if entry else ('%s*' % word)
results.append((msg + ' is open-source VoteYea') if dictionary.yawl(word.lower()) else (msg + ' not open-source VoteNay'))
msg = truncate(' ', results)
print(len(msg))
await ctx.send(msg)
@commands.command(name='equity')
async def equity(self, ctx, *racks):
if racks and len(racks) > 0:
lexicon = self.config.channels[ctx.channel.name]['lexicon']
alphabet = self.config.channels[ctx.channel.name]['alphabet']
results = []
for rack in racks:
if rack:
if len(rack) >= 2 and len(rack) <= 5:
result = equity(rack, lexicon)
if result[0] == '{':
msg = '%s: %s' % (alphagram(rack.upper(), alphabet), result)
else:
msg = '%s: %0.3f' % (alphagram(result[0], alphabet), result[1])
else:
msg = alphagram(rack.upper(), alphabet) + ': ?'
results.append(msg)
msg = truncate('; ', results)
print(len(msg))
await ctx.send(msg)
@commands.command(name='sum')
async def sum(self, ctx, *racks):
if racks and len(racks) > 0:
alphabet = self.config.channels[ctx.channel.name]['alphabet']
results = []
for rack in racks:
if rack:
msg = '%s: %d' % (alphagram(rack.upper(), alphabet), evaluate(rack.upper()))
results.append(msg)
msg = truncate('; ', results)
print(len(msg))
await ctx.send(msg)
@commands.command(name='define')
async def define(self, ctx, *words):
if words and len(words) > 0:
definitions = []
for word in words:
offensive, word, entry = dictionary.check(word.upper(), self.config.channels[ctx.channel.name]['lexicon'])
if offensive:
pass
elif entry:
lexicon = self.config.channels[ctx.channel.name]['lexicon']
word, entry, definition, mark = dictionary.define(word, entry, lexicon, '')
definitions.append('%s%s - %s' % (word, mark, definition))
else:
definitions.append(word + '* - not found')
msg = truncate('; ', definitions)
print(len(msg))
await ctx.send(msg)
@commands.command(name='inflect')
async def inflect(self, ctx, *words):
if words and len(words) > 0:
inflections = []
for word in words:
offensive, word, entry = dictionary.check(word.upper(), self.config.channels[ctx.channel.name]['lexicon'])
if offensive:
pass
elif entry:
inflections.append(dictionary.inflect(word.upper(), entry, self.config.channels[ctx.channel.name]['lexicon']))
else:
inflections.append(word.upper() + '* - not found')
msg = truncate('; ', inflections)
print(len(msg))
await ctx.send(msg)
@commands.command(name='lexicon')
async def lexicon(self, ctx, word):
if ctx.author.name == ctx.channel.name or ctx.author.is_mod:
self.config.channels[ctx.channel.name]['lexicon']=word.lower()
cf.save(self.config)
msg = f'Lexicon changed to {word.lower()}'
else:
msg = f'Command can only be used by {ctx.channel.name} or moderators'
await ctx.send(msg)
@commands.command(name='timeout')
async def timeout(self, ctx, user):
if ctx.author.name == ctx.channel.name or ctx.author.is_mod:
message = f'/timeout {user}'
else:
msg = f'Command can only be used by {ctx.channel.name} or moderators'
print(len(msg))
await ctx.send(msg)
@commands.command(name='origin')
async def origin(self, ctx, word):
offensive, word, entry = dictionary.check(word.upper(), self.config.channels[ctx.channel.name]['lexicon'])
if not offensive:
if entry:
roots = origins(word.lower(), recursive=True)
msg = '; '.join(root.pretty for root in roots) if roots else f'Origins not found for {word}'
else:
msg = f'{word.upper()}* not found'
print(len(msg))
await ctx.send(msg)
@commands.command(name='rhymeswith')
async def rhyme(self, ctx, word, page='1'):
result = dictionary.rhyme(word.upper(), self.config.channels[ctx.channel.name]['lexicon'])
num, msg = paginate(result, self.config.channels[ctx.channel.name]['lexicon'], int(page))
print(len(msg))
await ctx.send(f'{num} %s:\n{msg}' % engine.plural('result', num))
@commands.command(name='related')
async def related(self, ctx, word, page='1'):
result = dictionary.related(word.upper(), self.config.channels[ctx.channel.name]['lexicon'])
num, msg = paginate(result, self.config.channels[ctx.channel.name]['lexicon'], int(page))
print(len(msg))
await ctx.send(f'{num} %s:\n{msg}' % engine.plural('result', num))
@commands.command(name='beginswith')
async def beginswith(self, ctx, hook, page='1'):
result = dictionary.begins_with(hook.upper(), self.config.channels[ctx.channel.name]['lexicon'])
num, msg = paginate(result, self.config.channels[ctx.channel.name]['lexicon'], int(page))
print(len(msg))
await ctx.send(f'{num} %s:\n{msg}' % engine.plural('result', num))
@commands.command(name='startswith')
async def startswith(self, ctx, hook, page='1'):
result = dictionary.begins_with(hook.upper(), self.config.channels[ctx.channel.name]['lexicon'])
num, msg = paginate(result, self.config.channels[ctx.channel.name]['lexicon'], int(page))
print(len(msg))
await ctx.send(f'{num} %s:\n{msg}' % engine.plural('result', num))
@commands.command(name='endswith')
async def endswith(self, ctx, hook, page='1'):
result = dictionary.ends_with(hook.upper(), self.config.channels[ctx.channel.name]['lexicon'])
num, msg = paginate(result, self.config.channels[ctx.channel.name]['lexicon'], int(page))
print(len(msg))
await ctx.send(f'{num} %s:\n{msg}' % engine.plural('result', num))
@commands.command(name='finisheswith')
async def finisheswith(self, ctx, hook, page='1'):
result = dictionary.ends_with(hook.upper(), self.config.channels[ctx.channel.name]['lexicon'])
num, msg = paginate(result, self.config.channels[ctx.channel.name]['lexicon'], int(page))
print(len(msg))
await ctx.send(f'{num} %s:\n{msg}' % engine.plural('result', num))
@commands.command(name='contains')
async def contains(self, ctx, stem, page='1'):
result = dictionary.contains(stem.upper(), self.config.channels[ctx.channel.name]['lexicon'])
num, msg = paginate(result, self.config.channels[ctx.channel.name]['lexicon'], int(page))
print(len(msg))
await ctx.send(f'{num} %s:\n{msg}' % engine.plural('result', num))
@commands.command(name='pattern')
async def pattern(self, ctx, pattern, page='1'):
result = dictionary.pattern(pattern.upper(), self.config.channels[ctx.channel.name]['lexicon'])
num, msg = paginate(result, self.config.channels[ctx.channel.name]['lexicon'], int(page))
print(len(msg))
await ctx.send(f'{num} %s:\n{msg}' % engine.plural('result', num))
@commands.command(name='regex')
async def regex(self, ctx, pattern, page='1'):
result = dictionary.find(pattern.upper(), self.config.channels[ctx.channel.name]['lexicon'])
num, msg = paginate(result, self.config.channels[ctx.channel.name]['lexicon'], int(page))
print(len(msg))
await ctx.send(f'{num} %s:\n{msg}' % engine.plural('result', num))
@commands.command(name='hook')
async def hook(self, ctx, stem):
msg = dictionary.hook(stem.upper(), self.config.channels[ctx.channel.name]['lexicon'])
print(len(msg))
await ctx.send(msg)
@commands.command(name='unhook')
async def unhook(self, ctx, rack, page='1'):
result = dictionary.unhook(rack.upper(), self.config.channels[ctx.channel.name]['lexicon'])
num, msg = paginate(result, self.config.channels[ctx.channel.name]['lexicon'], int(page))
print(len(msg))
await ctx.send(msg)
@commands.command(name='info')
async def info(self, ctx, *stems):
if stems and len(stems) > 0:
lexicon = self.config.channels[ctx.channel.name]['lexicon']
alphabet = self.config.channels[ctx.channel.name]['alphabet']
results = []
for stem in stems:
if stem:
msg = dictionary.info(stem.upper(), lexicon, alphabet)
if len(stem) >= 2 and len(stem) <= 5:
result = equity(stem, lexicon)
if result[0] == '{':
msg += ' Equity: %s' % result
else:
msg += ' Equity: %0.3f' % result[1]
results.append(msg)
msg = truncate('; ', results)
print(len(msg))
await ctx.send(msg)
    @commands.command(name='anagram')
    async def anagram(self, ctx, *racks):
        """List anagrams for each rack, keeping the reply inside chat limits.

        Two budgets are enforced: a per-rack cap of ~465 characters for the
        anagram list, and an overall cap of 500 characters for the combined
        reply (racks past the cap are dropped entirely).
        """
        if racks and len(racks) > 0:
            lexicon = self.config.channels[ctx.channel.name]['lexicon']
            results = []
            msg = None
            # Running size of the combined reply; starts at -2 so the first
            # rack does not pay for a '; ' separator.
            length = -2
            for rack in racks:
                if anagrams := dictionary.anagram(rack.upper(), lexicon):
                    count = len(anagrams)
                    msg = f'{count} %s' % engine.plural('result', count)
                    for n, element in enumerate(anagrams):
                        word, entry = element
                        # Stop listing once this rack's message would exceed
                        # the per-rack character budget.
                        if length + len(msg) + len(word) > 465:
                            msg += f' Limited to first {n} results'
                            break
                        msg += ' %s%s' % dictionary.decorate(word, entry, lexicon, '')
                else:
                    msg = 'No anagrams found'
                length += len(msg) + 2
                # Overall cap: once the reply reaches 500 characters, the
                # current rack's message (and all remaining racks) are dropped.
                if length >= 500:
                    break
                results.append(msg)
            msg = truncate('; ', results)
            print(len(msg))
            await ctx.send(msg)
@commands.command(name='bingo')
async def bingo(self, ctx, length='7'):
msg = dictionary.random_word(int(length), self.config.channels[ctx.channel.name]['lexicon'])
print(len(msg))
await ctx.send(msg)
@commands.command(name='random')
async def random(self, ctx, option='0'):
lexicon = self.config.channels[ctx.channel.name]['lexicon']
if option.isnumeric():
msg = dictionary.random_word(int(option), lexicon)
else:
word, entry = rd.choice(dictionary.related(option.upper(), lexicon))
word, _, definition, mark = dictionary.define(word, entry, lexicon, '')
msg = '%s%s - %s' % (word, mark, definition)
print(len(msg))
await ctx.send(msg)
@commands.command(name='pronounce')
async def pronounce(self, ctx, stem):
offensive, | |
sheet.write(idx + 1, 4, nexttower['tower_name'])
sheet.write(idx + 1, 5, toweridx)
#if idx==0:
#sheet.write(idx + 1 , 0, str(uuid.UUID('0'*32)))
#sheet.write(idx + 1 , 1, u'变电站')
#sheet.write(idx + 1 , 2, 0)
#sheet.write(idx + 1 , 3, tower['id'])
#sheet.write(idx + 1 , 4, tower['tower_name'])
#sheet.write(idx + 1 , 5, toweridx)
#if idx+1 < len(towers):
#nexttower = towers[idx+1]
#sheet.write(idx + 1 + 1 , 0, tower['id'])
#sheet.write(idx + 1 + 1 , 1, tower['tower_name'])
#sheet.write(idx + 1 + 1 , 2, toweridx)
#m = re.search('(\d+)', nexttower['tower_name'])
#toweridx = ''
#if m:
#toweridx = m.group(0)
#sheet.write(idx + 1 + 1 , 3, nexttower['id'])
#sheet.write(idx + 1 + 1 , 4, nexttower['tower_name'])
#sheet.write(idx + 1 + 1 , 5, toweridx)
#else:
#sheet.write(idx + 1 + 1 , 0, tower['id'])
#sheet.write(idx + 1 + 1 , 1, tower['tower_name'])
#sheet.write(idx + 1 + 1 , 2, toweridx)
#sheet.write(idx + 1 + 1 , 3, str(uuid.UUID('1'*32)))
#sheet.write(idx + 1 + 1 , 4, u'变电站')
#sheet.write(idx + 1 + 1 , 5, 0)
p = os.path.join(XLS_REPORT_DIR, line['line_name'] + u'_待填.xls')
book.save(p)
def import_from_excel():
    """Rebuild tower and segment rows for five fixed transmission lines.

    Python 2 only (``ur''`` literals). For each hard-coded line name, reads
    degree/minute/second coordinate columns from its .xls workbook, then
    deletes and re-inserts the line's TABLE_TOWER / TABLE_SEGMENT rows over
    ODBC. Returns {line_id: [tower dict, ...]}, or None if the database
    connection fails.
    """
    def dms2d(d, m, s):
        # Convert degrees/minutes/seconds to decimal degrees.
        ret = float(d) + float(m)/60.0 + float(s)/3600.0
        return ret
    ret = {}
    #ret['line'] = []
    #ret['towers'] = {}
    # Line name -> source workbook (absolute Windows paths).
    filepaths = {u'永甘甲线':ur'D:\gis\南网\昭通\云南_昭通_500_永甘甲线.xls',
                 u'永甘乙线': ur'D:\gis\南网\昭通\云南_昭通_500_永甘乙线.xls',
                 u'镇永甲线': ur'D:\gis\南网\昭通\云南_昭通_500_镇永甲线.xls',
                 u'甘大线': ur'D:\gis\南网\昭通\云南_昭通_220_甘大线.xls',
                 u'甘镇线': ur'D:\gis\南网\昭通\云南_昭通_220_甘镇线.xls',
                 }
    for key in filepaths.keys():
        lines = odbc_get_records('TABLE_LINE', "line_name='%s'" % key)
        #print('%s=%s' %(key,lines[0]['id']))
        line_id = lines[0]['id']
        book = xlrd.open_workbook(filepaths[key])
        sheet_line = book.sheet_by_name('Sheet1')
        ret[line_id] = []
        print(key)
        for i in range(sheet_line.nrows):
            try:
                # NOTE(review): reads row i+1, so the last iteration indexes one
                # row past the sheet; the bare except below silently absorbs
                # that (and any malformed row).
                ret[line_id].append( {'line_id':line_id,
                                      'tower_name': key + str(int(sheet_line.cell_value(i+1,0))) + '#',
                                      'geo_x':dms2d(sheet_line.cell_value(i+1,1), sheet_line.cell_value(i+1,2), sheet_line.cell_value(i+1,3) ),
                                      'geo_y':dms2d(sheet_line.cell_value(i+1,4), sheet_line.cell_value(i+1,5), sheet_line.cell_value(i+1,6) ),
                                      } )
            except:
                pass
    conn = None
    cur = None
    try:
        conn = pypyodbc.connect(ODBC_STRING)
        cur = conn.cursor()
    except:
        print(sys.exc_info()[1])
        return
    for key in ret.keys():
        # Wipe the line's existing rows before re-inserting.
        cur.execute('''DELETE FROM TABLE_TOWER WHERE line_id='%s' ''' % key)
        #cur.execute('''DELETE FROM TABLE_TOWER_RELATION WHERE line_id='%s' ''' % key)
        cur.execute('''DELETE FROM TABLE_SEGMENT WHERE line_id='%s' ''' % key)
        towerid, tower_startid, tower_endid = None, None, None
        for tower in ret[key]:
            # NOTE(review): .index() is O(n) and returns the first equal dict;
            # enumerate() would be the usual form here.
            idx = ret[key].index(tower)
            tower_endid = towerid = str(uuid.uuid4()).upper()
            # NOTE(review): idx never equals len(ret[key]) (max is len-1), so
            # this branch is dead code.
            if idx == len(ret[key]):
                tower_startid = None
            # tower_startid is None on the first pass, so segments link each
            # tower to its predecessor only.
            if tower_startid and tower_endid:
                #cur.execute('''INSERT INTO TABLE_TOWER_RELATION VALUES(?, ?, ?, ?)''',(str(uuid.uuid4()).upper(), key, tower_startid, tower_endid))
                cur.execute('''INSERT INTO TABLE_SEGMENT VALUES(?, ?, ?, ?, ?, ?, ?, ?, NULL, NULL, NULL)''',(str(uuid.uuid4()).upper(), key, tower_startid, tower_endid, 0,0,0,0))
            cur.execute('''INSERT INTO TABLE_TOWER VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',(towerid, key, None, tower['tower_name'], str(uuid.UUID('0'*32)), tower['geo_x'],tower['geo_y'], 0, 0, None, 0, 0, 0, 0, 0))
            tower_startid = tower_endid
        cur.commit()
    cur.close()
    conn.close()
    return ret
def import_from_excel1():
    """Rebuild tower and segment rows for the two 220kV Yongfa lines.

    Python 2 only (``ur''`` literals, ``unicode``). Reads model/height/span
    columns from one workbook (one sheet per line), then deletes and
    re-inserts each line's TABLE_TOWER / TABLE_SEGMENT rows over ODBC.
    Returns {line_id: [tower dict, ...]}, or None if the database connection
    fails.
    """
    ret = {}
    filepath = ur'D:\gis\南网\昭通\昭通_220kv_永发I,II回线.xls'
    book = xlrd.open_workbook(filepath)
    for key in [u'永发I回线',u'永发II回线']:
        lines = odbc_get_records('TABLE_LINE', "line_name='%s'" % key)
        line_id = lines[0]['id']
        sheet_line = book.sheet_by_name(key)
        ret[line_id] = []
        #print(key)
        for i in range(sheet_line.nrows):
            try:
                # Only every other row carries tower data; reads row i+1, so
                # the last iteration indexes past the sheet and is silently
                # absorbed by the bare except below.
                if (i+1)%2>0:
                    ret[line_id].append( {'line_id':line_id,
                                          'tower_name': key + unicode(int(sheet_line.cell_value(i+1,0))) + '#',
                                          'model_code': sheet_line.cell_value(i+1,1) ,
                                          'denomi_height': float(sheet_line.cell_value(i+1,2)),
                                          'horizontal_span': int(sheet_line.cell_value(i+1,3)),
                                          'vertical_span': int(sheet_line.cell_value(i+1,4)),
                                          } )
                    print(key + unicode(int(sheet_line.cell_value(i+1,0))) + '#')
            except:
                pass
        #print('%s=%d' % (key, len(ret[line_id])))
    conn = None
    cur = None
    try:
        conn = pypyodbc.connect(ODBC_STRING)
        cur = conn.cursor()
    except:
        print(sys.exc_info()[1])
        return
    for key in ret.keys():
        # Wipe the line's existing rows before re-inserting.
        cur.execute('''DELETE FROM TABLE_TOWER WHERE line_id='%s' ''' % key)
        #cur.execute('''DELETE FROM TABLE_TOWER_RELATION WHERE line_id='%s' ''' % key)
        cur.execute('''DELETE FROM TABLE_SEGMENT WHERE line_id='%s' ''' % key)
        towerid, tower_startid, tower_endid = None, None, None
        for tower in ret[key]:
            # NOTE(review): .index() is O(n) and returns the first equal dict;
            # enumerate() would be the usual form here.
            idx = ret[key].index(tower)
            tower_endid = towerid = str(uuid.uuid4()).upper()
            # NOTE(review): idx never equals len(ret[key]) (max is len-1), so
            # this branch is dead code.
            if idx == len(ret[key]):
                tower_startid = None
            # tower_startid is None on the first pass, so segments link each
            # tower to its predecessor only.
            if tower_startid and tower_endid:
                #cur.execute('''INSERT INTO TABLE_TOWER_RELATION VALUES(?, ?, ?, ?)''',(str(uuid.uuid4()).upper(), key, tower_startid, tower_endid))
                cur.execute('''INSERT INTO TABLE_SEGMENT VALUES(?, ?, ?, ?, ?, ?, ?, ?, NULL, NULL, NULL)''',(str(uuid.uuid4()).upper(), key, tower_startid, tower_endid, 0,0,0,0))
            cur.execute('''INSERT INTO TABLE_TOWER VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',(towerid, key, None, tower['tower_name'], str(uuid.UUID('0'*32)), None, None, None, 0, tower['model_code'], tower['denomi_height'], tower['horizontal_span'], tower['vertical_span'], 0, 0))
            tower_startid = tower_endid
        cur.commit()
    cur.close()
    conn.close()
    return ret
def excel_to_database(line_name):
lines = odbc_get_records('TABLE_LINE',"line_name='%s'" % line_name)
line_id = lines[0]['id']
DIR_EXCLE = ur'G:\work\csharp\kmgdgis\doc'
filepath = os.path.join(DIR_EXCLE , line_name + u'_已填.xls')
book = xlrd.open_workbook(filepath)
sqls = []
#线路信息
sheet_line = book.sheet_by_name(line_name + u'_线路信息')
if sheet_line.nrows>1:
length, manage_length = 0., 0.
line_code = sheet_line.cell_value(1, 1)
if isinstance(sheet_line.cell_value(1, 2), float):
length = float(sheet_line.cell_value(1, 2))
if isinstance(sheet_line.cell_value(1, 3), float):
manage_length = float(sheet_line.cell_value(1, 3))
start_point = sheet_line.cell_value(1, 4)
end_point = sheet_line.cell_value(1, 5)
status = sheet_line.cell_value(1, 6)
maintenace = sheet_line.cell_value(1, 7)
management = sheet_line.cell_value(1, 8)
owner = sheet_line.cell_value(1, 9)
team = sheet_line.cell_value(1, 10)
responsible = sheet_line.cell_value(1, 11)
investor = sheet_line.cell_value(1, 12)
designer = sheet_line.cell_value(1, 13)
supervisor = sheet_line.cell_value(1, 14)
constructor = sheet_line.cell_value(1, 15)
operator = sheet_line.cell_value(1, 16)
finish_date = sheet_line.cell_value(1, 17)
production_date = sheet_line.cell_value(1, 18)
decease_date = sheet_line.cell_value(1, 19)
if isinstance(finish_date, str):
finish_date = '1900-01-01'
if isinstance(production_date, str):
production_date = '1900-01-01'
if isinstance(decease_date, str):
decease_date = '1900-01-01'
dtfmt = '%Y-%m-%d'
if isinstance(finish_date, float):
tu = xlrd.xldate_as_tuple(finish_date, book.datemode)
dt = datetime.datetime(*tu)
finish_date = dt.strftime(dtfmt)
if isinstance(production_date, float):
tu = xlrd.xldate_as_tuple(production_date, book.datemode)
dt = datetime.datetime(*tu)
production_date = dt.strftime(dtfmt)
if isinstance(decease_date, float):
tu = xlrd.xldate_as_tuple(decease_date, book.datemode)
dt = datetime.datetime(*tu)
decease_date = dt.strftime(dtfmt)
sql = '''UPDATE TABLE_LINE SET
line_code='%s',
length=%f,
manage_length=%f,
start_point='%s',
end_point='%s',
status='%s',
maintenace='%s',
management='%s',
owner='%s',
team='%s',
responsible='%s',
investor='%s',
designer='%s',
supervisor='%s',
constructor='%s',
operator='%s',
finish_date=NULL,
production_date=NULL,
decease_date=NULL
WHERE line_name='%s' ''' % (
line_code,
length,
manage_length,
start_point,
end_point,
status,
maintenace,
management,
owner,
team,
responsible,
investor,
designer,
supervisor,
constructor,
operator,
#finish_date,
#production_date,
#decease_date,
line_name
)
sqls.append(sql)
#杆塔信息
sheet_tower = book.sheet_by_name(line_name + u'_杆塔信息')
for i in range(1, sheet_tower.nrows):
tower_id = sheet_tower.cell_value(i, 0)
tower_name = sheet_tower.cell_value(i, 1)
if len(tower_id)>0:
tower_code = sheet_tower.cell_value(i, 3)
model_code = sheet_tower.cell_value(i, 4)
denomi_height = 0.
if isinstance(sheet_tower.cell_value(i, 5), float):
denomi_height = sheet_tower.cell_value(i, 5)
horizontal_span = 0
if isinstance(sheet_tower.cell_value(i, 6), float):
horizontal_span = int(sheet_tower.cell_value(i, 6))
vertical_span = 0
if isinstance(sheet_tower.cell_value(i, 7), float):
vertical_span = int(sheet_tower.cell_value(i, 7))
building_level = 0.
if isinstance(sheet_tower.cell_value(i, 8), float):
building_level = sheet_tower.cell_value(i, 8)
line_rotate = 0.
if isinstance(sheet_tower.cell_value(i, 9), float):
line_rotate = sheet_tower.cell_value(i, 9)
sql = '''
UPDATE TABLE_TOWER SET
tower_code = '%s',
model_code = '%s',
denomi_height = %f,
horizontal_span = %d,
vertical_span = %d,
building_level = %f,
line_rotate = %f
WHERE id='%s'
''' % (
tower_code,
model_code,
denomi_height,
horizontal_span,
vertical_span,
building_level,
line_rotate,
tower_id
)
sqls.append(sql)
#杆塔附件信息
sheet_tower_attach = book.sheet_by_name(line_name + u'_杆塔附件信息')
for i in range(1, sheet_tower_attach.nrows):
tower_id = sheet_tower_attach.cell_value(i, 0)
if len(tower_id)>0:
sql = '''DELETE FROM TABLE_TOWER_METALS WHERE tower_id='%s' ''' % tower_id
sqls.append(sql)
for j in range(3, sheet_tower_attach.ncols):
if j in range(3,15,3):
if len(sheet_tower_attach.cell_value(i, j))>0:
if isinstance(sheet_tower_attach.cell_value(i, j+1), float) and isinstance(sheet_tower_attach.cell_value(i, j+2), float):
sql = '''INSERT INTO TABLE_TOWER_METALS VALUES( '%s', '%s', '%s', '%s', '%s', %d, %d, NULL)
''' % (
str(uuid.uuid4()).upper(),
tower_id,
u'绝缘子串',
u'导线绝缘子串',
sheet_tower_attach.cell_value(i, j),
int(sheet_tower_attach.cell_value(i, j+1)),
int(sheet_tower_attach.cell_value(i, j+2))
)
sqls.append(sql)
if j in range(15,27,3):
if len(sheet_tower_attach.cell_value(i, j))>0:
if isinstance(sheet_tower_attach.cell_value(i, j+1), float) and isinstance(sheet_tower_attach.cell_value(i, j+2), float):
sql = '''
INSERT INTO TABLE_TOWER_METALS VALUES( '%s', '%s', '%s', '%s', '%s', %d, %d, NULL)
''' % (
str(uuid.uuid4()).upper(),
tower_id,
u'绝缘子串',
u'跳线绝缘子串',
sheet_tower_attach.cell_value(i, j),
int(sheet_tower_attach.cell_value(i, j+1)),
int(sheet_tower_attach.cell_value(i, j+2)),
)
sqls.append(sql)
if j in range(27, 30, 3):
if len(sheet_tower_attach.cell_value(i, j))>0:
if isinstance(sheet_tower_attach.cell_value(i, j+1), float) and isinstance(sheet_tower_attach.cell_value(i, j+2), float):
sql = '''
INSERT INTO TABLE_TOWER_METALS VALUES( '%s', '%s', '%s', '%s', '%s', %d, NULL, %f)
''' % (
str(uuid.uuid4()).upper(),
tower_id,
u'防震锤',
u'导线小号侧',
sheet_tower_attach.cell_value(i, j),
int(sheet_tower_attach.cell_value(i, j+1)),
sheet_tower_attach.cell_value(i, j+2)
)
sqls.append(sql)
if j in range(30, 33, 3):
if len(sheet_tower_attach.cell_value(i, j))>0:
if isinstance(sheet_tower_attach.cell_value(i, j+1), float) and isinstance(sheet_tower_attach.cell_value(i, j+2), float):
sql = '''
INSERT INTO TABLE_TOWER_METALS VALUES( '%s', '%s', '%s', '%s', '%s', %d, NULL, %f)
''' % (
str(uuid.uuid4()).upper(),
tower_id,
u'防震锤',
u'导线大号侧',
sheet_tower_attach.cell_value(i, j),
int(sheet_tower_attach.cell_value(i, j+1)),
sheet_tower_attach.cell_value(i, j+2)
)
sqls.append(sql)
if j in range(33, 36, 3):
if len(sheet_tower_attach.cell_value(i, j))>0:
if isinstance(sheet_tower_attach.cell_value(i, j+1), float) and isinstance(sheet_tower_attach.cell_value(i, j+2), float):
sql = '''
INSERT INTO TABLE_TOWER_METALS VALUES( '%s', '%s', '%s', '%s', '%s', %d, NULL, %f)
''' % (
str(uuid.uuid4()).upper(),
tower_id,
u'防震锤',
| |
#!/usr/bin/env python
"""
Test cases for tournament.py
These tests are not exhaustive, but they should cover the majority of the cases.
"""
import unittest
from tournament import *
class TestTournament(unittest.TestCase):
def setUp(self):
delete_matches()
delete_players()
def tearDown(self):
delete_matches()
delete_players()
def test_registration_and_count(self):
"""
Test for initial player count,
player count after 1 and 2 players registered,
player count after players deleted.
"""
c = count_players()
self.assertTrue(c == 0, 'count_players should return numeric zero, but returned {}'.format(c))
register_player("<NAME>")
c = count_players()
self.assertTrue(c == 1, 'count_players should return 1, but returned {}'.format(c))
register_player("<NAME>")
c = count_players()
self.assertTrue(c == 2, 'count_players should return 2, but returned {}'.format(c))
delete_players()
c = count_players()
self.assertTrue(c == 0, 'count_players should return numeric zero, but returned {}'.format(c))
def test_multi_tournament_registration_count_delete(self):
"""
Tests counting registered players across multiple tournaments
player count after 2 players for one tournament, 2 players for another tournament and one player for default tournament
player count after a tournament is deleted
player count after the other tournament is deleted
"""
# TODO: Register current players for additional tournaments
register_player('awesome person', tournament='t1')
register_player('terminator', tournament='t1')
register_player('<NAME>')
register_player('lonely', tournament='t2')
register_player('not lonely', tournament='t2')
c = count_registered_players()
c2 = count_players()
self.assertFalse(c != 5, 'count_registered_players should return 5 from across 3 tournaments')
self.assertFalse(c2 != c, 'count_players should return 5 got instead {}'.format(c2))
delete_tournament(tournament='t1')
c = count_registered_players()
c2 = count_players()
self.assertFalse(c != 3, 'count_registered_players should return 3 from across 2 tournaments')
self.assertFalse(c2 != 5, 'count_players should return 5 got instead {}'.format(c2))
delete_tournament(tournament='t2')
c = count_registered_players()
c2 = count_players()
self.assertFalse(c != 1, 'count_registered_players should return 1 from across default tournament')
self.assertFalse(c2 != 5, 'count_players should return 5 got instead {}'.format(c2))
p_id = get_player('terminator')
register_player_to_tournament(p_id)
p_id = get_player('not lonely')
register_player_to_tournament(p_id)
c = count_registered_players()
self.assertFalse(c != 3,
'Two existing players should be registered to the default tournament, got: {}'.format(c))
c_def = count_registered_players('default')
self.assertFalse(c != c_def, 'Tournament default should be the only tournament with registered players')
register_player('<NAME>', '<NAME>')
c_new = count_registered_players('<NAME>')
self.assertFalse(c_new > c,
'"Crazy Wasteland" should have 1 registered player has {} "default" should have 3 registered players has {}'.format(
c_new, c_def))
    def test_get_tournament_and_player_id(self):
        """
        Tests registration and retrieval of tournament id's and player id's:
        ids are unique, serially increasing, gone after deletion, and a
        re-registered tournament gets a fresh (larger) id.
        """
        delete_tournament()
        t_id = get_tournament('default')
        self.assertFalse(t_id, 'No tournament should be returned after deletions.')
        register_tournament('test1')
        t_id = get_tournament('test1')
        self.assertTrue(t_id, 'Tournament was not found')
        register_tournament('test2')
        t_id_test1 = get_tournament('test1')
        t_id_test2 = get_tournament('test2')
        self.assertFalse(t_id_test1 == t_id_test2,
                         "get_tournament is supposed to return two unique id's for 2 tournaments")
        self.assertFalse(t_id_test1 > t_id_test2,
                         'Tournament test1 should return a serial smaller than tournament test2.'
                         'Instead returned test1 : {} test2 : {}'.format(t_id_test1, t_id_test2))
        register_tournament('test3')
        t_id_test3 = get_tournament('test3')
        t_id_test2 = get_tournament('test2')
        t_id_test1 = get_tournament('test1')
        self.assertFalse(t_id_test3 == t_id_test2 or t_id_test2 == t_id_test1 or t_id_test1 == t_id_test3,
                         "get_tournament is supposed to return three unique id's for 3 tournaments")
        # NOTE(review): this format string has two placeholders but is given
        # three arguments; str.format ignores the extra one.
        self.assertFalse(t_id_test3 < t_id_test2 < t_id_test1,
                         'Tournament test1 should return a serial smaller than tournament test2.'
                         'Instead returned test1 : {} test2 : {}'.format(t_id_test1, t_id_test2, t_id_test3))
        delete_tournament('test3')
        t_id_test = get_tournament('test3')
        self.assertFalse(t_id_test,
                         'Tournament is supposed to be deleted instead "test3" returned {}'.format(t_id_test3))
        delete_tournament()
        t_id_test2 = get_tournament('test2')
        t_id_test1 = get_tournament('test1')
        self.assertFalse(t_id_test2 or t_id_test1, 'Tournaments are not correctly deleted')
        register_tournament('test2')
        t_id_test4 = get_tournament('test2')
        # t_id_test3 still holds the id from before deletion; a serial id
        # sequence must not reuse it.
        self.assertFalse(t_id_test4 < t_id_test3, 'New tournament "test2" should return a serial larger than'
                                                  'former "test3" id.')
        p_id = get_player('Not Here')
        self.assertFalse(p_id, 'Got an id for a player that does not exist.')
        registered_id = register_player('I M Here')
        p_id = get_player('I M Here')
        self.assertFalse(registered_id != p_id,
                         'Player id does not match ID returned({}) when player was registered({})'.format(p_id,
                                                                                                          registered_id))
def test_standings_before_matches(self):
"""
Test to ensure players are properly represented in standings prior
to any matches being reported.
"""
register_player("<NAME>")
register_player("<NAME>")
standings = player_standings()
# TODO: Add testing for a 5-tuple return
self.assertFalse(len(standings) < 2, "Players should appear in player_standings even before "
"they have played any matches.")
self.assertFalse(len(standings) > 2, "Only registered players should appear in standings.")
self.assertFalse(len(standings[0]) != 4, "Each player_standings row should have four columns.")
[(id1, name1, wins1, matches1), (id2, name2, wins2, matches2)] = standings
self.assertFalse(set([name1, name2]) != set(["<NAME>", "<NAME>"]),
"Registered players' names should appear in standings, "
"even if they have no matches played.")
def test_multi_standings_before_match(self):
"""
Similar test to test_standings_before_matches but with multiple tournaments
"""
register_player('<NAME>', 'wrestling')
register_player('<NAME>', 'wrestling')
register_player('<NAME>', 'CE-THROWDOWN')
register_player('<NAME>', 'CE-THROWDOWN')
standings1 = player_standings('wrestling')
self.assertFalse(len(standings1) < 2, "Two Players should appear in player_standings even before "
"they have played any matches.")
standings2 = player_standings('CE-THROWDOWN')
self.assertFalse(len(standings2) < 2, "Two Players should appear in player_standings even before "
"they have played any matches.")
self.assertFalse(len(standings1[0]) != 4, "Each player_standings row should have four columns.")
self.assertFalse(len(standings2[0]) != 4, "Each player_standings row should have four columns.")
[(id1, name1, wins1, matches1), (id2, name2, wins2, matches2)] = standings1
self.assertFalse(set([name1, name2]) != set(['<NAME>', '<NAME>']),
"Registered players' names should appear in standings, "
"even if they have no matches played.")
[(id1, name1, wins1, matches1), (id2, name2, wins2, matches2)] = standings2
self.assertFalse(set([name1, name2]) != set(['<NAME>', '<NAME>']),
"Registered players' names should appear in standings, "
"even if they have no matches played.")
def test_report_matches(self):
"""
Test that matches are reported properly.
Test to confirm matches are deleted properly.
"""
register_player("<NAME>")
register_player("<NAME>")
register_player("<NAME>")
register_player("<NAME>")
standings = player_standings()
[id1, id2, id3, id4] = [row[0] for row in standings]
report_match({id1: True, id2: False})
report_match({id3: True, id4: False})
standings = player_standings()
for (i, n, w, m) in standings:
self.assertFalse(m != 1, "Each player should have one match recorded.")
if i in (id1, id3):
self.assertFalse(w != 1, "Each match winner should have one win recorded.")
elif i in (id2, id4):
self.assertFalse(w != 0, "Each match loser should have zero wins recorded.")
delete_matches()
standings = player_standings()
self.assertFalse(len(standings) != 4, "Match deletion should not change number of players in standings.")
for (i, n, w, m) in standings:
if m != 0:
self.assertFalse(m != 0, "After deleting matches, players should have zero matches recorded.")
self.assertFalse(w != 0, "After deleting matches, players should have zero wins recorded.")
    def test_swiss_pairing(self):
        """
        Test that pairings are generated properly both before and after match
        reporting: after one round, winners must be paired with winners and
        losers with losers.
        """
        register_player("<NAME>")
        register_player("Fluttershy")
        register_player("Applejack")
        register_player("<NAME>")
        register_player("Rarity")
        register_player("<NAME>")
        register_player("<NAME>")
        register_player("<NAME>")
        standings = player_standings()
        [id1, id2, id3, id4, id5, id6, id7, id8] = [row[0] for row in standings]
        pairings = swiss_pairings()
        self.assertFalse(len(pairings) != 4,
                         "P1 Test: For eight players, swiss_pairings should return 4 pairs. Got {pairs}".format(
                             pairs=len(pairings)))
        # Report one round: odd-indexed players win, even-indexed lose.
        report_match({id1: True, id2: False})
        report_match({id3: True, id4: False})
        report_match({id5: True, id6: False})
        report_match({id7: True, id8: False})
        pairings = swiss_pairings()
        [(pid1, pname1, pid2, pname2), (pid3, pname3, pid4, pname4), (pid5, pname5, pid6, pname6),
         (pid7, pname7, pid8, pname8)] = pairings
        # Every legal second-round pair: winner-vs-winner or loser-vs-loser.
        possible_pairs = set([frozenset([id1, id3]), frozenset([id1, id5]),
                              frozenset([id1, id7]), frozenset([id3, id5]),
                              frozenset([id3, id7]), frozenset([id5, id7]),
                              frozenset([id2, id4]), frozenset([id2, id6]),
                              frozenset([id2, id8]), frozenset([id4, id6]),
                              frozenset([id4, id8]), frozenset([id6, id8])
                              ])
        # frozensets make the comparison order-insensitive within a pair.
        actual_pairs = set(
            [frozenset([pid1, pid2]), frozenset([pid3, pid4]), frozenset([pid5, pid6]), frozenset([pid7, pid8])])
        for pair in actual_pairs:
            if pair not in possible_pairs:
                self.fail('Pair: {} not a possible pair.'.format(str(pair)))
def test_multi_swiss_pairing(self):
"""
Tests that matches are reported properly before and after reporting for multiple tournaments
Tests that swiss pairing is properly conducted for multiple tournaments in basic scenarios (no player cross over)
"""
register_player("<NAME>", "Risk")
register_player("<NAME>", "Risk")
register_player("<NAME>", "Risk")
register_player("<NAME>", "Risk")
register_player("<NAME>", "Risk")
register_player("<NAME>", "Risk")
register_player("<NAME>", "Risk")
register_player("<NAME>", "Risk")
register_player("Dollar", "Money")
register_player("Yen", "Money")
register_player("Euro", "Money")
register_player("Yuan", "Money")
standings1 = player_standings("Risk")
standings2 = player_standings("Money")
for standing1 in standings1:
for standing2 in standings2:
self.assertFalse(standings1[0] == standings2[0],
'Expected no player crossover. "Risk" tournament, player:{} "Money" tournament, player:{}'.format(
standing1, standing2))
[id1, id2, id3, id4, id5, id6, id7, id8] = [row[0] for row in standings1]
report_match({id1: True, id2: False}, "Risk")
report_match({id3: True, id4: False}, "Risk")
report_match({id5: True, id6: False}, "Risk")
report_match({id7: True, id8: False}, "Risk")
pairings = swiss_pairings("Risk")
[(pid1, pname1, pid2, pname2), (pid3, pname3, pid4, pname4), (pid5, pname5, pid6, pname6),
(pid7, pname7, pid8, pname8)] = pairings
possible_pairs = set([frozenset([id1, id3]), frozenset([id1, id5]),
frozenset([id1, id7]), frozenset([id3, id5]),
frozenset([id3, id7]), frozenset([id5, id7]),
frozenset([id2, id4]), frozenset([id2, id6]),
frozenset([id2, id8]), frozenset([id4, id6]),
frozenset([id4, id8]), frozenset([id6, id8])
])
actual_pairs = set(
[frozenset([pid1, pid2]), frozenset([pid3, pid4]), frozenset([pid5, pid6]), frozenset([pid7, pid8])])
for pair in actual_pairs:
if pair not in possible_pairs:
self.fail('Pair: {} not a | |
from django.core.exceptions import PermissionDenied
from django.views.decorators.http import require_GET
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.urls import reverse
from django.conf import settings
from programmes.models import (
Programme,
Lot,
LogementEDD,
ReferenceCadastrale,
)
from programmes.forms import (
ProgrammeSelectionForm,
ProgrammeForm,
ProgrammeCadastralForm,
ProgrammeEDDForm,
LogementEDDFormSet,
ReferenceCadastraleFormSet,
)
from conventions.models import Convention
from conventions.forms import (
UploadForm,
)
from . import utils
from . import upload_objects
def select_programme_create(request):
    """Handle the programme-selection step when creating a convention.

    On a valid POST, either reuses an existing lot (permission checked
    against that lot) or creates a new Programme + Lot, then creates the
    Convention and returns ``{"success": SUCCESS, "convention": ...}``.
    On GET or an invalid form, returns the selection context with
    ``{"success": ERROR, ...}`` for the template.
    """
    if request.method == "POST":
        form = ProgrammeSelectionForm(request.POST)
        if form.is_valid():
            existing_programme = form.cleaned_data["existing_programme"]
            if existing_programme == "selection":
                # Reuse an existing lot; permission is checked per object.
                lot = Lot.objects.get(uuid=form.cleaned_data["lot_uuid"])
                request.user.check_perm("convention.add_convention", lot)
            else:
                # Create the programme and lot from scratch.
                request.user.check_perm("convention.add_convention")
                programme = Programme.objects.create(
                    nom=form.cleaned_data["nom"],
                    code_postal=form.cleaned_data["code_postal"],
                    ville=form.cleaned_data["ville"],
                    bailleur_id=form.cleaned_data["bailleur"],
                )
                # NOTE(review): objects.create() already saves; this save()
                # looks redundant — confirm before removing.
                programme.save()
                lot = Lot.objects.create(
                    nb_logements=form.cleaned_data["nb_logements"],
                    financement=form.cleaned_data["financement"],
                    type_habitat=form.cleaned_data["type_habitat"],
                    programme=programme,
                    bailleur_id=form.cleaned_data["bailleur"],
                )
                # NOTE(review): redundant for the same reason as above.
                lot.save()
            convention = Convention.objects.create(
                lot=lot,
                programme_id=lot.programme_id,
                bailleur_id=lot.bailleur_id,
                financement=lot.financement,
            )
            if existing_programme != "selection":
                # Alert staff when a convention is created entirely from scratch.
                _send_email_staff(request, convention)
            # NOTE(review): redundant save after objects.create(), as above.
            convention.save()
            # All is OK -> Next:
            return {
                "success": utils.ReturnStatus.SUCCESS,
                "convention": convention,
            }
    # If this is a GET (or any other method) create the default form.
    else:
        form = ProgrammeSelectionForm(
            initial={
                "existing_programme": "selection",
            }
        )
    programmes = _conventions_selection(request)
    return {
        "success": utils.ReturnStatus.ERROR,
        "programmes": programmes,
        "form": form,
        "editable": request.user.has_perm("convention.add_convention"),
        "bailleurs": request.user.bailleurs(),
    }  # render(request, "conventions/selection.html", {'form': form, 'programmes': programmes})
def _send_email_staff(request, convention):
    """Email the APiLos staff when a convention is created from scratch.

    Renders both a text and an HTML version of the alert template and
    sends them as a multipart message.  Side effect only; returns None.
    """
    # Build an absolute link to the convention's recap page for the email body.
    convention_url = request.build_absolute_uri(
        reverse("conventions:recapitulatif", args=[convention.uuid])
    )
    from_email = "<EMAIL>"
    text_content = render_to_string(
        "emails/alert_create_convention.txt",
        {
            "convention_url": convention_url,
            "convention": convention,
            "programme": convention.programme,
            "user": request.user,
        },
    )
    html_content = render_to_string(
        "emails/alert_create_convention.html",
        {
            "convention_url": convention_url,
            "convention": convention,
            "programme": convention.programme,
            "user": request.user,
        },
    )
    msg = EmailMultiAlternatives(
        f"[{settings.ENVIRONMENT.upper()}] Nouvelle convention créée de zéro ({convention})",
        text_content,
        from_email,
        ("<EMAIL>",),
    )
    # HTML part is attached as an alternative so text-only clients still work.
    msg.attach_alternative(html_content, "text/html")
    msg.send()
def programme_update(request, convention_uuid):
    """Display or persist the "programme" step of a convention.

    GET pre-fills the form from the stored programme and lot; POST either
    delegates to the atomic update (recap redirect) or saves the whole
    form when it validates.
    """
    convention = (
        Convention.objects.prefetch_related("programme")
        .prefetch_related("lot")
        .get(uuid=convention_uuid)
    )
    programme = convention.programme
    lot = convention.lot
    if request.method == "POST":
        request.user.check_perm("convention.change_convention", convention)
        if request.POST.get("redirect_to_recap", False):
            return _programme_atomic_update(request, convention, programme, lot)
        form = ProgrammeForm(request.POST)
        if form.is_valid():
            _save_programme_and_lot(programme, lot, form)
            return utils.base_response_success(convention)
    else:
        # First display: build the form from the current database values.
        request.user.check_perm("convention.view_convention", convention)
        initial_values = {
            "uuid": programme.uuid,
            "nom": programme.nom,
            "adresse": programme.adresse,
            "code_postal": programme.code_postal,
            "ville": programme.ville,
            "nb_logements": lot.nb_logements,
            "type_habitat": lot.type_habitat,
            "type_operation": programme.type_operation,
            "anru": programme.anru,
            "autres_locaux_hors_convention": programme.autres_locaux_hors_convention,
            "nb_locaux_commerciaux": programme.nb_locaux_commerciaux,
            "nb_bureaux": programme.nb_bureaux,
        }
        form = ProgrammeForm(initial=initial_values)
    # Invalid POST or plain GET: return the error/display context.
    return {
        **utils.base_convention_response_error(request, convention),
        "form": form,
    }
def _programme_atomic_update(request, convention, programme, lot):
    """Partially update the programme/lot, defaulting to stored values.

    Builds a full form payload where any field missing from the POST data
    falls back to the current database value, then saves when valid.
    """
    partial_fields = [
        "nom",
        "adresse",
        "code_postal",
        "ville",
        "type_operation",
        "anru",
        "autres_locaux_hors_convention",
        "nb_locaux_commerciaux",
        "nb_bureaux",
    ]
    payload = {
        "uuid": programme.uuid,
        "nb_logements": request.POST.get("nb_logements", lot.nb_logements),
        "type_habitat": request.POST.get("type_habitat", lot.type_habitat),
    }
    payload.update(utils.build_partial_form(request, programme, partial_fields))
    form = ProgrammeForm(payload)
    if not form.is_valid():
        return {
            **utils.base_convention_response_error(request, convention),
            "form": form,
        }
    _save_programme_and_lot(programme, lot, form)
    return utils.base_response_redirect_recap_success(convention)
def _save_programme_and_lot(programme, lot, form):
    """Copy the validated form values onto *programme* and *lot*, then persist both.

    The programme is saved first, then the lot, matching the form's split
    between programme-level and lot-level fields.
    """
    programme_fields = (
        "nom",
        "adresse",
        "code_postal",
        "ville",
        "type_operation",
        "anru",
        "autres_locaux_hors_convention",
        "nb_locaux_commerciaux",
        "nb_bureaux",
    )
    for field_name in programme_fields:
        setattr(programme, field_name, form.cleaned_data[field_name])
    programme.save()
    lot.nb_logements = form.cleaned_data["nb_logements"]
    lot.type_habitat = form.cleaned_data["type_habitat"]
    lot.save()
def programme_cadastral_update(request, convention_uuid):
    # pylint: disable=R0915
    """Display or persist the cadastral step of a convention.

    Three paths:
      * POST with the "Upload" key ("Téléverser" button): parse the
        uploaded spreadsheet and rebuild the formset from it;
      * any other POST ("Enregistrer et Suivant"): atomic save, optionally
        redirecting to the recap page;
      * GET: build form and formset from the stored programme data.

    :return: context dict for rendering, or the atomic-update result.
    """
    convention = (
        Convention.objects.prefetch_related("programme")
        .prefetch_related("programme__referencecadastrale_set")
        .get(uuid=convention_uuid)
    )
    programme = convention.programme
    import_warnings = None
    editable_upload = request.POST.get("editable_upload", False)
    if request.method == "POST":
        request.user.check_perm("convention.change_convention", convention)
        # When the user clicked on the "Téléverser" (upload) button
        if request.POST.get("Upload", False):
            form = ProgrammeCadastralForm(request.POST)
            formset, upform, import_warnings, editable_upload = _upload_cadastre(
                request, convention, import_warnings, editable_upload
            )
        # When the user clicked on "Enregistrer et Suivant" (save and next)
        else:
            result = _programme_cadastrale_atomic_update(request, convention, programme)
            if result["success"] == utils.ReturnStatus.SUCCESS and request.POST.get(
                "redirect_to_recap", False
            ):
                result["redirect"] = "recapitulatif"
            return {
                **result,
                "editable_upload": request.user.full_editable_convention(convention)
                or editable_upload,
            }
    # When displaying the page for the first time (GET)
    else:
        request.user.check_perm("convention.view_convention", convention)
        initial = []
        referencecadastrales = programme.referencecadastrale_set.all().order_by(
            "section"
        )
        for referencecadastrale in referencecadastrales:
            initial.append(
                {
                    "uuid": referencecadastrale.uuid,
                    "section": referencecadastrale.section,
                    "numero": referencecadastrale.numero,
                    "lieudit": referencecadastrale.lieudit,
                    "surface": referencecadastrale.surface,
                }
            )
        formset = ReferenceCadastraleFormSet(initial=initial)
        upform = UploadForm()
        # Dates are reformatted for display; document fields are unpacked
        # into their text + files sub-fields.
        form = ProgrammeCadastralForm(
            initial={
                "uuid": programme.uuid,
                "permis_construire": programme.permis_construire,
                "date_acte_notarie": utils.format_date_for_form(
                    programme.date_acte_notarie
                ),
                "date_achevement_previsible": utils.format_date_for_form(
                    programme.date_achevement_previsible
                ),
                "date_achat": utils.format_date_for_form(programme.date_achat),
                "date_achevement": utils.format_date_for_form(
                    programme.date_achevement
                ),
                **utils.get_text_and_files_from_field("vendeur", programme.vendeur),
                **utils.get_text_and_files_from_field("acquereur", programme.acquereur),
                **utils.get_text_and_files_from_field(
                    "reference_notaire", programme.reference_notaire
                ),
                **utils.get_text_and_files_from_field(
                    "reference_publication_acte", programme.reference_publication_acte
                ),
                **utils.get_text_and_files_from_field(
                    "effet_relatif", programme.effet_relatif
                ),
                **utils.get_text_and_files_from_field(
                    "acte_de_propriete", programme.acte_de_propriete
                ),
                **utils.get_text_and_files_from_field(
                    "certificat_adressage", programme.certificat_adressage
                ),
                **utils.get_text_and_files_from_field(
                    "reference_cadastrale", programme.reference_cadastrale
                ),
            }
        )
    return {
        **utils.base_convention_response_error(request, convention),
        "form": form,
        "formset": formset,
        "upform": upform,
        "import_warnings": import_warnings,
        "editable_upload": request.user.full_editable_convention(convention)
        or editable_upload,
    }
def _upload_cadastre(request, convention, import_warnings, editable_upload):
    """Process an uploaded cadastre spreadsheet and rebuild the formset.

    Rows matching an existing reference (same section and numero) reuse
    its uuid so they update instead of duplicating the record.
    """
    formset = ReferenceCadastraleFormSet(request.POST)
    upform = UploadForm(request.POST, request.FILES)
    if upform.is_valid():
        result = upload_objects.handle_uploaded_xlsx(
            upform,
            request.FILES["file"],
            ReferenceCadastrale,
            convention,
            "cadastre.xlsx",
        )
        if result["success"] != utils.ReturnStatus.ERROR:
            # Map "<section>__<numero>" to the uuid of the existing record.
            known_uuids = {
                f"{refcad.section}__{refcad.numero}": refcad.uuid
                for refcad in ReferenceCadastrale.objects.filter(
                    programme_id=convention.programme_id
                )
            }
            for row in result["objects"]:
                if "section" in row and "numero" in row:
                    key = f"{row['section']}__{row['numero']}"
                    if key in known_uuids:
                        row["uuid"] = known_uuids[key]
            formset = ReferenceCadastraleFormSet(initial=result["objects"])
            import_warnings = result["import_warnings"]
            editable_upload = True
    return formset, upform, import_warnings, editable_upload
def _save_programme_cadastrale(form, programme):
    """Persist the cadastral-step fields of *form* onto *programme*.

    Scalar/date fields are copied directly; document fields are re-packed
    with utils.set_files_and_text_field (uploaded files plus free text).
    """
    programme.permis_construire = form.cleaned_data["permis_construire"]
    programme.date_acte_notarie = form.cleaned_data["date_acte_notarie"]
    programme.date_achevement_previsible = form.cleaned_data[
        "date_achevement_previsible"
    ]
    programme.date_achat = form.cleaned_data["date_achat"]
    programme.date_achevement = form.cleaned_data["date_achevement"]
    programme.vendeur = utils.set_files_and_text_field(
        form.cleaned_data["vendeur_files"],
        form.cleaned_data["vendeur"],
    )
    programme.acquereur = utils.set_files_and_text_field(
        form.cleaned_data["acquereur_files"],
        form.cleaned_data["acquereur"],
    )
    programme.reference_notaire = utils.set_files_and_text_field(
        form.cleaned_data["reference_notaire_files"],
        form.cleaned_data["reference_notaire"],
    )
    programme.reference_publication_acte = utils.set_files_and_text_field(
        form.cleaned_data["reference_publication_acte_files"],
        form.cleaned_data["reference_publication_acte"],
    )
    # NOTE(review): the four fields below are saved from files only (no text
    # argument), unlike the four above — confirm set_files_and_text_field's
    # default for the text argument is the intended behavior here.
    programme.effet_relatif = utils.set_files_and_text_field(
        form.cleaned_data["effet_relatif_files"],
    )
    programme.acte_de_propriete = utils.set_files_and_text_field(
        form.cleaned_data["acte_de_propriete_files"],
    )
    programme.certificat_adressage = utils.set_files_and_text_field(
        form.cleaned_data["certificat_adressage_files"],
    )
    programme.reference_cadastrale = utils.set_files_and_text_field(
        form.cleaned_data["reference_cadastrale_files"],
    )
    programme.save()
def _save_programme_reference_cadastrale(formset, convention, programme):
    """Synchronize the programme's cadastral references with *formset*.

    References absent from the formset are deleted; rows carrying a uuid
    update the matching record; rows without one create a new record.

    :param formset: validated ReferenceCadastraleFormSet
    :param convention: convention providing the bailleur for new records
    :param programme: programme owning the references
    """
    # Uuids still present in the submitted formset; everything else is removed.
    kept_uuids = [
        form.cleaned_data["uuid"] for form in formset if form.cleaned_data["uuid"]
    ]
    programme.referencecadastrale_set.exclude(uuid__in=kept_uuids).delete()
    for form in formset:
        if form.cleaned_data["uuid"]:
            reference_cadastrale = ReferenceCadastrale.objects.get(
                uuid=form.cleaned_data["uuid"]
            )
            reference_cadastrale.section = form.cleaned_data["section"]
            reference_cadastrale.numero = form.cleaned_data["numero"]
            reference_cadastrale.lieudit = form.cleaned_data["lieudit"]
            reference_cadastrale.surface = form.cleaned_data["surface"]
        else:
            # Instantiate without saving so the single save() below issues one
            # INSERT, instead of objects.create()'s INSERT followed by a
            # redundant UPDATE from the extra save() the original performed.
            reference_cadastrale = ReferenceCadastrale(
                programme=programme,
                bailleur=convention.bailleur,
                section=form.cleaned_data["section"],
                numero=form.cleaned_data["numero"],
                lieudit=form.cleaned_data["lieudit"],
                surface=form.cleaned_data["surface"],
            )
        reference_cadastrale.save()
def _programme_cadastrale_atomic_update(request, convention, programme):
    """Atomically save the cadastral form and its reference formset.

    Missing POST fields fall back to the stored programme values, so a
    partial submission never wipes existing data.  The reference formset
    is rebuilt field by field the same way before validation.  Returns a
    SUCCESS dict when both form and formset validate, otherwise the error
    context for re-display.
    """
    form = ProgrammeCadastralForm(
        {
            "uuid": programme.uuid,
            **utils.build_partial_form(
                request,
                programme,
                [
                    "permis_construire",
                    "date_acte_notarie",
                    "date_achevement_previsible",
                    "date_achat",
                    "date_achevement",
                ],
            ),
            **utils.build_partial_text_and_files_form(
                request,
                programme,
                [
                    "vendeur",
                    "acquereur",
                    "reference_notaire",
                    "reference_publication_acte",
                    "acte_de_propriete",
                    "effet_relatif",
                    "certificat_adressage",
                    "reference_cadastrale",
                ],
            ),
        }
    )
    form_is_valid = form.is_valid()
    formset = ReferenceCadastraleFormSet(request.POST)
    # Rebuild the formset payload: management-form counters first, then one
    # entry per row, preferring POSTed values over the stored record.
    initformset = {
        "form-TOTAL_FORMS": request.POST.get("form-TOTAL_FORMS", len(formset)),
        "form-INITIAL_FORMS": request.POST.get("form-INITIAL_FORMS", len(formset)),
    }
    for idx, form_reference_cadastrale in enumerate(formset):
        if form_reference_cadastrale["uuid"].value():
            # Existing record: merge submitted values with database values.
            reference_cadastrale = ReferenceCadastrale.objects.get(
                uuid=form_reference_cadastrale["uuid"].value()
            )
            initformset = {
                **initformset,
                f"form-{idx}-uuid": reference_cadastrale.uuid,
                f"form-{idx}-section": utils.get_form_value(
                    form_reference_cadastrale, reference_cadastrale, "section"
                ),
                f"form-{idx}-numero": utils.get_form_value(
                    form_reference_cadastrale, reference_cadastrale, "numero"
                ),
                f"form-{idx}-lieudit": utils.get_form_value(
                    form_reference_cadastrale, reference_cadastrale, "lieudit"
                ),
                f"form-{idx}-surface": utils.get_form_value(
                    form_reference_cadastrale, reference_cadastrale, "surface"
                ),
            }
        else:
            # New row: only the submitted values exist.
            initformset = {
                **initformset,
                f"form-{idx}-section": form_reference_cadastrale["section"].value(),
                f"form-{idx}-numero": form_reference_cadastrale["numero"].value(),
                f"form-{idx}-lieudit": form_reference_cadastrale["lieudit"].value(),
                f"form-{idx}-surface": form_reference_cadastrale["surface"].value(),
            }
    formset = ReferenceCadastraleFormSet(initformset)
    formset_is_valid = formset.is_valid()
    if form_is_valid and formset_is_valid:
        _save_programme_cadastrale(form, programme)
        _save_programme_reference_cadastrale(formset, convention, programme)
        return {
            "success": utils.ReturnStatus.SUCCESS,
            "convention": convention,
        }
    upform = UploadForm()
    return {
        **utils.base_convention_response_error(request, convention),
        "form": form,
        "formset": formset,
        "upform": upform,
    }
def programme_edd_update(request, convention_uuid):
    """Display or persist the EDD (état descriptif de division) step.

    Three paths:
      * POST with the "Upload" key ("Téléverser" button): parse the
        uploaded spreadsheet and rebuild the formset from it;
      * any other POST ("Enregistrer et Suivant"): atomic save, optionally
        redirecting to the recap page;
      * GET: build form and formset from the stored programme data.

    :return: context dict for rendering, or the atomic-update result.
    """
    convention = (
        Convention.objects.prefetch_related("programme")
        .prefetch_related("lot")
        .prefetch_related("programme__logementedd_set")
        .get(uuid=convention_uuid)
    )
    programme = convention.programme
    import_warnings = None
    editable_upload = request.POST.get("editable_upload", False)
    if request.method == "POST":
        request.user.check_perm("convention.change_convention", convention)
        # When the user clicked on the "Téléverser" (upload) button
        if request.POST.get("Upload", False):
            form = ProgrammeEDDForm(request.POST)
            formset, upform, import_warnings, editable_upload = _upload_logements_edd(
                request, convention, import_warnings, editable_upload
            )
        # When the user clicked on "Enregistrer et Suivant" (save and next)
        else:
            result = _programme_edd_atomic_update(request, convention, programme)
            if result["success"] == utils.ReturnStatus.SUCCESS and request.POST.get(
                "redirect_to_recap", False
            ):
                result["redirect"] = "recapitulatif"
            return {
                **result,
                # Fixed: this previously OR-ed the "redirect_to_recap" POST
                # flag instead of editable_upload — a copy-paste slip; now
                # consistent with programme_cadastral_update.
                "editable_upload": request.user.full_editable_convention(convention)
                or editable_upload,
            }
    # When displaying the page for the first time (GET)
    else:
        request.user.check_perm("convention.view_convention", convention)
        initial = []
        for logementedd in programme.logementedd_set.all():
            initial.append(
                {
                    "uuid": logementedd.uuid,
                    "financement": logementedd.financement,
                    "designation": logementedd.designation,
                    "numero_lot": logementedd.numero_lot,
                }
            )
        formset = LogementEDDFormSet(initial=initial)
        upform = UploadForm()
        form = ProgrammeEDDForm(
            initial={
                "uuid": programme.uuid,
                "lot_uuid": convention.lot.uuid,
                **utils.get_text_and_files_from_field(
                    "edd_volumetrique", convention.lot.edd_volumetrique
                ),
                "mention_publication_edd_volumetrique": (
                    programme.mention_publication_edd_volumetrique
                ),
                **utils.get_text_and_files_from_field(
                    "edd_classique", convention.lot.edd_classique
                ),
                "mention_publication_edd_classique": programme.mention_publication_edd_classique,
            }
        )
    return {
        **utils.base_convention_response_error(request, convention),
        "form": form,
        "formset": formset,
        "upform": upform,
        "import_warnings": import_warnings,
        "editable_upload": request.user.full_editable_convention(convention)
        or editable_upload,
    }
def _upload_logements_edd(request, convention, import_warnings, editable_upload):
    """Process an uploaded EDD-dwellings spreadsheet and rebuild the formset.

    Rows whose designation matches an existing LogementEDD reuse its uuid
    so they update instead of duplicating the record.
    """
    formset = LogementEDDFormSet(request.POST)
    upform = UploadForm(request.POST, request.FILES)
    if upform.is_valid():
        result = upload_objects.handle_uploaded_xlsx(
            upform,
            request.FILES["file"],
            LogementEDD,
            convention,
            "logements_edd.xlsx",
        )
        if result["success"] != utils.ReturnStatus.ERROR:
            # Existing records keyed by designation.
            known_uuids = {
                edd_lgt.designation: edd_lgt.uuid
                for edd_lgt in LogementEDD.objects.filter(
                    programme_id=convention.programme_id
                )
            }
            for row in result["objects"]:
                if "designation" in row and row["designation"] in known_uuids:
                    row["uuid"] = known_uuids[row["designation"]]
            formset = LogementEDDFormSet(initial=result["objects"])
            import_warnings = result["import_warnings"]
            editable_upload = True
    return formset, upform, import_warnings, editable_upload
def _programme_edd_atomic_update(request, convention, programme):
form = ProgrammeEDDForm(
{
"uuid": programme.uuid,
**utils.init_text_and_files_from_field(
request, convention.lot, "edd_volumetrique"
),
"mention_publication_edd_volumetrique": (
request.POST.get(
"mention_publication_edd_volumetrique",
programme.mention_publication_edd_volumetrique,
)
),
**utils.init_text_and_files_from_field(
request, convention.lot, "edd_classique"
),
"mention_publication_edd_classique": (
request.POST.get(
"mention_publication_edd_classique",
programme.mention_publication_edd_classique,
)
),
}
)
form_is_valid = form.is_valid()
formset = LogementEDDFormSet(request.POST)
initformset = {
"form-TOTAL_FORMS": request.POST.get("form-TOTAL_FORMS", len(formset)),
"form-INITIAL_FORMS": request.POST.get("form-INITIAL_FORMS", len(formset)),
}
for idx, form_logementedd in enumerate(formset):
if form_logementedd["uuid"].value():
logementedd = LogementEDD.objects.get(uuid=form_logementedd["uuid"].value())
initformset = {
**initformset,
f"form-{idx}-uuid": logementedd.uuid,
f"form-{idx}-designation": utils.get_form_value(
form_logementedd, logementedd, "designation"
),
f"form-{idx}-financement": utils.get_form_value(
form_logementedd, logementedd, "financement"
),
f"form-{idx}-numero_lot": utils.get_form_value(
form_logementedd, logementedd, "numero_lot"
),
}
else:
initformset = {
**initformset,
f"form-{idx}-designation": form_logementedd["designation"].value(),
f"form-{idx}-financement": form_logementedd["financement"].value(),
f"form-{idx}-numero_lot": | |
version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, schedule_name, time_zone, system_window_ind, recur_type, start_date, end_date, interval, ordinal, period_mins, start_min, end_min, sun_start, sun_end, mon_start, mon_end, tue_start, tue_end, wed_start, wed_end, thu_start, thu_end, fri_start, fri_end, sat_start, sat_end, created_at, updated_at.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each TimeWindow. Valid values are id, schedule_name, time_zone, system_window_ind, recur_type, start_date, end_date, interval, ordinal, period_mins, start_min, end_min, sun_start, sun_end, mon_start, mon_end, tue_start, tue_end, wed_start, wed_end, thu_start, thu_end, fri_start, fri_end, sat_start, sat_end, created_at, updated_at. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against time windows, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: created_at, end_date, end_min, fri_end, fri_start, id, interval, mon_end, mon_start, ordinal, period_mins, recur_type, sat_end, sat_start, schedule_name, start_date, start_min, sun_end, sun_start, system_window_ind, thu_end, thu_start, time_zone, tue_end, tue_start, updated_at, wed_end, wed_start.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return time_windows: An array of the TimeWindow objects that match the specified input criteria.
:rtype time_windows: Array of TimeWindow
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available time windows matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: created_at, end_date, end_min, fri_end, fri_start, id, interval, mon_end, mon_start, ordinal, period_mins, recur_type, sat_end, sat_start, schedule_name, start_date, start_min, sun_end, sun_start, system_window_ind, thu_end, thu_start, time_zone, tue_end, tue_start, updated_at, wed_end, wed_start.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_created_at: The operator to apply to the field created_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. created_at: The date and time the record was initially created in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_created_at: If op_created_at is specified, the field named in this input will be compared to the value in created_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_created_at must be specified if op_created_at is specified.
:type val_f_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_created_at: If op_created_at is specified, this value will be compared to the value in created_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_created_at must be specified if op_created_at is specified.
:type val_c_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_end_date: The operator to apply to the field end_date. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. end_date: The ending effective date of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_end_date: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_end_date: If op_end_date is specified, the field named in this input will be compared to the value in end_date using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_end_date must be specified if op_end_date is specified.
:type val_f_end_date: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_end_date: If op_end_date is specified, this value will be compared to the value in end_date using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_end_date must be specified if op_end_date is specified.
:type val_c_end_date: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_end_min: The operator to apply to the field end_min. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. end_min: The ending time of a time window. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_end_min: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_end_min: If op_end_min is specified, the field named in this input will be compared to the value in end_min using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_end_min must be specified if op_end_min is specified.
:type val_f_end_min: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_end_min: If op_end_min is specified, this value will be compared to the value in end_min using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_end_min must be specified if op_end_min is specified.
:type val_c_end_min: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_fri_end: The operator to apply to the field fri_end. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, | |
# python/three_sums.py
from bisect import bisect_left
from itertools import combinations
from unittest import TestCase
def threeSum_old(nums):
    """Brute-force reference: all unique triplets of *nums* summing to zero.

    O(n^3) over all index triples via itertools.combinations (same visit
    order as the original triple nested loop).  Each triplet is returned
    sorted, in first-discovery order; a set of seen tuples makes the
    duplicate check O(1) instead of the original O(k) list scan.

    :param nums: iterable of integers
    :return: list of sorted 3-element lists
    """
    result = []
    seen = set()
    for a, b, c in combinations(nums, 3):
        if a + b + c == 0:
            triple = tuple(sorted((a, b, c)))
            if triple not in seen:
                seen.add(triple)
                result.append(list(triple))
    return result
def threeSum(nums):
    """Return all unique triplets from *nums* that sum to zero.

    Classic sort + two-pointer scan: O(n^2) time, O(1) extra space beyond
    the output.  Each triplet is returned in ascending order and no
    triplet appears twice.

    Fixes the previous version, which only paired elements drawn from the
    negative prefix and therefore missed any triplet containing two or
    more non-negative numbers (e.g. [-2, 1, 1]); the debug prints are
    gone as well.

    :param nums: iterable of integers
    :return: list of sorted 3-element lists
    """
    nums = sorted(nums)
    n = len(nums)
    result = []
    for i in range(n - 2):
        # Skip duplicate anchors so identical triplets are not re-emitted.
        if i > 0 and nums[i] == nums[i - 1]:
            continue
        # A positive anchor cannot start a zero-sum triplet (list is sorted).
        if nums[i] > 0:
            break
        lo, hi = i + 1, n - 1
        while lo < hi:
            total = nums[i] + nums[lo] + nums[hi]
            if total < 0:
                lo += 1
            elif total > 0:
                hi -= 1
            else:
                result.append([nums[i], nums[lo], nums[hi]])
                lo += 1
                # Skip duplicates of the middle element.
                while lo < hi and nums[lo] == nums[lo - 1]:
                    lo += 1
                hi -= 1
    return result
def find_index_of_first_positive(n, start=0, end=None):
    """Return the index of the first element of sorted list *n* that is >= 0.

    Despite the name, zero counts: this is the leftmost non-negative
    position, i.e. ``bisect_left(n, 0)``.  Returns the slice's upper bound
    when every element in it is negative.  Replaces the hand-rolled
    recursive binary search (which duplicated the stdlib and burned a
    stack frame per halving) with bisect; identical results.

    :param n: list sorted in ascending order
    :param start: inclusive lower bound of the searched slice
    :param end: exclusive upper bound of the slice; defaults to ``len(n)``
    """
    return bisect_left(n, 0, start, len(n) if end is None else end)
# Ad-hoc smoke runs executed at import time.
print(threeSum([-2, 0, 1, 1, 2]))
print("-=-=-=-")
print(threeSum([-1, 0, 1, 2, -1, -4]))
list = [
82597,
-9243,
62390,
83030,
-97960,
-26521,
-61011,
83390,
-38677,
12333,
75987,
46091,
83794,
19355,
-71037,
-6242,
-28801,
324,
1202,
-90885,
-2989,
-95597,
-34333,
35528,
5680,
89093,
-90606,
50360,
-29393,
-27012,
53313,
65213,
99818,
-82405,
-41661,
-3333,
-51952,
72135,
-1523,
26377,
74685,
96992,
92263,
15929,
5467,
-99555,
-43348,
-41689,
-60383,
-3990,
32165,
65265,
-72973,
-58372,
12741,
-48568,
-46596,
72419,
-1859,
34153,
62937,
81310,
-61823,
-96770,
-54944,
8845,
-91184,
24208,
-29078,
31495,
65258,
14198,
85395,
70506,
-40908,
56740,
-12228,
-40072,
32429,
93001,
68445,
-73927,
25731,
-91859,
-24150,
10093,
-60271,
-81683,
-18126,
51055,
48189,
-6468,
25057,
81194,
-58628,
74042,
66158,
-14452,
-49851,
-43667,
11092,
39189,
-17025,
-79173,
13606,
83172,
92647,
-59741,
19343,
-26644,
-57607,
82908,
-20655,
1637,
80060,
98994,
39331,
-31274,
-61523,
91225,
-72953,
13211,
-75116,
-98421,
-41571,
-69074,
99587,
39345,
42151,
-2460,
98236,
15690,
-52507,
-95803,
-48935,
-46492,
-45606,
-79254,
-99851,
52533,
73486,
39948,
-7240,
71815,
-585,
-96252,
90990,
-93815,
93340,
-71848,
58733,
-14859,
-83082,
-75794,
-82082,
-24871,
-15206,
91207,
-56469,
-93618,
67131,
-8682,
75719,
87429,
-98757,
-7535,
-24890,
-94160,
85003,
33928,
75538,
97456,
-66424,
-60074,
-8527,
-28697,
-22308,
2246,
-70134,
-82319,
-10184,
87081,
-34949,
-28645,
-47352,
-83966,
-60418,
-15293,
-53067,
-25921,
55172,
75064,
95859,
48049,
34311,
-86931,
-38586,
33686,
-36714,
96922,
76713,
-22165,
-80585,
-34503,
-44516,
39217,
-28457,
47227,
-94036,
43457,
24626,
-87359,
26898,
-70819,
30528,
-32397,
-69486,
84912,
-1187,
-98986,
-32958,
4280,
-79129,
-65604,
9344,
58964,
50584,
71128,
-55480,
24986,
15086,
-62360,
-42977,
-49482,
-77256,
-36895,
-74818,
20,
3063,
-49426,
28152,
-97329,
6086,
86035,
-88743,
35241,
44249,
19927,
-10660,
89404,
24179,
-26621,
-6511,
57745,
-28750,
96340,
-97160,
-97822,
-49979,
52307,
79462,
94273,
-24808,
77104,
9255,
-83057,
77655,
21361,
55956,
-9096,
48599,
-40490,
-55107,
2689,
29608,
20497,
66834,
-34678,
23553,
-81400,
-66630,
-96321,
-34499,
-12957,
-20564,
25610,
-4322,
-58462,
20801,
53700,
71527,
24669,
-54534,
57879,
-3221,
33636,
3900,
97832,
-27688,
-98715,
5992,
24520,
-55401,
-57613,
-69926,
57377,
-77610,
20123,
52174,
860,
60429,
-91994,
-62403,
-6218,
-90610,
-37263,
-15052,
62069,
-96465,
44254,
89892,
-3406,
19121,
-41842,
-87783,
-64125,
-56120,
73904,
-22797,
-58118,
-4866,
5356,
75318,
46119,
21276,
-19246,
-9241,
-97425,
57333,
-15802,
93149,
25689,
-5532,
95716,
39209,
-87672,
-29470,
-16324,
-15331,
27632,
-39454,
56530,
-16000,
29853,
46475,
78242,
-46602,
83192,
-73440,
-15816,
50964,
-36601,
89758,
38375,
-40007,
-36675,
-94030,
67576,
46811,
-64919,
45595,
76530,
40398,
35845,
41791,
67697,
-30439,
-82944,
63115,
33447,
-36046,
-50122,
-34789,
43003,
-78947,
-38763,
-89210,
32756,
-20389,
-31358,
-90526,
-81607,
88741,
86643,
98422,
47389,
-75189,
13091,
95993,
-15501,
94260,
-25584,
-1483,
-67261,
-70753,
25160,
89614,
-90620,
-48542,
83889,
-12388,
-9642,
-37043,
-67663,
28794,
-8801,
13621,
12241,
55379,
84290,
21692,
-95906,
-85617,
-17341,
-63767,
80183,
-4942,
-51478,
30997,
-13658,
8838,
17452,
-82869,
-39897,
68449,
31964,
98158,
-49489,
62283,
-62209,
-92792,
-59342,
55146,
-38533,
20496,
62667,
62593,
36095,
-12470,
5453,
-50451,
74716,
-17902,
3302,
-16760,
-71642,
-34819,
96459,
-72860,
21638,
47342,
-69897,
-40180,
44466,
76496,
84659,
13848,
-91600,
-90887,
-63742,
-2156,
-84981,
-99280,
94326,
-33854,
92029,
-50811,
98711,
-36459,
-75555,
79110,
-88164,
-97397,
-84217,
97457,
64387,
30513,
-53190,
-83215,
252,
2344,
-27177,
-92945,
-89010,
82662,
-11670,
86069,
53417,
42702,
97082,
3695,
-14530,
-46334,
17910,
77999,
28009,
-12374,
15498,
-46941,
97088,
-35030,
95040,
92095,
-59469,
-24761,
46491,
67357,
-66658,
37446,
-65130,
-50416,
99197,
30925,
27308,
54122,
-44719,
12582,
-99525,
-38446,
-69050,
-22352,
94757,
-56062,
33684,
-40199,
-46399,
96842,
-50881,
-22380,
-65021,
40582,
53623,
-76034,
77018,
-97074,
-84838,
-22953,
-74205,
79715,
-33920,
-35794,
-91369,
73421,
-82492,
63680,
-14915,
-33295,
37145,
76852,
-69442,
60125,
-74166,
74308,
-1900,
-30195,
-16267,
-60781,
-27760,
5852,
38917,
25742,
-3765,
49097,
-63541,
98612,
-92865,
-30248,
9612,
-8798,
53262,
95781,
-42278,
-36529,
7252,
-27394,
-5021,
59178,
80934,
-48480,
-75131,
-54439,
-19145,
-48140,
98457,
-6601,
-51616,
-89730,
78028,
32083,
-48904,
16822,
-81153,
-8832,
48720,
-80728,
-45133,
-86647,
-4259,
-40453,
2590,
28613,
50523,
-4105,
-27790,
-74579,
-17223,
63721,
33489,
-47921,
97628,
-97691,
-14782,
-65644,
18008,
-93651,
-71266,
80990,
-76732,
-47104,
35368,
28632,
59818,
-86269,
-89753,
34557,
-92230,
-5933,
-3487,
-73557,
-13174,
-43981,
-43630,
-55171,
30254,
-83710,
-99583,
-13500,
71787,
5017,
-25117,
-78586,
86941,
-3251,
-23867,
-36315,
75973,
86272,
-45575,
77462,
-98836,
-10859,
70168,
-32971,
-38739,
-12761,
93410,
14014,
-30706,
-77356,
-85965,
-62316,
63918,
-59914,
-64088,
1591,
-10957,
38004,
15129,
-83602,
-51791,
34381,
-89382,
-26056,
8942,
5465,
71458,
-73805,
-87445,
-19921,
-80784,
69150,
-34168,
28301,
-68955,
18041,
6059,
82342,
9947,
39795,
44047,
-57313,
48569,
81936,
-2863,
-80932,
32976,
-86454,
-84207,
33033,
32867,
9104,
-16580,
-25727,
80157,
-70169,
53741,
86522,
84651,
68480,
84018,
61932,
7332,
-61322,
-69663,
76370,
41206,
12326,
-34689,
17016,
82975,
-23386,
39417,
72793,
44774,
-96259,
3213,
79952,
29265,
-61492,
-49337,
14162,
65886,
3342,
-41622,
-62659,
-90402,
-24751,
88511,
54739,
-21383,
-40161,
-96610,
-24944,
-602,
-76842,
-21856,
69964,
43994,
-15121,
-85530,
12718,
13170,
-13547,
69222,
62417,
-75305,
-81446,
-38786,
-52075,
-23110,
97681,
-82800,
-53178,
11474,
35857,
94197,
-58148,
-23689,
32506,
92154,
-64536,
-73930,
-77138,
97446,
-83459,
70963,
22452,
68472,
-3728,
-25059,
-49405,
95129,
-6167,
12808,
99918,
30113,
-12641,
-26665,
86362,
-33505,
50661,
26714,
33701,
89012,
-91540,
40517,
-12716,
-57185,
-87230,
29914,
-59560,
13200,
-72723,
58272,
23913,
-45586,
-96593,
-26265,
-2141,
31087,
81399,
92511,
-34049,
20577,
2803,
26003,
8940,
42117,
40887,
-82715,
38269,
40969,
-50022,
72088,
21291,
-67280,
-16523,
90535,
18669,
94342,
-39568,
-88080,
-99486,
-20716,
23108,
-28037,
63342,
36863,
-29420,
-44016,
75135,
73415,
16059,
-4899,
86893,
43136,
-7041,
33483,
-67612,
25327,
40830,
6184,
61805,
4247,
81119,
-22854,
-26104,
-63466,
63093,
-63685,
60369,
51023,
51644,
-16350,
74438,
-83514,
99083,
10079,
-58451,
-79621,
48471,
67131,
-86940,
99093,
11855,
-22272,
-67683,
-44371,
9541,
18123,
37766,
-70922,
80385,
-57513,
-76021,
-47890,
36154,
72935,
84387,
-92681,
-88303,
-7810,
59902,
-90,
-64704,
-28396,
-66403,
8860,
13343,
33882,
85680,
7228,
28160,
-14003,
54369,
-58893,
92606,
-63492,
-10101,
64714,
58486,
29948,
-44679,
-22763,
10151,
-56695,
4031,
-18242,
-36232,
86168,
-14263,
9883,
47124,
47271,
92761,
-24958,
-73263,
-79661,
-69147,
-18874,
29546,
-92588,
-85771,
26451,
-86650,
-43306,
-59094,
-47492,
-34821,
-91763,
-47670,
33537,
22843,
67417,
-759,
92159,
63075,
94065,
-26988,
55276,
65903,
30414,
-67129,
-99508,
-83092,
-91493,
-50426,
14349,
-83216,
-76090,
32742,
-5306,
-93310,
-60750,
-60620,
-45484,
-21108,
-58341,
-28048,
-52803,
69735,
78906,
81649,
32565,
-86804,
-83202,
-65688,
-1760,
89707,
93322,
-72750,
84134,
71900,
-37720,
19450,
-78018,
22001,
-23604,
26276,
-21498,
65892,
-72117,
-89834,
-23867,
55817,
-77963,
42518,
93123,
-83916,
63260,
-2243,
-97108,
85442,
-36775,
17984,
-58810,
99664,
-19082,
93075,
-69329,
87061,
79713,
16296,
70996,
13483,
-74582,
49900,
-27669,
-40562,
1209,
-20572,
34660,
83193,
75579,
7344,
64925,
88361,
60969,
3114,
44611,
-27445,
53049,
-16085,
-92851,
-53306,
13859,
-33532,
86622,
-75666,
-18159,
-98256,
51875,
-42251,
-27977,
-18080,
23772,
38160,
41779,
9147,
94175,
99905,
-85755,
62535,
-88412,
-52038,
-68171,
93255,
-44684,
-11242,
-104,
31796,
62346,
-54931,
-55790,
-70032,
46221,
56541,
-91947,
90592,
93503,
4071,
20646,
4856,
-63598,
15396,
-50708,
32138,
-85164,
38528,
-89959,
53852,
57915,
-42421,
-88916,
-75072,
67030,
-29066,
49542,
-71591,
61708,
-53985,
-43051,
28483,
46991,
-83216,
80991,
-46254,
-48716,
39356,
-8270,
-47763,
-34410,
874,
| |
0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.44797,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.20167,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0065108,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.207803,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0335685,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.102536,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.165386,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0834813,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.351403,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.112125,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.10223,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.00634181,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0043008,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0336025,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0318071,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0399443,
'Execution Unit/Register Files/Runtime Dynamic': 0.0361079,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0724192,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.179703,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.18039,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00112696,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00112696,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000995662,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000393137,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000456911,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0037065,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0103022,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0305769,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.94496,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0958958,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.103853,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.25787,
'Instruction Fetch Unit/Runtime Dynamic': 0.244335,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0538499,
'L2/Runtime Dynamic': 0.0148173,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.02873,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.40237,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0256105,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0256104,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.14967,
'Load Store Unit/Runtime Dynamic': 0.554282,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.063151,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.126302,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0224125,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0232096,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.12093,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0157552,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.31554,
'Memory Management Unit/Runtime Dynamic': 0.0389648,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.4686,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0166828,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00482915,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0520126,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
| |
#No description available
self.setFocus(self.list4)
elif self.descr_view==False:
self.setFocus(self.list)
elif action==ACTION_MOVE_LEFT:
if self.descr_view==True:
self.list3tb.setVisible(0)
self.list.setVisible(1)
self.setFocus(self.list)
self.descr_view=False
elif (self.getFocus()==self.list) and (self.list != self.list5):
self.setFocus(self.list3)
elif self.list != self.list5:
self.setFocus(self.list)
elif action==ACTION_MOVE_UP:
pos=self.list.getSelectedPosition()
elif (action==ACTION_MOUSEMOVE) or (action==ACTION_MOUSEMOVE2):
xpos=action.getAmount1()
ypos=action.getAmount2()
if xpos < 50:
self.setFocus(self.list3)
#elif (xpos > 500) and (ypos > 140):
# self.setFocus(self.list4)
elif self.ChkContextMenu(action)==True: #White
if self.IsFavoriteListFocus()==True:
self.selectBoxFavoriteList()
elif (self.URL==downloads_file) or (self.URL==downloads_queue) or \
(self.URL==downloads_complete) or (self.URL==parent_list) or \
(self.URL==incomplete_downloads):
self.selectBoxDownloadsList()
else:
self.selectBoxMainList()
#update index number
pos=self.getPlaylistPosition()
if pos >= 0:
self.listpos.setLabel(str(pos+1)+'/'+str(self.pl_focus.size()))
###
if self.state_busy==0: # and action != ACTION_MOUSEMOVE and action != ACTION_MOUSEMOVE2:
if hasattr(self,'labProtocol') or hasattr(self,'labItemUrl'):
try:
if self.list == self.list5:
#if (self.page > 0):
# index=self.counter+(self.page*self.page_size)-1
#else: index=self.counter
index=self.getPlaylistPosition()
else:
index=self.getPlaylistPosition()
try: str_url=self.pl_focus.list[index].URL
except: str_url=""
if (str_url.startswith(TVACoreURL)) and (TstRplcStrng in str_url):
str_url=str_url.replace(TstRplcStrng,'')
#if self.labItemUrl.getLabel() != str_url:
if self.CurItemURL != str_url:
self.CurItemURL=''+str_url
try:
if "://" in str_url: ProtocolMarker=str_url.split("://")[0]
else: ProtocolMarker="Local"
except: ProtocolMarker=""
if hasattr(self,'labProtocol'):
try:
self.labProtocol.setLabel(ProtocolMarker.upper());
self.labProtocol.setVisible(True)
except:
try:
self.MainWindow.labProtocol.setLabel("");
self.labProtocol.setVisible(False)
except: pass
if hasattr(self,'labItemUrl'):
try:
self.labItemUrl.setLabel(str_url)
self.labItemUrl.setVisible(True)
except:
try:
self.labItemUrl.setLabel("")
self.labItemUrl.setVisible(False)
except: pass
except: pass
###
#end of function
except: print '* Error during onAction1.'
######################################################################
# Description: class xbmcgui default member function.
# Parameters : TBD
# Return : TBD
######################################################################
def onFocus(self,controlId):
pass
######################################################################
# Description: class xbmcgui default member function.
# Parameters : TBD
# Return : TBD
######################################################################
def onClick(self,controlId):
try:
if controlId==BUTTON_LEFT:
self.onAction1(ACTION_PREVIOUS_MENU)
elif controlId==BUTTON_RIGHT:
self.onAction1(ACTION_CONTEXT_MENU)
#self.setFocus(self.list4)
elif controlId==BUTTON_EXIT2:
#print 'pressed exit button'
#self.setFocus(self.list3); xbmc.sleep(10); self.onAction1(ACTION_SELECT_ITEM)
dialog=xbmcgui.Dialog()
if dialog.yesno("Navi-X","Are you sure you want to leave?")==True:
self.state_busy=1
#self.setInfoText("Shutting Down Navi-X...")
SetInfoText("Shutting Down Navi-X...",setlock=True)
self.onSaveSettings()
self.bkgndloadertask.kill()
self.bkgndloadertask.join(10) #timeout after 10 seconds.
self.downloader.kill()
self.downloader.join(10) #timeout after 10 seconds.
self.close() #exit
else:
self.state_busy=0
else:
self.onAction1(ACTION_SELECT_ITEM)
except: print '* Error during onClick.'
######################################################################
# Description: Sets the rating image.
# Parameters : -
# Return : -
######################################################################
def UpdateRateingImage(self):
pos=self.getPlaylistPosition()
if pos >= 0:
rating=self.pl_focus.list[pos].rating
if rating != '':
self.rating.setImage('rating'+rating+'.png')
self.rating.setVisible(1)
else:
self.rating.setVisible(0)
######################################################################
# Description: Display the media source for processor based entries.
# Parameters : -
# Return : -
######################################################################
def DisplayMediaSource(self):
pos=self.getPlaylistPosition()
if pos >= 0:
#Display media source
try: str_url=self.pl_focus.list[pos].URL;
except: str_url=""
#TestBug({'str_url':str_url})
#TestBug({'localfile':self.pl_focus.list[pos].localfile})
try:
if "://" in str_url: ProtocolMarker=str_url.split("://")[0]
else: ProtocolMarker="Local"
except: ProtocolMarker=""
#TestBug("ProtocolMarker (navix.py): "+ProtocolMarker)
if hasattr(self,'labProtocol'):
try: self.labProtocol.setLabel(ProtocolMarker);
except:
try: self.labProtocol.setLabel("");
except: pass
str_server_report=""
if str_url != "" and self.pl_focus.list[pos].type != "playlist":
match=re_server.search(str_url)
if match:
str_server_report="Source: "+match.group(1)
if self.pl_focus.list[pos].processor != "":
str_server_report=str_server_report+"+"
SetInfoText(str_server_report)
######################################################################
# Description: Checks if one of the context menu keys is pressed.
# Parameters : action=handle to UI control
# Return : True if valid context menu key is pressed.
######################################################################
def ChkContextMenu(self,action):
result=False
#Support for different remote controls.
if action==261:
result=True
elif action==ACTION_CONTEXT_MENU:
result=True
elif action==ACTION_CONTEXT_MENU2:
result=True
return result
######################################################################
# Description: class xbmcgui default member function.
# Parameters : control=handle to UI control
# Return : -
######################################################################
def onControl(self,control):
pass
######################################################################
# Description: Parse playlist file. Playlist file can be a:
# -PLX file;
# -RSS v2.0 file (e.g. podcasts);
# -RSS daily Flick file (XML1.0);
# -html Youtube file;
# Parameters : URL (optional) =URL of the playlist file.
# mediaitem (optional)=Playlist mediaitem containing
# playlist info. Replaces URL parameter.
# start_index (optional) = cursor position after loading
# playlist.
# reload (optional)= indicates if the playlist shall be
# reloaded or only displayed.
# proxy = proxy to use for loading
# Return : 0 on success, -1 if failed.
######################################################################
    def ParsePlaylist(self,URL='',mediaitem=CMediaItem(),start_index=0,reload=True,proxy=""):
        """Load a playlist and display it in the in-focus list control.

        Dispatches on the mediaitem type to the matching loader (PLX,
        RSS 2.0, Atom, OPML, Shoutcast/Apple XML, directory, Flickr
        daily RSS). On loader failure an error dialog is shown and the
        method retries with either the local MyXBMC list (for the home
        URLs) or the previously displayed mediaitem. Returns 0 on
        success, -1 on load failure, -2 on an unexpected exception.

        NOTE(review): `mediaitem=CMediaItem()` is a mutable default
        evaluated once at definition time — verify no caller mutates it.
        NOTE(review): the parameters `reload` and local `type` shadow
        Python 2 builtins; kept as-is for compatibility.
        """
        try:
            #avoid recursive call of this function by setting state to busy.
            self.state_busy=1
            param_proxy=proxy
            if param_proxy=="": #use caching as default
                proxy="CACHING"
            #The application contains 5 CPlayList objects:
            #(1)main list,
            #(2)favorites,
            #(3)download queue
            #(4)download completed list
            #(5)incomplete downloads list
            #Parameter 'self.pl_focus' points to the playlist in focus (1-5).
            playlist=self.pl_focus
            #The application contains one xbmcgui list control which displays
            #the playlist in focus.
            listcontrol=self.list
            # Hide the list while (re)loading and show the busy label.
            listcontrol.setVisible(0)
            self.list2tb.setVisible(0)
            self.loading.setLabel("Please wait...")
            self.loading.setVisible(1)
            if reload==False:
                # Re-display the playlist that was shown before.
                mediaitem=self.mediaitem
            type=mediaitem.GetType()
            if reload==True:
                print 'type = '+ str(type) #load the playlist
                if type=='rss_flickr_daily':
                    result=playlist.load_rss_flickr_daily(URL,mediaitem,proxy)
                elif type[0:3]=='rss' and ('watchkodi.com' in URL): # or 'watchkodi.com' in mediaitem): #assume playlist file
                    # watchkodi.com RSS feeds are treated as PLX playlists.
                    #if (param_proxy=="") and (self.smartcache=='true'):
                    if (proxy=="CACHING") and (self.smartcache=='true'):
                        result=playlist.load_plx(URL,mediaitem,proxy="SMARTCACHE")
                    else:
                        result=playlist.load_plx(URL,mediaitem,proxy)
                elif type[0:3]=='rss':
                    result=playlist.load_rss_20(URL,mediaitem,proxy)
                elif type[0:4]=='atom':
                    result=playlist.load_atom_10(URL,mediaitem,proxy)
                elif type=='opml':
                    result=playlist.load_opml_10(URL,mediaitem,proxy)
                elif type=='xml_shoutcast':
                    result=playlist.load_xml_shoutcast(URL,mediaitem,proxy)
                elif type=='xml_applemovie':
                    result=playlist.load_xml_applemovie(URL,mediaitem,proxy)
                elif type=='directory':
                    result=playlist.load_dir(URL,mediaitem,proxy)
                else: #assume playlist file
                    #if (param_proxy=="") and (self.smartcache=='true'):
                    if (proxy=="CACHING") and (self.smartcache=='true'):
                        result=playlist.load_plx(URL,mediaitem,proxy="SMARTCACHE")
                    else:
                        result=playlist.load_plx(URL,mediaitem,proxy)
                # Report loader errors to the user via dialogs.
                if result == -1: #error
                    dialog=xbmcgui.Dialog(); dialog.ok("Error","This playlist requires a newer Navi-X version")
                elif result == -2:#error
                    dialog=xbmcgui.Dialog(); dialog.ok("Error","Cannot open file."); print ["Error","-2","Cannot open file.","navix.py - ParsePlaylist",URL]
                elif result == -3: #server error
                    if URL=='' : URL='Unknown URL'
                    dialog=xbmcgui.Dialog(); dialog.ok("Error","Can not connect to server and no cached file exists."); print 'Error with '+URL
                if result != 0: #failure
                    print ['URL',URL]
                    self.loading.setVisible(0)
                    listcontrol.setVisible(1)
                    self.setFocus(listcontrol)
                    self.state_busy=0
                    if (URL==home_URL) or (URL==home_URL_old) or (URL==home_URL_mirror):
                        # Home page failed: fall back to the bundled local list.
                        self.descr_view=False
                        #r2d2=self.ParsePlaylist(URL=xbmc.translatePath(os.path.join(RootDir,MyXBMC_list)),proxy=proxy)
                        r2d2=self.ParsePlaylist(URL=MyXBMC_list,proxy=proxy)
                        return r2d2
                    else:
                        # Otherwise re-display the previous playlist from cache,
                        # but still report failure to the caller.
                        self.descr_view=False
                        #doPageBack()
                        r2d2=self.ParsePlaylist(mediaitem=self.mediaitem,proxy="CACHING")
                        #return r2d2
                    return -1
                #return to default view
                self.listview='default'
                listentry=self.list3.getListItem(3)
                listentry.setLabel("View: "+self.listview)
            #successful
            #the next line is for used for debugging only
            #playlist.save(RootDir+source_list)
            #loading finished, display the list
            self.loading.setLabel("Please wait......")
            self.vieworder='ascending' #ascending by default
            if start_index==0:
                # No explicit cursor position requested: use the playlist's own.
                start_index=playlist.start_index
            self.URL=playlist.URL
            self.type=type
            if URL != '':
                mediaitem.URL=URL
            self.mediaitem=mediaitem
            #display the page title on top of the screen
            if testing:
                # Test builds also show the playlist URL in the title.
                if len(playlist.title) > 0:
                    title=playlist.title+' - ('+playlist.URL+')'
                else:
                    title=playlist.URL
            else:
                if len(playlist.title) > 0:
                    title=playlist.title
                else:
                    title=''
            if title == '':
                title = 'Navi-X'
            self.urllbl.setLabel(title)
            #####################################
            #set the background image
            # if self.disable_background == 'false':
            #     m = self.playlist.background
            # else:
            #     m = 'default'
            #
            # if m == 'default':
            #     m = self.default_background
            #
            # if m == 'default': #default BG image
            #     self.bg.setImage(imageDir + background_image1)
            #     self.bg1.setImage(imageDir + background_image2)
            #     self.background = m
            # elif m != 'previous': #URL to image located elsewhere
            #     ext = getFileExtension(m)
            #     loader = CFileLoader2(window=self) #file loader
            #     loader.load(m, imageCacheDir + "background." + ext, timeout=10, proxy="ENABLED", content_type='image')
            #     if loader.state == 0:
            #         self.bg.setImage(loader.localfile)
            #         self.bg1.setImage(imageDir + background_image2)
            #######################################
            # Pick the list-control variant for this playlist's view type;
            # playlists with a description use the text-box variant.
            newview=self.SetListView(playlist.view)
            if (newview==self.list1) and (playlist.description != ""):
                newview=self.list2
            self.list=newview
            listcontrol=newview
            # Thumbnail view (list5) uses smaller pages.
            if newview==self.list5:
                self.page_size=50
            else:
                self.page_size=200
            self.list2tb.controlDown(self.list)
            self.list2tb.controlUp(self.list)
            #filter the playlist for parental control.
            self.FilterPlaylist()
            #Display the playlist page.
            # NOTE(review): '/' is Python 2 integer division here — confirm
            # start_index and page_size are always ints.
            self.SelectPage(start_index / self.page_size,start_index%self.page_size)
            self.loading.setVisible(0)
            listcontrol.setVisible(1)
            self.setFocus(listcontrol)
            if playlist.description != '':
                self.list2tb.reset()
                self.list2tb.setText(playlist.description)
                self.list2tb.setVisible(1)
            self.state_busy=0
            return 0 #success
        except Exception ,e: print '* Error during ParsePlaylist.', str(e); self.state_busy=0; return -2
######################################################################
# Description: Large playlists are splitup into pages to improve
# performance. This function select one of the playlist pages. The
# playlist size is defined by setting variable 'page_size'.
# Parameters : page = page to display
# start_pos: cursor start position
# Return : -
######################################################################
    def SelectPage(self,page=0,start_pos=0,append=False):
        """Render one page of the in-focus playlist into the list control.

        page      -- zero-based page number; entries [page*page_size, ...)
                     are shown, capped at self.page_size items.
        start_pos -- cursor position selected after rendering (shifted by
                     one when a '<<<' previous-page entry is inserted).
        append    -- when True, items are appended without clearing the
                     list controls and without the '<<<' entry.

        Entries newer than 10 days get a 'New ...' label; '<<<'/'>>>'
        pseudo-entries provide page navigation.
        """
        self.state_busy=1
        playlist=self.pl_focus
        listcontrol=self.list
        self.page=page
        listcontrol.setVisible(0)
        self.loading.setLabel("Please wait........")
        self.loading.setVisible(1)
        if append==False:
            #listcontrol.reset() #clear the list control view
            self.list1.reset()
            self.list2.reset()
            self.list5.reset()
        if (page > 0) and (append==False):
            item=xbmcgui.ListItem("<<<") #previous page item
            listcontrol.addItem(item)
            # The '<<<' entry occupies slot 0, so shift the cursor.
            start_pos=start_pos+1
        #today=datetime.datetime.today()
        today=datetime.datetime.utcnow()
        n=0
        for i in range(page*self.page_size,playlist.size()):
            m=playlist.list[i]
            # Skip entries requiring a newer PLX format than supported.
            if int(m.version) <= int(plxVersion):
                # NOTE(review): 'icon' is only assigned for list1/list2/list5
                # views — presumably self.list is always one of those here;
                # otherwise the ListItem call below would see an unbound name.
                if (self.list==self.list1) or (self.list==self.list2):
                    icon=self.getPlEntryIcon(m)
                if self.list==self.list5:
                    icon=self.getPlEntryThumb(m)
                label2=''
                #if True:
                if m.date != '':
                    # Parse 'YYYY-MM-DD[ HH:MM:SS]' and build an age label.
                    try:
                        dt=m.date.split()
                        size=len(dt)
                        dat=dt[0].split('-')
                        if size > 1:
                            tim=dt[1].split(':')
                        else:
                            tim=['00','00','00']
                        #entry_date=datetime.datetime.fromtimestamp(1311421643)
                        entry_date=datetime.datetime(int(dat[0]),int(dat[1]),int(dat[2]),int(tim[0]),int(tim[1]),int(tim[2]))
                        days_past=(today-entry_date).days
                        # timedelta.seconds is the sub-day remainder only.
                        hours_past=(today-entry_date).seconds / 3600
                        if (size > 1) and (days_past==0) and (hours_past < 24):
                            label2='New '+str(hours_past)+' hrs ago'
                        elif days_past <= 10:
                            if days_past==0:
                                label2='New Today'
                            elif days_past==1:
                                label2='New Yesterday'
                            else:
                                label2='New '+str(days_past)+' days ago'
                        elif self.playlist.type != 'playlist':
                            # NOTE(review): uses self.playlist, not the local
                            # 'playlist' — confirm this attribute exists; a
                            # failure here is swallowed by the except below.
                            label2=m.date[:10]
                    except:
                        print "ERROR: Playlist contains invalid date at entry: %d"%(n+1)
                if m.infotag != '':
                    label2=label2+' '+m.infotag
                if m.description != '':
                    # '>' marks entries that have a description to show.
                    label2=label2+' >'
                item=xbmcgui.ListItem(unicode(m.name,"utf-8","ignore"),label2,"",icon)
                #item.setInfo(type="pictures",infoLabels={"Title":m.name})
                listcontrol.addItem(item)
                n=n+1
                if n >= self.page_size:
                    break #page is full
        if ((page+1)*self.page_size < playlist.size()): #next page item
            item=xbmcgui.ListItem(">>>")
            listcontrol.addItem(item)
        self.loading.setVisible(0)
        listcontrol.setVisible(1)
        self.setFocus(listcontrol)
        # Update the 'position/total' indicator.
        pos=self.getPlaylistPosition()
        self.listpos.setLabel(str(pos+1)+'/'+str(self.pl_focus.size()))
        if hasattr(self,'labProtocol'):
            try: self.labProtocol.setLabel("");
            except: pass
        listcontrol.selectItem(start_pos)
        self.state_busy=0
######################################################################
# Description: Filter playlist for parental control.
# Parameters : -
# Return : -
######################################################################
def FilterPlaylist(self):
i=0
while i < self.pl_focus.size():
#for i in range(self.pl_focus.size()):
m=self.pl_focus.list[i]
for p in self.parentlist.list:
if p.URL==m.URL:
#entry found in blocked list
if self.access==False:
if self.hideblocked=="Hided":
self.pl_focus.remove(i)
i=i-1
else:
m.icon=imageDir+'lock-icon.png'
else: #access allowed
m.icon=imageDir+'unlock-icon.png'
break
i=i+1
######################################################################
# Description: Gets the playlist entry icon image for different types
# Parameters : mediaitem: item for which to retrieve the thumb
# Return : | |
import numpy as np
from .pyramid import Pyramid
from .filters import parse_filter
from .c.wrapper import corrDn, upConv
class WaveletPyramid(Pyramid):
"""Multiscale wavelet pyramid
Parameters
----------
image : `array_like`
1d or 2d image upon which to construct to the pyramid.
height : 'auto' or `int`.
The height of the pyramid. If 'auto', will automatically determine based on the size of
`image`.
filter_name : {'binomN', 'haar', 'qmf8', 'qmf12', 'qmf16', 'daub2', 'daub3', 'daub4', 'qmf5',
'qmf9', 'qmf13'}
name of filter to use when constructing pyramid. All scaled so L-2 norm is 1.0
* `'binomN'` - binomial coefficient filter of order N-1
* `'haar'` - Haar wavelet
* `'qmf8'`, `'qmf12'`, `'qmf16'` - Symmetric Quadrature Mirror Filters [1]_
* `'daub2'`, `'daub3'`, `'daub4'` - Daubechies wavelet [2]_
* `'qmf5'`, `'qmf9'`, `'qmf13'` - Symmetric Quadrature Mirror Filters [3]_, [4]_
edge_type : {'circular', 'reflect1', 'reflect2', 'repeat', 'zero', 'extend', 'dont-compute'}
Specifies how to handle edges. Options are:
* `'circular'` - circular convolution
* `'reflect1'` - reflect about the edge pixels
* `'reflect2'` - reflect, doubling the edge pixels
* `'repeat'` - repeat the edge pixels
* `'zero'` - assume values of zero outside image boundary
* `'extend'` - reflect and invert
    * `'dont-compute'` - zero output when filter overhangs input boundaries.
Attributes
----------
image : `array_like`
The input image used to construct the pyramid.
image_size : `tuple`
The size of the input image.
pyr_type : `str` or `None`
Human-readable string specifying the type of pyramid. For base class, is None.
edge_type : `str`
Specifies how edges were handled.
pyr_coeffs : `dict`
Dictionary containing the coefficients of the pyramid. Keys are `(level, band)` tuples and
values are 1d or 2d numpy arrays (same number of dimensions as the input image)
pyr_size : `dict`
Dictionary containing the sizes of the pyramid coefficients. Keys are `(level, band)`
tuples and values are tuples.
is_complex : `bool`
Whether the coefficients are complex- or real-valued. Only `SteerablePyramidFreq` can have
a value of True, all others must be False.
References
----------
.. [1] <NAME>, "A filter family designed for use in quadrature mirror filter banks",
Proc. ICASSP, pp 291-294, 1980.
.. [2] <NAME>, "Orthonormal bases of compactly supported wavelets", Commun. Pure Appl.
Math, vol. 42, pp 909-996, 1988.
.. [3] <NAME>, "Orthogonal sub-band image transforms", PhD Thesis, MIT Dept. of Elec.
Eng. and Comp. Sci. May 1988. Also available as: MIT Media Laboratory Vision and Modeling
Technical Report #100.
.. [4] <NAME> and <NAME>, "Subband image coding", Subband Transforms, chapter 4,
ed. <NAME>, Kluwer Academic Publishers, Norwell, MA, 1990, pp 143--192.
"""
    def __init__(self, image, height='auto', filter_name='qmf9', edge_type='reflect1'):
        """Construct a multiscale wavelet pyramid from `image`.

        See the class docstring for the meaning of the arguments.
        """
        super().__init__(image=image, edge_type=edge_type)
        self.pyr_type = 'Wavelet'
        self.filters = {}
        # Look up the lowpass filter by name; derive the highpass filter from
        # it by QMF modulation (see _modulate_flip).
        self.filters['lo_filter'] = parse_filter(filter_name, normalize=False)
        self.filters["hi_filter"] = WaveletPyramid._modulate_flip(self.filters['lo_filter'])
        assert self.filters['lo_filter'].shape == self.filters['hi_filter'].shape
        # Stagger sampling if filter is odd-length: stagger is 1 for
        # even-length filters, 0 for odd-length ones.
        self.stagger = (self.filters['lo_filter'].size + 1) % 2
        self._set_num_scales('lo_filter', height)
        # compute the number of channels per level: a 1d input produces one
        # band per scale, a 2d input produces three (lohi, hilo, hihi).
        if min(self.image.shape) == 1:
            self.num_orientations = 1
        else:
            self.num_orientations = 3
        self._build_pyr()
def _modulate_flip(lo_filter):
'''construct QMF/Wavelet highpass filter from lowpass filter
modulate by (-1)^n, reverse order (and shift by one, which is handled by the convolution
routines). This is an extension of the original definition of QMF's (e.g., see
Simoncelli90).
Parameters
----------
lo_filter : `array_like`
one-dimensional array (or effectively 1d array) containing the lowpass filter to
convert into the highpass filter.
Returns
-------
hi_filter : `np.array`
The highpass filter constructed from the lowpass filter, same shape as the lowpass
filter.
'''
# check lo_filter is effectively 1D
lo_filter_shape = lo_filter.shape
assert lo_filter.size == max(lo_filter_shape)
lo_filter = lo_filter.flatten()
ind = np.arange(lo_filter.size, 0, -1) - (lo_filter.size + 1) // 2
hi_filter = lo_filter[::-1] * (-1.0) ** ind
return hi_filter.reshape(lo_filter_shape)
def _build_next(self, image):
"""Build the next level fo the Wavelet pyramid
Should not be called by users directly, this is a helper function to construct the pyramid.
Parameters
----------
image : `array_like`
image to use to construct next level.
Returns
-------
lolo : `array_like`
This is the result of applying the lowpass filter once if `image` is 1d, twice if it's
2d. It's downsampled by a factor of two from the original `image`.
hi_tuple : `tuple`
If `image` is 1d, this just contains `hihi`, the result of applying the highpass filter
. If `image` is 2d, it is `(lohi, hilo, hihi)`, the result of applying the lowpass then
the highpass, the highpass then the lowpass, and the highpass twice. All will be
downsampled by a factor of two from the original `image`.
"""
if image.shape[1] == 1:
lolo = corrDn(image=image, filt=self.filters['lo_filter'], edge_type=self.edge_type, step=(2, 1), start=(self.stagger, 0))
hihi = corrDn(image=image, filt=self.filters['hi_filter'], edge_type=self.edge_type, step=(2, 1), start=(1, 0))
return lolo, (hihi, )
elif image.shape[0] == 1:
lolo = corrDn(image=image, filt=self.filters['lo_filter'].T, edge_type=self.edge_type, step=(1, 2), start=(0, self.stagger))
hihi = corrDn(image=image, filt=self.filters['hi_filter'].T, edge_type=self.edge_type, step=(1, 2), start=(0, 1))
return lolo, (hihi, )
else:
lo = corrDn(image=image, filt=self.filters['lo_filter'], edge_type=self.edge_type, step=(2, 1), start=(self.stagger, 0))
hi = corrDn(image=image, filt=self.filters['hi_filter'], edge_type=self.edge_type, step=(2, 1), start=(1, 0))
lolo = corrDn(image=lo, filt=self.filters['lo_filter'].T, edge_type=self.edge_type, step=(1, 2), start=(0, self.stagger))
lohi = corrDn(image=hi, filt=self.filters['lo_filter'].T, edge_type=self.edge_type, step=(1, 2), start=(0, self.stagger))
hilo = corrDn(image=lo, filt=self.filters['hi_filter'].T, edge_type=self.edge_type, step=(1, 2), start=(0, 1))
hihi = corrDn(image=hi, filt=self.filters['hi_filter'].T, edge_type=self.edge_type, step=(1, 2), start=(0, 1))
return lolo, (lohi, hilo, hihi)
def _build_pyr(self):
im = self.image
for lev in range(self.num_scales):
im, higher_bands = self._build_next(im)
for j, band in enumerate(higher_bands):
self.pyr_coeffs[(lev, j)] = band
self.pyr_size[(lev, j)] = band.shape
self.pyr_coeffs['residual_lowpass'] = im
self.pyr_size['residual_lowpass'] = im.shape
    def _recon_prev(self, image, lev, recon_keys, output_size, lo_filter, hi_filter, edge_type,
                    stagger):
        """Reconstruct the previous level of the pyramid.

        Should not be called by users directly, this is a helper function for
        reconstructing the input image using pyramid coefficients.

        `image` is the already-reconstructed coarser level; bands of level
        `lev` are added only when their `(lev, band)` key is in `recon_keys`,
        so callers can reconstruct from a subset of bands.
        """
        if self.num_orientations == 1:
            # 1d signal: a single highpass band per level; pick the axis to
            # upsample along from the target output size.
            if output_size[0] == 1:
                recon = upConv(image=image, filt=lo_filter.T, edge_type=edge_type, step=(1, 2), start=(0, stagger), stop=output_size)
                if (lev, 0) in recon_keys:
                    recon += upConv(image=self.pyr_coeffs[(lev, 0)], filt=hi_filter.T, edge_type=edge_type, step=(1, 2), start=(0, 1), stop=output_size)
            elif output_size[1] == 1:
                recon = upConv(image=image, filt=lo_filter, edge_type=edge_type, step=(2, 1), start=(stagger, 0), stop=output_size)
                if (lev, 0) in recon_keys:
                    recon += upConv(image=self.pyr_coeffs[(lev, 0)], filt=hi_filter, edge_type=edge_type, step=(2, 1), start=(1, 0), stop=output_size)
        else:
            # 2d: upsample the lowpass residual along columns then rows.
            # Intermediate sizes are taken from the stored band sizes so the
            # two-pass upConv reproduces the original dimensions.
            lo_size = ([self.pyr_size[(lev, 1)][0], output_size[1]])
            hi_size = ([self.pyr_size[(lev, 0)][0], output_size[1]])
            tmp_recon = upConv(image=image, filt=lo_filter.T, edge_type=edge_type, step=(1, 2), start=(0, stagger), stop=lo_size)
            recon = upConv(image=tmp_recon, filt=lo_filter, edge_type=edge_type, step=(2, 1), start=(stagger, 0), stop=output_size)
            # Per-band (filter, start, stop) parameters for the two upConv
            # passes: band 0 = lohi, band 1 = hilo, band 2 = hihi.
            bands_recon_dict = {
                0: [{'filt': lo_filter.T, 'start': (0, stagger), 'stop': hi_size},
                    {'filt': hi_filter, 'start': (1, 0)}],
                1: [{'filt': hi_filter.T, 'start': (0, 1), 'stop': lo_size},
                    {'filt': lo_filter, 'start': (stagger, 0)}],
                2: [{'filt': hi_filter.T, 'start': (0, 1), 'stop': hi_size},
                    {'filt': hi_filter, 'start': (1, 0)}],
            }
            for band in range(self.num_orientations):
                if (lev, band) in recon_keys:
                    tmp_recon = upConv(image=self.pyr_coeffs[(lev, band)], edge_type=edge_type, step=(1, 2), **bands_recon_dict[band][0])
                    recon += upConv(image=tmp_recon, edge_type=edge_type, step=(2, 1), stop=output_size, **bands_recon_dict[band][1])
        return recon
def recon_pyr(self, filter_name=None, edge_type=None, levels='all', bands='all'):
"""Reconstruct the input image using pyramid coefficients.
This function reconstructs the input image using pyramid coefficients.
Parameters
----------
filter_name : {None, 'binomN', 'haar', 'qmf8', 'qmf12', 'qmf16', 'daub2', 'daub3', 'daub4',
'qmf5', 'qmf9', 'qmf13'}
name of filter to use for reconstruction. All scaled so L-2 norm is 1.0
* None (default) - use `self.filter_name`, the filter used to construct the pyramid.
* `'binomN'` - binomial coefficient filter of order N-1
* `'haar'` - Haar wavelet
* `'qmf8'`, `'qmf12'`, `'qmf16'` - Symmetric Quadrature Mirror Filters [1]_
* `'daub2'`, `'daub3'`, `'daub4'` - Daubechies wavelet [2]_
* `'qmf5'`, `'qmf9'`, `'qmf13'` - Symmetric Quadrature Mirror Filters [3]_, [4]_
edge_type : {None, 'circular', 'reflect1', 'reflect2', 'repeat', 'zero', 'extend',
'dont-compute'}
Specifies how to handle edges. Options are:
* None (default) - use `self.edge_type`, the edge_type used to construct the pyramid
* `'circular'` - circular convolution
* `'reflect1'` - reflect about the edge pixels
* `'reflect2'` - reflect, doubling the edge pixels
* `'repeat'` - repeat the edge pixels
* `'zero'` - assume values of zero outside image boundary
            * `'extend'` - reflect and invert
            * `'dont-compute'` - zero output when filter overhangs input boundaries.
levels : `list`, `int`, or {`'all'`, `'residual_highpass'`}
If `list` should contain some subset of integers from `0` to `self.num_scales-1`
(inclusive) and `'residual_lowpass'`. | |
# Copyright 2021 AIPlan4EU project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module defines the InstantaneousAction class and the ActionParameter class.
An InstantaneousAction has a name, a list of ActionParameter, a list of preconditions
and a list of effects.
"""
import upf
from upf.environment import get_env, Environment
from upf.exceptions import UPFTypeError, UPFUnboundedVariablesError, UPFProblemDefinitionError
from fractions import Fraction
from typing import Dict, List, Union
from collections import OrderedDict
class ActionParameter:
    """A typed, named parameter of an action.

    The name is used to retrieve the parameter from its action; the type
    records which objects may be bound to the parameter."""
    def __init__(self, name: str, typename: 'upf.model.types.Type'):
        self._name = name
        self._typename = typename

    def __repr__(self) -> str:
        return f'{str(self.type())} {self.name()}'

    def __eq__(self, oth: object) -> bool:
        # Equal only to other ActionParameters with matching name and type.
        if not isinstance(oth, ActionParameter):
            return False
        return self._name == oth._name and self._typename == oth._typename

    def __hash__(self) -> int:
        return hash(self._name) + hash(self._typename)

    def name(self) -> str:
        """Returns the parameter name."""
        return self._name

    def type(self) -> 'upf.model.types.Type':
        """Returns the parameter type."""
        return self._typename
class Action:
    """Common interface implemented by every action kind."""
    def __init__(self, _name: str, _parameters: 'OrderedDict[str, upf.model.types.Type]' = None,
                 _env: Environment = None, **kwargs: 'upf.model.types.Type'):
        self._env = get_env(_env)
        self._name = _name
        self._parameters: 'OrderedDict[str, ActionParameter]' = OrderedDict()
        # Parameters may be supplied either as an explicit OrderedDict or as
        # keyword arguments, but never both at once.
        if _parameters is not None:
            assert len(kwargs) == 0
            items = _parameters.items()
        else:
            items = kwargs.items()
        for param_name, param_type in items:
            self._parameters[param_name] = ActionParameter(param_name, param_type)

    def __eq__(self, oth: object) -> bool:
        raise NotImplementedError

    def __hash__(self) -> int:
        raise NotImplementedError

    def clone(self):
        raise NotImplementedError

    @property
    def name(self) -> str:
        """Returns the action name."""
        return self._name

    @name.setter
    def name(self, new_name: str):
        """Sets the action name."""
        self._name = new_name

    def parameters(self) -> List[ActionParameter]:
        """Returns the list of the action parameters."""
        return list(self._parameters.values())

    def parameter(self, name: str) -> ActionParameter:
        """Returns the parameter of the action with the given name."""
        return self._parameters[name]

    def is_conditional(self) -> bool:
        """Returns True if the action has conditional effects."""
        raise NotImplementedError
class InstantaneousAction(Action):
    """Represents an instantaneous action.

    Extends `Action` with a list of boolean precondition expressions and a
    list of effects, all of which apply at a single time point."""
    def __init__(self, _name: str, _parameters: 'OrderedDict[str, upf.model.types.Type]' = None,
                 _env: Environment = None, **kwargs: 'upf.model.types.Type'):
        Action.__init__(self, _name, _parameters, _env, **kwargs)
        self._preconditions: List[upf.model.fnode.FNode] = []
        self._effects: List[upf.model.effect.Effect] = []

    def __repr__(self) -> str:
        s = []
        s.append(f'action {self.name}')
        first = True
        for p in self.parameters():
            if first:
                s.append('(')
                first = False
            else:
                s.append(', ')
            s.append(str(p))
        if not first:
            s.append(')')
        s.append(' {\n')
        s.append('    preconditions = [\n')
        for c in self.preconditions():
            s.append(f'      {str(c)}\n')
        s.append('    ]\n')
        s.append('    effects = [\n')
        for e in self.effects():
            s.append(f'      {str(e)}\n')
        s.append('    ]\n')
        s.append('  }')
        return ''.join(s)

    def __eq__(self, oth: object) -> bool:
        if isinstance(oth, InstantaneousAction):
            cond = self._env == oth._env and self._name == oth._name and self._parameters == oth._parameters
            return cond and set(self._preconditions) == set(oth._preconditions) and set(self._effects) == set(oth._effects)
        else:
            return False

    def __hash__(self) -> int:
        # Order-insensitive sum: preconditions and effects are compared as
        # sets in __eq__, so the hash must not depend on list order.
        res = hash(self._name)
        for ap in self._parameters.items():
            res += hash(ap)
        for p in self._preconditions:
            res += hash(p)
        for e in self._effects:
            res += hash(e)
        return res

    def clone(self):
        """Returns a copy of this action; the precondition list is copied and
        each effect is cloned so the copy can be mutated independently."""
        new_params = {}
        for param_name, param in self._parameters.items():
            new_params[param_name] = param.type()
        new_instantaneous_action = InstantaneousAction(self._name, new_params, self._env)
        new_instantaneous_action._preconditions = self._preconditions[:]
        new_instantaneous_action._effects = [e.clone() for e in self._effects]
        return new_instantaneous_action

    def preconditions(self) -> List['upf.model.fnode.FNode']:
        """Returns the list of the action preconditions."""
        return self._preconditions

    def clear_preconditions(self):
        """Removes all action preconditions."""
        self._preconditions = []

    def effects(self) -> List['upf.model.effect.Effect']:
        """Returns the list of the action effects."""
        return self._effects

    def clear_effects(self):
        """Removes all effects."""
        self._effects = []

    def conditional_effects(self) -> List['upf.model.effect.Effect']:
        """Returns the list of the action conditional effects."""
        return [e for e in self._effects if e.is_conditional()]

    def is_conditional(self) -> bool:
        """Returns True if the action has conditional effects."""
        return any(e.is_conditional() for e in self._effects)

    def unconditional_effects(self) -> List['upf.model.effect.Effect']:
        """Returns the list of the action unconditional effects."""
        return [e for e in self._effects if not e.is_conditional()]

    def add_precondition(self, precondition: Union['upf.model.fnode.FNode', 'upf.model.fluent.Fluent', ActionParameter, bool]):
        """Adds the given action precondition (deduplicated).

        Raises UPFUnboundedVariablesError if the promoted expression contains
        free variables."""
        precondition_exp, = self._env.expression_manager.auto_promote(precondition)
        assert self._env.type_checker.get_type(precondition_exp).is_bool_type()
        free_vars = self._env.free_vars_oracle.get_free_variables(precondition_exp)
        if len(free_vars) != 0:
            raise UPFUnboundedVariablesError(f"The precondition {str(precondition_exp)} has unbounded variables:\n{str(free_vars)}")
        if precondition_exp not in self._preconditions:
            self._preconditions.append(precondition_exp)

    def add_effect(self, fluent: Union['upf.model.fnode.FNode', 'upf.model.fluent.Fluent'],
                   value: 'upf.model.expression.Expression', condition: 'upf.model.expression.BoolExpression' = True):
        """Adds the given action (assignment) effect."""
        fluent_exp, value_exp, condition_exp = self._check_effect(fluent, value, condition)
        self._add_effect_instance(upf.model.effect.Effect(fluent_exp, value_exp, condition_exp))

    def add_increase_effect(self, fluent: Union['upf.model.fnode.FNode', 'upf.model.fluent.Fluent'],
                            value: 'upf.model.expression.Expression', condition: 'upf.model.expression.BoolExpression' = True):
        """Adds the given action increase effect."""
        fluent_exp, value_exp, condition_exp = self._check_effect(fluent, value, condition)
        self._add_effect_instance(upf.model.effect.Effect(fluent_exp, value_exp, condition_exp, kind = upf.model.effect.INCREASE))

    def add_decrease_effect(self, fluent: Union['upf.model.fnode.FNode', 'upf.model.fluent.Fluent'],
                            value: 'upf.model.expression.Expression', condition: 'upf.model.expression.BoolExpression' = True):
        """Adds the given action decrease effect."""
        fluent_exp, value_exp, condition_exp = self._check_effect(fluent, value, condition)
        self._add_effect_instance(upf.model.effect.Effect(fluent_exp, value_exp, condition_exp, kind = upf.model.effect.DECREASE))

    def _check_effect(self, fluent, value, condition):
        """Shared validation for the add_*_effect methods: promotes the
        arguments and type-checks them, returning the promoted
        (fluent, value, condition) expressions.

        Raises UPFTypeError if the condition is not boolean or the value type
        is not compatible with the fluent type."""
        fluent_exp, value_exp, condition_exp = self._env.expression_manager.auto_promote(fluent, value, condition)
        assert fluent_exp.is_fluent_exp()
        if not self._env.type_checker.get_type(condition_exp).is_bool_type():
            raise UPFTypeError('Effect condition is not a Boolean condition!')
        if not self._env.type_checker.is_compatible_type(fluent_exp, value_exp):
            raise UPFTypeError('InstantaneousAction effect has not compatible types!')
        return fluent_exp, value_exp, condition_exp

    def _add_effect_instance(self, effect: 'upf.model.effect.Effect'):
        # Deduplicate: identical effects are stored only once.
        if effect not in self._effects:
            self._effects.append(effect)

    def _set_preconditions(self, preconditions: List['upf.model.fnode.FNode']):
        self._preconditions = preconditions
class DurativeAction(Action):
'''Represents a durative action.'''
    def __init__(self, _name: str, _parameters: 'OrderedDict[str, upf.model.types.Type]' = None,
                 _env: Environment = None, **kwargs: 'upf.model.types.Type'):
        """Creates a durative action; parameters are handled by `Action`."""
        Action.__init__(self, _name, _parameters, _env, **kwargs)
        # Duration defaults to the fixed value 0.
        self._duration: upf.model.timing.IntervalDuration = upf.model.timing.FixedDuration(self._env.expression_manager.Int(0))
        # Conditions that must hold at a single time point.
        self._conditions: Dict[upf.model.timing.Timing, List[upf.model.fnode.FNode]] = {}
        # Conditions that must hold over a whole interval.
        self._durative_conditions: Dict[upf.model.timing.Interval, List[upf.model.fnode.FNode]] = {}
        # Effects applied at specific time points.
        self._effects: Dict[upf.model.timing.Timing, List[upf.model.effect.Effect]] = {}
def __repr__(self) -> str:
s = []
s.append(f'durative action {self.name}')
first = True
for p in self.parameters():
if first:
s.append('(')
first = False
else:
s.append(', ')
s.append(str(p))
if not first:
s.append(')')
s.append(' {\n')
s.append(f' duration = {str(self._duration)}\n')
s.append(' conditions = [\n')
for t, cl in self.conditions().items():
s.append(f' {str(t)}:\n')
for c in cl:
s.append(f' {str(c)}\n')
s.append(' ]\n')
s.append(' durative conditions = [\n')
for i, cl in self.durative_conditions().items():
s.append(f' {str(i)}:\n')
for c in cl:
s.append(f' {str(c)}\n')
s.append(' ]\n')
s.append(' effects = [\n')
for t, el in self.effects().items():
s.append(f' {str(t)}:\n')
for e in el:
s.append(f' {str(e)}:\n')
s.append(' ]\n')
s.append(' }')
return ''.join(s)
def __eq__(self, oth: object) -> bool:
if isinstance(oth, DurativeAction):
if self._env != oth._env or self._name != oth._name or self._parameters != oth._parameters or self._duration != oth._duration:
return False
if len(self._conditions) != len(oth._conditions):
return False
for t, cl in self._conditions.items():
oth_cl = oth._conditions.get(t, None)
if oth_cl is None:
return False
elif set(cl) != set(oth_cl):
return False
if len(self._durative_conditions) != len(oth._durative_conditions):
return False
for i, dcl in self._durative_conditions.items():
oth_dcl = oth._durative_conditions.get(i, None)
if oth_dcl is None:
return False
elif set(dcl) != set(oth_dcl):
return False
if len(self._effects) != len(oth._effects):
return False
for t, el in self._effects.items():
oth_el = oth._effects.get(t, None)
if oth_el is None:
return False
elif set(el) != set(oth_el):
return False
return True
else:
return False
def __hash__(self) -> int:
res = hash(self._name) + hash(self._duration)
for ap in self._parameters.items():
res += hash(ap)
for t, cl in self._conditions.items():
res += hash(t)
for c in cl:
res += hash(c)
for i, dcl in self._durative_conditions.items():
res += hash(i)
for dc in dcl:
res += hash(dc)
for t, el in self._effects.items():
res += hash(t)
for e in el:
res += hash(e)
return res
    def clone(self):
        """Returns a copy of this durative action.

        Parameter types and the duration object are shared with the original;
        the per-timing lists are shallow-copied and each effect is cloned, so
        the copy's conditions and effects can be mutated independently.
        """
        new_params = {param_name: param.type() for param_name, param in self._parameters.items()}
        new_durative_action = DurativeAction(self._name, new_params, self._env)
        new_durative_action._duration = self._duration
        new_durative_action._conditions = {t: cl[:] for t, cl in self._conditions.items()}
        new_durative_action._durative_conditions = {i : dcl[:] for i, dcl in self._durative_conditions.items()}
        new_durative_action._effects = {t : [e.clone() for e in el] for t, el in self._effects.items()}
        return new_durative_action
    def duration(self):
        '''Returns the action duration interval.'''
        return self._duration

    def conditions(self):
        '''Returns the action conditions, as a dict mapping each timing to
        the list of conditions that must hold at that time.'''
        return self._conditions

    def clear_conditions(self):
        '''Removes all conditions.'''
        self._conditions = {}
def durative_conditions(self):
'''Returns the | |
size:", str(batchY.size()))
if debug: print("batchX[0]:", str(batchX[0]))
if debug: print("batchY size:", str(batchY.size()))
labels = batchY.view(batch_size, -1, output_dim)
max_length = labels.size(1)
if debug: print("max_length:", str(max_length))
#if debug: print("batchX:", str(batchX.size()), "batchY:", str(batchY.size()))
# Forward + Backward + Optimize
optimizer.zero_grad() # zero the gradient buffer
loss = 0
if encoding_size > 0:
outputs, _ = self(batchX, batchX2)
#print('max_length:', max_length)
outputs.squeeze(0)
#outputs = outputs.view(max_length, -1)
print('outputs:', outputs.size())
else:
print('ERROR: Encoding size is 0 and I dont know what to do')
#outputs = self(samples).view(max_length, -1)
#if debug: print("outputs:", str(outputs.size()))
if loss_function == 'crossentropy':
for b in range(batch_size):
true_labels = torch.zeros(max_length).long()
if use_cuda:
true_labels = true_labels.cuda()
print('true_labels size:', str(true_labels.size()))
print('labels[b]', str(len(labels[b])))
for y in range(len(labels[b])):
true_label = labels[b][y].data
#print("true_label:", str(true_label.size()))
true_index = torch.max(true_label, 0)[1].long()
#print("true_index", str(true_index.size()))
true_labels[y] = true_index[0]
true_var = true_labels
print("true_var", str(true_var.size()))
loss = criterion(outputs, true_var)
loss.backward()
optimizer.step()
else:
labels = labels.view(max_length, 1)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
if (i) % print_every == 0:
if debug: print('outputs:', outputs.size(), 'labels:', labels.size())
if debug: print('outputs:', outputs, 'labels:', labels)
print('Epoch [%d/%d], Loss: %.4f' %(epoch, num_epochs, loss.data.item()))
i = i+batch_size
del batchX
del batchY
# Save checkpoint
torch.save({'epoch': epoch, 'state_dict': self.state_dict(), 'optimizer': optimizer.state_dict(), 'loss': loss},
os.path.join(self.checkpoint_dir, 'checkpoint.pth'))
print('Saved checkpoint for epoch', epoch)
print("GRU_GRU training took", str(time.time()-start), "s")
    def predict(self, testX, X2=None, batch_size=1, keep_list=True, return_encodings=False):
        """Run the trained model over testX in batches and collect predictions.

        testX: list of input samples (project-specific structure).
        X2: optional second input stream, indexed in lockstep with testX.
        batch_size: samples per forward pass; the final batch is truncated to
            the remaining length.
        keep_list: if True, slice testX directly; otherwise convert each batch
            to a float tensor first.
        return_encodings: if True, also return the encodings collected from
            the model (only produced on the X2-less path).

        Returns a list with one prediction list per batch, or
        (predictions, encodings) when return_encodings is set.
        """
        # Test the Model
        encodings = []
        print_every = 1
        pred = []
        i = 0
        length = len(testX)# .shape[0]
        if debug: print("testX len:", str(len(testX)))
        while i < length:
            if i % print_every == 0:
                if debug: print("test batch", str(i))
            # Shrink the last batch so it does not run past the input.
            if (i+batch_size) > length:
                batch_size = length-i
            if keep_list:
                # Keep project-specific sample structures as-is.
                if batch_size == 1:
                    samples = testX[i]
                    if X2 is not None:
                        x2_batch = X2[i]
                else:
                    samples = testX[i:i+batch_size]
                    if X2 is not None:
                        x2_batch = X2[i:i+batch_size]
                if debug: print("samples:", str(len(samples)))
            else:
                # Numeric input path: convert the batch to a float tensor.
                x_array = numpy.asarray(testX[i:i+batch_size]).astype('float')
                samples = torch.tensor(x_array, dtype=torch.float, device=tdevice)
            if len(samples) == 0:
                # Empty batch: record placeholders to keep indices aligned.
                pred.append([])
                encodings.append(None)
            else:
                with torch.no_grad():
                    if X2 is not None:
                        outputs = self(samples, x2_batch)
                    else:
                        # Only this path yields encodings.
                        outputs, enc = self(samples, is_test=True)
                        encodings.append(enc)
                num_items = outputs.size()[1]
                predicted = outputs.view(num_items).tolist()
                #_, predicted = torch.max(outputs.data, -1) # TODO: fix this
                print('predicted:', predicted)
                pred.append(predicted)
                del samples
            i = i+batch_size
        if not return_encodings:
            # Free the collected encodings when the caller does not want them.
            del encodings
        if return_encodings:
            return pred, encodings
        else:
            return pred
''' Ranks should be a list of integers, NOT scaled
'''
def ranks_to_indices(ranks, flatten=True):
    """Convert per-item integer ranks (NOT scaled) into item indices ordered
    by rank.

    ranks: a sequence of integer-valued ranks (one per item), or a single
        numeric scalar, treated as a one-item list. Items sharing a rank keep
        their original relative order. (Scalar handling now accepts ints as
        well as floats; the old `type(ranks) == float` check crashed on ints.)
    flatten: if True (default) return a flat list of item indices; otherwise
        return a list of lists, one per occupied rank, ties grouped together.
    """
    # Accept a bare scalar as a one-element rank list.
    if isinstance(ranks, (int, float)):
        ranks = [ranks]
    max_rank = int(numpy.max(numpy.asarray(ranks)))
    # indices_multiple[r] collects the indices of all items with rank r;
    # ranks with no items stay None and are dropped below.
    indices_multiple = [None] * (max_rank + 1)
    for index, rank in enumerate(ranks):
        rank = int(rank)
        if indices_multiple[rank] is None:
            indices_multiple[rank] = []
        indices_multiple[rank].append(index)
    # Filter out unoccupied ranks.
    indices_multiple = [group for group in indices_multiple if group is not None]
    if flatten:
        return [index for group in indices_multiple for index in group]
    return indices_multiple
''' For now, indices is a single list of integers
'''
def indices_to_ranks(indices):
    """Invert ranks_to_indices: given item indices ordered by rank, return
    the rank of each item.

    indices: a flat list of integer item indices, a list of lists (items
        grouped by rank, members of a group share the same rank), or a single
        numeric scalar, treated as a one-item list. (The old code set
        num_indices = 1 for a float scalar but then called len() on it,
        raising TypeError; scalars are now wrapped into a list first.)

    Returns a list `ranks` where ranks[item_index] is that item's rank.
    """
    # Accept a bare scalar as a one-element index list.
    if isinstance(indices, (int, float)):
        indices = [indices]
    # Total number of items, counting every member of nested groups.
    if indices and isinstance(indices[0], list):
        num_indices = sum(len(group) for group in indices)
    else:
        num_indices = len(indices)
    ranks = [None] * num_indices
    for rank, entry in enumerate(indices):
        if isinstance(entry, list):
            # All items in this group share the same rank.
            for item in entry:
                ranks[int(item)] = rank
        else:
            ranks[int(entry)] = rank
    return ranks
''' Gaussian function for smoothing target distribution
'''
def gaussian(x, mu, sig):
    """Unnormalized Gaussian: exp(-(x - mu)^2 / (2 * sig^2)).

    Peak value is 1 at x == mu; used when smoothing target distributions.
    """
    squared_deviation = numpy.power(x - mu, 2.)
    return numpy.exp(-squared_deviation / (2 * numpy.power(sig, 2.)))
''' Smooth the target distribution
target: a tensor of the target distribution (timesteps, n)
'''
def smooth_distribution(target, sig):
    """Smooth a (near) one-hot target distribution with a Gaussian falloff.

    target: tensor of shape (timesteps, n); each column is treated as a
        distribution over timesteps with a single peak.
    sig: standard deviation of the Gaussian used for smoothing.

    Returns a tensor of the same shape where each value is the max of the
    original value and a Gaussian centered on the column's argmax.

    NOTE(review): `target[:, y]` is a view in torch, so the in-place
    assignment below also mutates `target` itself — confirm callers expect
    this side effect.
    """
    n = target.size(1)
    slices = []
    # For each slice (column) of the tensor
    print('target', target.size())
    for y in range(n):
        slice = target[:, y].squeeze()
        # Position of the peak in this column.
        one_index = torch.argmax(slice, dim=0).item()
        for k in range(slice.size(0)):
            # Keep the larger of the existing value and the Gaussian falloff
            # from the peak position.
            slice[k] = max(slice[k].item(), gaussian(abs(one_index - k), 0, sig))
        slices.append(slice)
    # Concatenate the slices back into a (timesteps, n) tensor
    smoothed_target = torch.t(torch.stack(slices, dim=0))
    return smoothed_target
# Autoencoder modules
class EncoderRNN(nn.Module):
    """Encoder over embedded event text.

    Wraps a project `Embedder`; forward() returns the ReLU-activated
    embedding with a polarity flag concatenated to every timestep. Note that
    the GRU created in __init__ is never invoked in forward() (its call is
    commented out in the original), so it currently only contributes
    parameters.
    """
    def __init__(self, input_size, hidden, num_layers=1, use_double=False, encoder_name='elmo'):
        super(EncoderRNN, self).__init__()
        self.input_size = input_size
        # Bidirectional GRU uses half the hidden size per direction.
        self.hidden_size = int(hidden/2)
        self.num_layers = num_layers
        # encoding_size is hidden-2 so that the two flag values appended in
        # forward() bring the final width up to `hidden`.
        self.embedder = Embedder(encoder_name, encoding_size=hidden-2, use_gru=True)
        self.lstm = nn.GRU(input_size=self.input_size, hidden_size=self.hidden_size, batch_first=True, bidirectional=True).to(tdevice)
        self.relu = nn.ReLU().to(tdevice)
        self.use_double = use_double
        if self.use_double:
            self.lstm = self.lstm.double()

    def forward(self, input, noise_factor=0.0, return_emb=False):
        """Encode one sample.

        input: batch containing a single row; the row is a project-specific
        tuple where row[0] is the token context, row[1] the per-word flags
        and row[6][0] a polarity flag — TODO confirm layout against callers.
        Returns the encoding, plus the raw embedding when return_emb is True.
        """
        # Input should be (1, seq_len, dim)
        row = input[0]
        self.hidden = None
        context = row[0]
        print('ae enc context:', context)
        word_flags = row[1]
        pol_flag = row[6][0]
        # NOTE(review): ev_text/flags are built but unused — ae_row is set to
        # the raw row below.
        ev_text = ['<sos>'] + context + ['<eos>']
        flags = [0] + word_flags + [0]
        ae_row = row
        enc, emb = self.embedder(ae_row, return_emb=True, noise_factor=noise_factor)
        # Append the polarity flag (duplicated) to every timestep.
        flag_tensor = torch.tensor([pol_flag, pol_flag], device=tdevice, dtype=torch.float).view(1, 1, -1).repeat(1, enc.size(1), 1)
        print('flag_tensor:', flag_tensor)
        if self.use_double:
            enc = enc.double()
            flag_tensor = flag_tensor.double()
        enc = torch.cat((enc, flag_tensor), dim=2)
        encoded_input = self.relu(enc)
        if return_emb:
            return encoded_input, emb
        else:
            return encoded_input
class DecoderRNN(nn.Module):
    """Single-direction GRU decoder emitting a log-softmax distribution over
    the output vocabulary, one token per forward() call."""
    def __init__(self, hidden, output_size, num_layers=1, use_double=False):
        super(DecoderRNN, self).__init__()
        self.hidden_size = hidden
        self.output_size = int(output_size/2)
        self.num_layers = num_layers
        # Filled in externally with the token vocabulary — TODO confirm.
        self.vocab = None
        self.use_double = bool(use_double)
        # The decoder consumes one scalar token value per step (input_size=1).
        self.lstm = nn.GRU(input_size=1, hidden_size=self.hidden_size, batch_first=True, bidirectional=False).to(tdevice)
        self.linear = nn.Linear(self.hidden_size, output_size).to(tdevice)
        self.softmax = nn.LogSoftmax(dim=2).to(tdevice)
        if use_double:
            self.lstm = self.lstm.double()
            self.linear = self.linear.double()

    def forward(self, input_token, hidden):
        """One decoding step.

        input_token: previous token value (tensor), reshaped to (1, 1, -1).
        hidden: recurrent state from the previous step.
        Returns (log-probabilities over the output vocabulary, new hidden).
        """
        # Cast to the dtype the GRU was built with.
        if self.use_double:
            dec_input_token = input_token.double().view(1, 1, -1).to(tdevice)
        else:
            dec_input_token = input_token.float().view(1, 1, -1).to(tdevice)
        print('dec: decoder input token:', dec_input_token)
        decoded_output, hidden = self.lstm(dec_input_token, hidden)
        decoded_output = self.linear(decoded_output)
        decoded_output = self.softmax(decoded_output)
        return decoded_output, hidden
class Autoencoder(nn.Module):
def __init__(self, input_size, hidden_size, batch_size=1, num_layers=1, use_double=False, autoencoder_file=None, encoder_name='elmo', checkpoint_dir=None):
super(Autoencoder, self).__init__()
self.checkpoint_dir = checkpoint_dir
self.input_size = input_size
self.hidden_size = hidden_size
self.batch_size = batch_size
#self.elmo = elmo
self.num_layers = num_layers
self.use_double = use_double
self.encoder_name = encoder_name
self.gamma = 0.5
self.noise_factor = 0.15
print('autoencoder use_double:', self.use_double, 'encoder_name:', encoder_name)
#if autoencoder_file is None:
# self.file = autoencoder_file
#else:
print('autoencoder_file:', autoencoder_file)
self.file = autoencoder_file
assert(self.file is not None)
def forward(self, input, noise_factor=0.0):
encoded_input, emb = self.encoder(input, return_emb=True, noise_factor=noise_factor)
print('ae encoded input:', encoded_input)
#decoded_output = self.decoder(encoded_input)
return encoded_input, emb
def fit(self, X, epochs=30):
start = time.time()
learning_rate = 0.001
#criterion = torch.nn.MSELoss()
# Linearize the event examples
events = []
for row in X:
for ev in row:
#print('ev:', ev[0])
# Add start and end tokens
#ev_text = ['<sos>'] + ev[0] + ['<eos>']
#flags = [0] + ev[1] + [0]
#print('ae event text:', ev_text)
#new_event = (ev_text, flags, ev[2], ev[3], ev[4])
events.append(ev)
X = events
num_examples = len(X)
print('Autoencoder examples:', num_examples)
# Create the vocab object
counter = Counter()
#tokenizer = WordPunctTokenizer()
for row in X:
text = row[0]
#print('ae row:', row, 'text:', text)
#for word in tokenizer.tokenize(text):
for word in text:
counter[word] += 1
vocab = Vocab(counter)
vocab_size = len(vocab.itos)
print('ae vocab size:', vocab_size)
#self.decoder.vocab = vocab
self.encoder = EncoderRNN(self.input_size, self.hidden_size, self.num_layers, self.use_double, encoder_name=self.encoder_name)
self.decoder = DecoderRNN(self.hidden_size, vocab_size, self.num_layers, self.use_double)
rec_criterion = torch.nn.CrossEntropyLoss()
emb_criterion = torch.nn.MSELoss()
enc_optimizer = optim.Adam(self.encoder.parameters(), lr=learning_rate)
dec_optimizer = optim.Adam(self.decoder.parameters(), lr=learning_rate)
# Check for model checkpoint
start_epoch = 0
enc_checkpoint_file = os.path.join(self.checkpoint_dir, 'ae_enc_checkpoint.pth')
dec_checkpoint_file = os.path.join(self.checkpoint_dir, 'ae_dec_checkpoint.pth')
if os.path.exists(enc_checkpoint_file):
enc_check = torch.load(enc_checkpoint_file)
dec_check = torch.load(dec_checkpoint_file)
self.encoder.load_state_dict(enc_check['state_dict'])
self.decoder.load_state_dict(dec_check['state_dict'])
enc_optimizer.load_state_dict(enc_check['optimizer'])
dec_optimizer.load_state_dict(dec_check['optimizer'])
loss = dec_check['loss']
start_epoch = dec_check['epoch'] + 1
print('loading from checkpoint, restarting | |
of `multiply_diag` is the same as
``self.dot(diag_vect.diag(), (axis, 0))``
if `direction` is "right" or "r" (the diagonal matrix comes from the
right) or
``self.dot(diag_vect.diag(), (axis, 1))``
if `direction` is "left" or "l". This operation is just done without
constructing the full diagonal matrix.
"""
assert diag_vect.qodulus == self.qodulus
assert diag_vect.charge == 0
assert len(diag_vect.shape) == 1
assert direction in {"r", "l", "left", "right"}
res = self.empty_like()
if axis < 0:
axis = len(self.shape) + axis
right = direction == "r" or direction == "right"
# Flip axes as needed.
if (right and self.dirs[axis] != -diag_vect.dirs[0]) or (
not right and self.dirs[axis] != diag_vect.dirs[0]
):
warnings.warn(
"Automatically flipping dir 0 of diag_vect in "
"multiply_diag.",
stacklevel=2,
)
diag_vect = diag_vect.flip_dir(0)
for k, v in self.sects.items():
q_sum = k[axis]
v = np.swapaxes(v, -1, axis)
v = v * diag_vect[(q_sum,)]
v = np.swapaxes(v, -1, axis)
res[k] = v
res.qhape[axis] = [
self._qod_func(q + diag_vect.charge) for q in diag_vect.qhape[0]
]
res.charge = self._qod_func(self.charge + diag_vect.charge)
return res
    def matrix_dot(self, other):
        """Take the dot product of two tensors of order < 3.

        If either one is a matrix, it must be invariant and have ``defval ==
        0``.

        The contraction is done block by block: symmetry restricts which
        charge sectors can pair up, so each result block is a single
        ``np.dot`` of matching sectors.  The result's charge is the sum of
        the input charges (mod qodulus) and it is invariant only if both
        inputs are.
        """
        assert self.qodulus == other.qodulus
        # The following essentially is a massive case statement on whether self
        # and other are scalars, vectors or matrices. Unwieldly, but efficient
        # and clear.
        if self.isscalar() and other.isscalar():
            return self * other
        else:
            res_dtype = np.result_type(self.dtype, other.dtype)
            res_charge = self._qod_func(self.charge + other.charge)
            res_invar = self.invar and other.invar
            # Vector times vector
            if len(self.shape) == 1 and len(other.shape) == 1:
                assert self.compatible_indices(other, 0, 0)
                # The contracted index pair must carry opposing directions.
                if self.dirs[0] + other.dirs[0] != 0:
                    warnings.warn(
                        "Automatically flipping dir 0 of other in dot."
                    )
                    other = other.flip_dir(0)
                res = 0
                # Accumulate the scalar result sector by sector.
                for qnum in self.qhape[0]:
                    try:
                        a = self[(qnum,)]
                        b = other[(qnum,)]
                    except KeyError:
                        # This block doesn't exist in one or the other matrix,
                        # so it contributes zero.
                        continue
                    prod = np.dot(a, b)
                    if prod:
                        res += prod
                # Turn the single scalar number into a scalar tensor.
                res = type(self)(
                    [],
                    qhape=[],
                    qodulus=self.qodulus,
                    sects={},
                    defval=res,
                    dirs=[],
                    dtype=res_dtype,
                    charge=res_charge,
                    invar=res_invar,
                )
            else:
                res_sects = {}
                # Vector times matrix
                if len(self.shape) == 1:
                    assert other.invar
                    assert other.defval == 0
                    assert self.compatible_indices(other, 0, 0)
                    if self.dirs[0] + other.dirs[0] != 0:
                        warnings.warn(
                            "Automatically flipping dir 0 of self in dot."
                        )
                        self = self.flip_dir(0)
                    res_shape = [other.shape[1]]
                    res_qhape = [other.qhape[1]]
                    res_dirs = [other.dirs[1]]
                    # flux relates the charge on the contracted leg of `other`
                    # to the charge on its free leg.
                    flux = -other.dirs[0] * other.dirs[1]
                    for sum_qnum in self.qhape[0]:
                        b_qnum = self._qod_func(
                            sum_qnum * flux + other.dirs[1] * other.charge
                        )
                        try:
                            a = self[(sum_qnum,)]
                            b = other[(sum_qnum, b_qnum)]
                            res_sects[(b_qnum,)] = np.dot(a, b)
                        except KeyError:
                            # One of the blocks was zero so the resulting block
                            # will be zero.
                            continue
                # Matrix times vector
                elif len(other.shape) == 1:
                    assert self.invar
                    assert self.defval == 0
                    assert self.compatible_indices(other, 1, 0)
                    if self.dirs[1] + other.dirs[0] != 0:
                        warnings.warn(
                            "Automatically flipping dir 0 of other in dot."
                        )
                        other = other.flip_dir(0)
                    res_shape = [self.shape[0]]
                    res_qhape = [self.qhape[0]]
                    res_dirs = [self.dirs[0]]
                    flux = -self.dirs[0] * self.dirs[1]
                    for sum_qnum in self.qhape[1]:
                        a_qnum = self._qod_func(
                            sum_qnum * flux + self.dirs[0] * self.charge
                        )
                        try:
                            a = self[(a_qnum, sum_qnum)]
                            b = other[(sum_qnum,)]
                            res_sects[(a_qnum,)] = np.dot(a, b)
                        except KeyError:
                            # One of the blocks was zero so the resulting block
                            # will be zero.
                            continue
                # Matrix times matrix
                else:
                    assert self.invar and other.invar
                    assert self.defval == other.defval == 0
                    assert self.compatible_indices(other, 1, 0)
                    if self.dirs[1] + other.dirs[0] != 0:
                        warnings.warn(
                            "Automatically flipping dir 0 of other in dot."
                        )
                        other = other.flip_dir(0)
                    res_shape = [self.shape[0], other.shape[1]]
                    res_qhape = [self.qhape[0], other.qhape[1]]
                    res_dirs = [self.dirs[0], other.dirs[1]]
                    # For each value of the summed-over charge, the row charge
                    # of `self` and the column charge of `other` are fixed.
                    a_flux = -self.dirs[0] * self.dirs[1]
                    b_flux = -other.dirs[0] * other.dirs[1]
                    for sum_qnum in self.qhape[1]:
                        a_qnum = self._qod_func(
                            sum_qnum * a_flux + self.dirs[0] * self.charge
                        )
                        b_qnum = self._qod_func(
                            sum_qnum * b_flux + other.dirs[1] * other.charge
                        )
                        try:
                            a = self[a_qnum, sum_qnum]
                            b = other[sum_qnum, b_qnum]
                            res_sects[a_qnum, b_qnum] = np.dot(a, b)
                        except KeyError:
                            # One of the blocks was zero so the resulting block
                            # will be zero.
                            continue
                # Turn the dictionary of sectors into a tensor.
                res = type(self)(
                    res_shape,
                    qhape=res_qhape,
                    qodulus=self.qodulus,
                    sects=res_sects,
                    dtype=res_dtype,
                    dirs=res_dirs,
                    charge=res_charge,
                    invar=res_invar,
                )
            return res
def matrix_eig(
self,
chis=None,
eps=0,
print_errors="deprecated",
hermitian=False,
break_degenerate=False,
degeneracy_eps=1e-6,
sparse=False,
trunc_err_func=None,
evenTrunc = False,
):
"""Find eigenvalues and eigenvectors of a matrix.
The input must have ``defval == 0``, ``invar == True``, ``charge ==
0``, and must be square in the sense that the dimensions must have the
same `qim` and `dim` and opposing `dirs`.
If `hermitian` is True the matrix is assumed to be hermitian.
Truncation works like for SVD, see the docstring there for more.
If `sparse` is True, a sparse eigenvalue decomposition, using power
methods from `scipy.sparse.eigs` or `eigsh`, is used. This
decomposition is done to find ``max(chis)`` eigenvalues, after which
the decomposition may be truncated further if the truncation error so
allows. Thus ``max(chis)`` should be much smaller than the full size of
the matrix, if `sparse` is True.
The return value is ``S, U, rel_err``, where `S` is a non-invariant
vector of eigenvalues and `U` is a matrix that has as its columns the
eigenvectors. Both have the same `dim` and `qim` as self. `rel_err` is
the truncation error.
"""
if print_errors != "deprecated":
msg = (
"The `print_errors` keyword argument has been deprecated, "
"and has no effect. Rely instead on getting the error as a "
"return value, and print it yourself."
)
warnings.warn(msg)
# If chis is not specfied, there is no even truncation scheme; else, we
# keep track of the chi we specfied
if chis is None:
evenTrunc = False
else:
try:
chis = list(chis)
except TypeError:
chis = [chis]
chiSpec = max(chis)
chis = self._matrix_decomp_format_chis(chis, eps)
maxchi = max(chis)
assert self.defval == 0
assert self.invar
assert self.charge == 0
assert self.dirs[0] + self.dirs[1] == 0
assert set(zip(self.qhape[0], self.shape[0])) == set(
zip(self.qhape[1], self.shape[1])
)
S_dtype = np.float_ if hermitian else np.complex_
U_dtype = self.dtype if hermitian else np.complex_
# Eigenvalue decompose each sector at a time.
# While doing so, also keep track of a list of all eigenvalues, as well
# as a heap that gives the negative of the absolute value of the
# largest eigenvalue in each sector. These will be needed later when
# deciding how to truncate the eigenvalues.
eigdecomps = {}
dims = {}
minusabs_next_eigs = []
all_eigs = []
for k, v in self.sects.items():
if 0 in v.shape:
# This matrix is empty and trivial.
shp = v.shape
m = min(shp)
u = np.empty((shp[0], m), dtype=U_dtype)
s = np.empty((m,), dtype=S_dtype)
eigdecomp = (s, u)
else:
if sparse and maxchi < min(v.shape) - 1:
if hermitian:
s, u = spsla.eighs(
v, k=maxchi, return_eigenvectors=True
)
else:
s, u = spsla.eigs(
v, k=maxchi, return_eigenvectors=True
)
else:
if hermitian:
s, u = np.linalg.eigh(v)
else:
s, u = np.linalg.eig(v)
order = np.argsort(-np.abs(s))
s = s[order]
u = u[:, order]
s = s.astype(S_dtype)
u = u.astype(U_dtype)
eigdecomp = (s, u)
eigdecomps[k] = eigdecomp
dims[k] = 0
all_eigs.append(s)
if 0 not in s.shape:
heapq.heappush(minusabs_next_eigs, (-np.abs(s[0]), k))
try:
all_eigs = np.concatenate(all_eigs)
except ValueError:
all_eigs = np.array((0,))
if sparse:
norm_sq = self.norm_sq()
else:
norm_sq = None
# Figure out what bond dimension to truncate to, how this bond
# dimension is distributed over the different sectors, and what the
# truncation error is.
chi, dims, rel_err = type(self)._find_trunc_dim(
all_eigs,
eigdecomps,
minusabs_next_eigs,
dims,
chis=chis,
eps=eps,
break_degenerate=break_degenerate,
degeneracy_eps=degeneracy_eps,
trunc_err_func=trunc_err_func,
norm_sq=norm_sq,
)
# truncate in both sectors evenly
if evenTrunc and chiSpec == chi:
# This piece of codes is only designed
# with Z2 symmetry tensor in mind
errmeg = "The matrix should have two sectors (0,0) and (1,1)."
assert len(dims) == 2, errmeg
if chiSpec % 2 == 0:
dims[(0, 0)] = int(chiSpec / 2)
dims[(1, 1)] = int(chiSpec / | |
# Repository: Sunnyfred/Atlantic_Hurricane_Simulations
import csv
import matplotlib.pyplot as plt
import numpy as np
from collections import OrderedDict
import matplotlib as mpl
# import matplotlib.gridspec as gridspec
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import StrMethodFormatter
import matplotlib.font_manager as font_manager
from matplotlib.patches import Patch
import string
from netCDF4 import Dataset
import json
from cartopy.feature import NaturalEarthFeature
import cartopy.crs as crs
import pickle
from wrf import (to_np, getvar, smooth2d, get_cartopy, cartopy_xlim,
cartopy_ylim, latlon_coords)
import cartopy
import os
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
map_location = "C:/Users/limgr/.spyder-py3/Map"
os.environ["CARTOPY_USER_BACKGROUNDS"] = map_location
# List the colors that will be used for tracing the track.
# Font setup: all labels use Times New Roman at various sizes.
csfont = {'fontname':'Times New Roman'}
font = font_manager.FontProperties(family='Times New Roman', size=30)
fontbar = font_manager.FontProperties(family='Times New Roman', size=12)
font_wt = font_manager.FontProperties(family='Times New Roman', size=15)
# Per-track line colors / dash patterns / marker shapes / marker sizes.
colors = ['k','blue','cyan','purple', 'red', \
          'blue', 'cyan', 'lightcoral', 'turquoise','red','blue','green','pink']
patterns = ['-', '--','-.','-',':',':','--','--', ':','-', '--', ':','-', '--', ':',\
            '-.', '-.', '-.', ':', '--', '-']
markers = ['s','D','^','o','*','s','+','x','X','D','^','<','>','v']
sizes = [3, 7, 7, 7, 7, 3, 4, 3, 3, 3, 3, 3, 6,5,4,3,2,2]
# Legend labels for the strong (options) and weak (options2) hurricane sets.
options = ["Katrina's Best Track",\
           "Maria's Best Track",\
           "Irma's Best Track",\
           "Dorian's Best Track",\
           "Lorenzo's Best Track"]
options2 = ["Cristobal's Best Track",\
            "Gert's Best Track",\
            "Ike's Best Track",\
            "Joaquin's Best Track",\
            "Nicole's Best Track"]
hurricanes = ["Katrina",\
              "Maria",\
              "Irma",\
              "Dorian",\
              "Lorenzo"]
hurricanes2 = ["Cristobal",\
               "Gert",\
               "Ike",\
               "Joaquin",\
               "Nicole"]
# subplot positions
# NOTE(review): the first `position` assignment below is dead code -- it is
# immediately overwritten two lines later; only the 2-panel layout is used.
position = [[0,0,2],[0,2,4],[0,4,6],[1,0,2],[1,2,4]]
position2 = [[0,4,0,7],[0,4,8,15],[0,4,16,23],[5,9,0,7],[5,9,8,15]]
position = [[0,8,0,8],[9,17,0,8]]
# Named dash patterns for Matplotlib's (offset, on-off-seq) linestyle form.
# NOTE(review): the key 'dashed' appears twice with identical values; the
# duplicate is harmless (dict keeps one entry) but could be removed.
linestyles = OrderedDict(
    [('solid', (0, ())),
     ('dashdotted', (0, (3, 3, 1, 3))),
     ('dashdotdotted', (0, (3, 2, 1, 2, 1, 2))),
     ('dashed', (0, (3, 3))),
     ('dotted', (0, (1, 3))),
     ('dashed', (0, (3, 3))),
     ('loosely dotted', (0, (1, 10))),
     ('densely dotted', (0, (1, 1))),
     ('loosely dashed', (0, (5, 10))),
     ('densely dashed', (0, (5, 1))),
     ('loosely dashdotted', (0, (3, 10, 1, 10))),
     ('densely dashdotted', (0, (3, 1, 1, 1))),
     ('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
     ('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))])
R = 6373.0 # approxiamte radius of earth in km
# folder for wi and wt files
# Best-track text files and pickled WRF SLP fields for the strong hurricanes.
dir_wt = ['C:/Users/limgr/Desktop/Katrina_track_8km.txt',
          'C:/Users/limgr/Desktop/Maria_track_8km.txt',
          'C:/Users/limgr/Desktop/Irma_track_8km.txt',
          'C:/Users/limgr/Desktop/Dorian_track_8km.txt',
          'C:/Users/limgr/Desktop/Lorenzo_track_8km.txt']
dir_p = ['C:/Users/limgr/Desktop/Katrina_8km.p',
         'C:/Users/limgr/Desktop/Maria_8km.p',
         'C:/Users/limgr/Desktop/Irma_8km.p',
         'C:/Users/limgr/Desktop/Dorian_8km.p',
         'C:/Users/limgr/Desktop/Lorenzo_8km.p']
# Same for the weak hurricanes.
dir_wt2 = ['C:/Users/limgr/Desktop/Cristobal_track_8km.txt',
           'C:/Users/limgr/Desktop/Gert_track_8km.txt',
           'C:/Users/limgr/Desktop/Ike_track_8km.txt',
           'C:/Users/limgr/Desktop/Joaquin_track_8km.txt',
           'C:/Users/limgr/Desktop/Nicole_track_8km.txt']
dir_p2 = ['C:/Users/limgr/Desktop/Cristobal_8km.p',
          'C:/Users/limgr/Desktop/Gert_8km.p',
          'C:/Users/limgr/Desktop/Ike_8km.p',
          'C:/Users/limgr/Desktop/Joaquin_8km.p',
          'C:/Users/limgr/Desktop/Nicole_8km.p']
# Map extents [lon_min, lon_max, lat_min, lat_max], one row per hurricane.
# Three earlier experimental assignments of lat_log_bound were dead code
# (each immediately overwritten); only the final, effective values are kept.
lat_log_bound = [[-95, -35, 12, 31],
                 [-77, -67, 19, 29],
                 [-51, -39, 14, 22],
                 [-78, -70, 23, 29],
                 [-47, -40, 16.5, 25.5]]
lat_log_bound2 = [[-95, -35, 21, 40],
                  [-77, -67, 19, 29],
                  [-51, -39, 14, 22],
                  [-78, -70, 23, 29],
                  [-47, -40, 16.5, 25.5]]
def Calculate_Distance_Haversine1(x):
    """First haversine term sin^2(dlat/2); x[0] is the latitude delta in radians."""
    half_dlat = x[0] / 2
    return np.sin(half_dlat) ** 2
def Calculate_Distance_Haversine2(x):
    """Cosine factor cos(lat) of the haversine formula; x[0] is a latitude in radians."""
    lat_rad = x[0]
    return np.cos(lat_rad)
def Calculate_Distance_Haversine3(x):
    """Second haversine term sin^2(dlon/2); x[1] is the longitude delta in radians."""
    half_dlon = x[1] / 2
    return np.sin(half_dlon) ** 2
########################
# Plot hurricane track #
########################
def _load_best_tracks(track_paths):
    """Read concatenated-JSON best-track files into float32 arrays.

    Each file holds back-to-back JSON objects ('}{' boundaries); only the
    first object per file is kept.  The leading field of every track point
    (a time stamp/index -- TODO confirm against the file writer) is dropped,
    leaving [lat, lon] pairs.

    Returns a list with one array of shape (1, n_points, 2) per input file.
    """
    tracks = []
    for path in track_paths:
        with open(path, 'r') as fh:
            raw = fh.read()
        # Turn the concatenated objects into one valid JSON array.
        records = json.loads('[' + raw.replace('}{', '},{') + ']')
        kept = []
        for idx, record in enumerate(records):
            # Flatten the per-key lists of points into one list.
            points = [pt for value in record.values() for pt in value]
            for pt in points:
                pt.pop(0)  # drop the leading time/index column
            if idx == 0:
                # Only the first record of each file is used.
                kept.append(points)
        tracks.append(np.array(kept, dtype=np.float32))
    return tracks


# Number of leading track points drawn for every storm (previously set
# per-hurricane; the experimental per-storm values were commented out).
cons = 10
real = _load_best_tracks(dir_wt)    # strong hurricanes
real2 = _load_best_tracks(dir_wt2)  # weak hurricanes
for kk in range(1):
# if hurricanes[kk]=='Katrina':
# cons=6
# elif hurricanes[kk]=='Dorian':
# cons=8
# else:
# cons=10
# ax = fig.add_subplot(spec[position[kk][0],position[kk][1]:position[kk][2]])
# ax.text(0.05, 0.9, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes,
# size=30)
slp2D = pickle.load( open( dir_p[kk], "rb" ) )
lats, lons = latlon_coords(slp2D)
# Get the cartopy mapping object (use original data, rather than any processed data)
cart_proj = get_cartopy(slp2D)
# Set the GeoAxes to the projection used by WRF
#ax = plt.axes(projection=cart_proj)
fig = plt.figure(figsize=(12,12))
spec = mpl.gridspec.GridSpec(ncols=8, nrows=17)
ax = fig.add_subplot(spec[position[0][0]:position[0][1],position[0][2]:position[0][3]], projection=cart_proj)
# ax.stock_img()
# Download and add the states and coastlines
states = NaturalEarthFeature(category="cultural", scale="50m",
facecolor="none",
name="admin_1_states_provinces_shp")
ax.add_feature(states, linewidth=.5, edgecolor="black")
ax.coastlines('50m', linewidth=0.8)
# Set the map bounds
# ax.set_xlim(cartopy_xlim(slp2D))
# ax.set_ylim(cartopy_ylim(slp2D))
ax.set_extent(lat_log_bound[0])
ax.background_img(name='SR', resolution='high')
# Show grid lines.
gl = ax.gridlines(crs=crs.PlateCarree(), draw_labels=True,
linewidth=1.5, color='gray', alpha=0.8, linestyle=':')
gl.xlabel_style = {'size': 20, 'color': 'k','fontname':'Times New Roman'}
gl.ylabel_style = {'size': 20, 'color': 'k','fontname':'Times New Roman'}
gl.xlabels_top = False
gl.ylabels_right = False
c=0
real_kk = []
real_kk = real[0]
ll=[]
rr=[]
for i in range(real_kk.shape[0]):
for j in range(real_kk.shape[1]):
if j<cons:
ll.append(real_kk[i][j][0])
rr.append(real_kk[i][j][1])
ax.plot( rr, ll, color = colors[c], marker=markers[0],linewidth=2, \
linestyle=list(linestyles.values())[0],\
markersize=sizes[0], transform=crs.PlateCarree())
c+=1
real_kk = []
real_kk = real[1]
ll=[]
rr=[]
for i in range(real_kk.shape[0]):
for j in range(real_kk.shape[1]):
if j<cons:
ll.append(real_kk[i][j][0])
rr.append(real_kk[i][j][1])
ax.plot( rr, ll, color = colors[c], marker=markers[0],linewidth=2, \
linestyle=list(linestyles.values())[0],\
markersize=sizes[0], transform=crs.PlateCarree())
c+=1
real_kk = []
real_kk = real[2]
ll=[]
rr=[]
for i in range(real_kk.shape[0]):
for j in range(real_kk.shape[1]):
if j<cons:
ll.append(real_kk[i][j][0])
rr.append(real_kk[i][j][1])
ax.plot( rr, ll, color = colors[c], marker=markers[0],linewidth=2, \
linestyle=list(linestyles.values())[0],\
markersize=sizes[0], transform=crs.PlateCarree())
c+=1
real_kk = []
real_kk = real[3]
ll=[]
rr=[]
for i in range(real_kk.shape[0]):
for j in range(real_kk.shape[1]):
if j<cons:
ll.append(real_kk[i][j][0])
rr.append(real_kk[i][j][1])
ax.plot( rr, ll, color = colors[c], marker=markers[0],linewidth=2, \
linestyle=list(linestyles.values())[0],\
markersize=sizes[0], transform=crs.PlateCarree())
c+=1
real_kk = []
real_kk = real[4]
ll=[]
rr=[]
for i in range(real_kk.shape[0]):
for j in range(real_kk.shape[1]):
if j<cons:
ll.append(real_kk[i][j][0])
rr.append(real_kk[i][j][1])
ax.plot( rr, ll, color = colors[c], marker=markers[0],linewidth=2, \
linestyle=list(linestyles.values())[0],\
markersize=sizes[0], transform=crs.PlateCarree())
c+=1
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_linewidth(15)
fig.legend(options, bbox_to_anchor=(0.9, 0.87), prop=font_wt, \
frameon=False)
plt.title('Strong Hurricanes', {'size': 25}, **csfont)
# plt.legend(['Real track','C0.0001', 'C0.01', 'C1', 'C100'],\
# loc = "upper right", prop={'size': 7})
# plt.xlabel("Lon", fontsize=135)
# plt.ylabel("Lat", fontsize=135)
# plt.title(hurricanes[kk], {'size': 35}, **csfont)
# plt.show()
ax = fig.add_subplot(spec[position[1][0]:position[1][1],position[1][2]:position[1][3]], projection=cart_proj)
# ax.stock_img()
# Download and add the states and coastlines
states = NaturalEarthFeature(category="cultural", scale="50m",
facecolor="none",
name="admin_1_states_provinces_shp")
ax.add_feature(states, linewidth=.5, edgecolor="black")
ax.coastlines('50m', linewidth=0.8)
# Set the map bounds
# ax.set_xlim(cartopy_xlim(slp2D))
# ax.set_ylim(cartopy_ylim(slp2D))
ax.set_extent(lat_log_bound2[0])
ax.background_img(name='SR', resolution='high')
# Show grid lines.
gl = ax.gridlines(crs=crs.PlateCarree(), draw_labels=True,
linewidth=1.5, color='gray', alpha=0.8, linestyle=':')
gl.xlabel_style = {'size': 20, 'color': 'k','fontname':'Times New Roman'}
gl.ylabel_style = {'size': 20, 'color': 'k','fontname':'Times New Roman'}
gl.xlabels_top = False
gl.ylabels_right = False
c=0
real_kk = []
real_kk = real2[0]
ll=[]
rr=[]
for i in range(real_kk.shape[0]):
for j in range(real_kk.shape[1]):
if j<cons:
ll.append(real_kk[i][j][0])
rr.append(real_kk[i][j][1])
ax.plot( rr, ll, color = colors[c], marker=markers[0],linewidth=2, \
linestyle=list(linestyles.values())[3],\
markersize=sizes[0], transform=crs.PlateCarree())
c+=1
real_kk = []
real_kk = real2[1]
ll=[]
rr=[]
for i in range(real_kk.shape[0]):
for j in range(real_kk.shape[1]):
if j<cons:
ll.append(real_kk[i][j][0])
rr.append(real_kk[i][j][1])
ax.plot( rr, ll, color = colors[c], marker=markers[0],linewidth=2, \
linestyle=list(linestyles.values())[3],\
markersize=sizes[0], transform=crs.PlateCarree())
c+=1
real_kk = []
real_kk = real2[2]
ll=[]
rr=[]
for i in range(real_kk.shape[0]):
for j in range(real_kk.shape[1]):
if j<cons:
ll.append(real_kk[i][j][0])
rr.append(real_kk[i][j][1])
ax.plot( rr, ll, color = colors[c], marker=markers[0],linewidth=2, \
linestyle=list(linestyles.values())[3],\
markersize=sizes[0], transform=crs.PlateCarree())
c+=1
real_kk = []
real_kk = real2[3]
ll=[]
rr=[]
for i in range(real_kk.shape[0]):
for j in range(real_kk.shape[1]):
if j<cons:
ll.append(real_kk[i][j][0])
rr.append(real_kk[i][j][1])
ax.plot( rr, ll, color = colors[c], marker=markers[0],linewidth=2, \
linestyle=list(linestyles.values())[3],\
markersize=sizes[0], transform=crs.PlateCarree())
c+=1
real_kk = []
real_kk = real2[4]
ll=[]
rr=[]
for i in range(real_kk.shape[0]):
for j in range(real_kk.shape[1]):
if j<cons:
ll.append(real_kk[i][j][0])
rr.append(real_kk[i][j][1])
ax.plot( rr, ll, color = colors[c], marker=markers[0],linewidth=2, \
linestyle=list(linestyles.values())[3],\
markersize=sizes[0], | |
# Created on 2017-8-31
# Usage: Lower Limit of Energy Requirements (LLER) for area of interest.
#--------------------------------------------------------------------------------------------------------------
#Import all necessary module dependencies
import arcpy
from arcpy.sa import *
import numpy
import os
import string
import re
import shutil
arcpy.CheckOutExtension("Spatial")
arcpy.env.overwriteOutput = True
#Set whether tool is licensed
def isLicensed():
    """Return True when the Spatial Analyst extension is available and was
    checked out successfully; False otherwise.

    Any arcpy error during the license query/checkout yields False instead
    of propagating (the original used `raise Exception` plus a bare
    `except:` as a goto; this version keeps the same outcomes with plain
    control flow and no longer swallows KeyboardInterrupt/SystemExit).
    """
    try:
        if arcpy.CheckExtension("Spatial") != "Available":
            return False
        arcpy.CheckOutExtension("Spatial")
    except Exception:
        # License query or checkout failed; report unlicensed, don't crash.
        return False
    return True
def autoIncrement(pInterval=1):
    """Advance the module-level counter ``rec`` by *pInterval* and return it.

    NOTE(review): ``rec`` must be initialised elsewhere before the first
    call, otherwise this raises NameError.
    """
    global rec
    rec += pInterval
    return rec
def nutritionMetrics(AOI, year, maleStature, femaleStature, rasterList):
#Create intermediate folder where output will be temporarily saved
arcpy.CreateFolder_management(arcpy.env.scratchFolder, "intOutput")
#arcpy.AddMessage("scratch folder was created")
os.chdir(os.path.join(arcpy.env.scratchFolder, "intOutput"))
arcpy.AddMessage("Calculating nutrition metrics... ")
#Return only rasters with the year selected by the user
rasterYearList = []
for r in rasterList:
if year in r:
rasterYearList.append(r)
#Clip the continent raster by the AOI
for r in rasterYearList:
rasterName = r[:-4] + "_clip.img"
outExtractByMask = ExtractByMask(r, AOI)
outExtractByMask.save(os.path.join(arcpy.env.scratchFolder, "intOutput", rasterName))
#Loop through the exported rasters and calculate nutrition metrics for each raster
#Get a list of the clipped rasters
arcpy.env.workspace = os.path.join(arcpy.env.scratchFolder, "intOutput")
clippedRasterList = arcpy.ListRasters()
#Calculate nutrition metrics
LLER = 0 #This will keep the tally of 'Lower Limit of Energy Requirement' in kcal/day for the study area (AOI).
totalPop = 0 #This will keep the tally of total population in the study area (AOI).
groupList = []
maleStatureInt = float(maleStature)
femaleStatureInt = float(femaleStature)
#arcpy.AddMessage(clippedRasterList)
for r in clippedRasterList:
#The appropriate equation is added to each age group and a running tally of LLER is kept.
#The female 00-04 age group
if "0004_F" in r:
height = (femaleStatureInt / 161.8) * 84.97556 #note that 2014 heights for Americans are used to standardized height values (CDC, 2016)
kg50 = 15.5 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popf0004 = array.sum()
nutf0004 = ((263.4 + 65.3 * kg50 - 0.454 * (kg50)**2) + (6.3 * 2)) * popf0004
LLER += nutf0004
totalPop += popf0004
sexAge = "0004_F"
groupList.append(sexAge)
#The male 00-04 age group
elif "0004_M" in r:
height = (maleStatureInt / 175.7) * 86.4522
kg50 = 15.8 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popm0004 = array.sum()
nutm0004 = ((310.2 + 63.3 * kg50 - 0.263 * (kg50)**2) + (6.3 * 2)) * popm0004
LLER += nutm0004
totalPop += popm0004
sexAge = "0004_M"
groupList.append(sexAge)
#The female 05-09 age group
elif "0509_F" in r:
height = (femaleStatureInt / 161.8) * 121.7617
kg50 = 15.52 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popf0509 = array.sum()
nutf0509 = ((263.4 + 65.3 * kg50 - 0.454 * (kg50)**2) + (8.22 * 2)) * popf0509
LLER += nutf0509
totalPop += popf0509
sexAge = "0509_F"
groupList.append(sexAge)
#The male 05-09 age group
elif "0509_M" in r:
height = (maleStatureInt / 175.7) * 122.0305
kg50 = 15.6 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popm0509 = array.sum()
nutm0509 = ((310.2 + 63.3 * kg50 - 0.263 * (kg50)**2) + (6.58 * 2)) * popm0509
LLER += nutm0509
totalPop += popm0509
sexAge = "0509_M"
groupList.append(sexAge)
#The female 10-14 age group
elif "1014_F" in r:
height = (femaleStatureInt / 161.8) * 151.4866
kg5 = 15.19 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popf1014 = array.sum()
nutf1014 = (0.85 * (263.4 + 65.3 * kg5 - 0.454 * (kg5)**2) + (9.86 * 2)) * popf1014
LLER += nutf1014
totalPop += popf1014
sexAge = "1014_F"
groupList.append(sexAge)
#The male 10-14 age group
elif "1014_M" in r:
height = (maleStatureInt / 175.7) * 149.3088
kg5 = 15.14 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popm1014 = array.sum()
nutm1014 = (0.85 * (310.2 + 63.3 * kg5 - 0.263 * (kg5)**2) + (10.41 * 2)) * popm1014
LLER += nutm1014
totalPop += popm1014
sexAge = "1014_M"
groupList.append(sexAge)
#The female 15-19 age group
elif "1519_F" in r:
height = (femaleStatureInt / 161.8) * 163.1308
kg5 = 17.19 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popf1519 = array.sum()
nutf1519 = (1.55 * (486.6 + 8.126 * kg5)) * popf1519
LLER += nutf1519
totalPop += popf1519
sexAge = "1519_F"
groupList.append(sexAge)
#The male 15-19 age group
elif "1519_M" in r:
height = (maleStatureInt / 175.7) * 176.185
kg5 = 18.10 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popm1519 = array.sum()
nutm1519 = (1.55 * (692.2 + 15.057 * kg5)) * popm1519
LLER += nutm1519
totalPop += popm1519
sexAge = "1519_M"
groupList.append(sexAge)
#The female 20-24 age group
elif "2024_F" in r:
height = (femaleStatureInt / 161.8) * 163.3383
kg5 = 17.38 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popf2024 = array.sum()
nutf2024 = (1.55 * (486.6 + 8.126 * kg5)) * popf2024
LLER += nutf2024
totalPop += popf2024
sexAge = "2024_F"
groupList.append(sexAge)
#The male 20-24 age group
elif "2024_M" in r:
height = (maleStatureInt / 175.7) * 176.8492
kg5 = 18.66 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popm2024 = array.sum()
nutm2024 = (1.55 * (692.2 + 15.057 * kg5)) * popm2024
LLER += nutm2024
totalPop += popm2024
sexAge = "2024_M"
groupList.append(sexAge)
#The female 25-29 age group
elif "2529_F" in r:
height = (femaleStatureInt / 161.8) * 163.3383
kg5 = 17.38 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popf2529 = array.sum()
nutf2529 = (1.55 * (486.6 + 8.126 * kg5)) * popf2529
LLER += nutf2529
totalPop += popf2529
sexAge = "2529_F"
groupList.append(sexAge)
#The male 25-29 age group
elif "2529_M" in r:
height = (maleStatureInt / 175.7) * 176.8492
kg5 = 18.66 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popm2529 = array.sum()
nutm2529 = (1.55 * (692.2 + 15.057 * kg5)) * popm2529
LLER += nutm2529
totalPop += popm2529
sexAge = "2529_M"
groupList.append(sexAge)
#The female 30-34 age group
elif "3034_F" in r:
height = (femaleStatureInt / 161.8) * 163.3383
kg5 = 17.38 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popf3034 = array.sum()
nutf3034 = (1.55 * (845.6 + 8.118 * kg5)) * popf3034
LLER += nutf3034
totalPop += popf3034
sexAge = "3034_F"
groupList.append(sexAge)
#The male 30-34 age group
elif "3034_M" in r:
height = (maleStatureInt / 175.7) * 176.8492
kg5 = 18.66 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popm3034 = array.sum()
nutm3034 = (1.55 * (873.1 + 11.472 * kg5)) * popm3034
LLER += nutm3034
totalPop += popm3034
sexAge = "3034_M"
groupList.append(sexAge)
#The female 35-39 age group
elif "3539_F" in r:
height = (femaleStatureInt / 161.8) * 163.3383
kg5 = 17.38 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popf3539 = array.sum()
nutf3539 = (1.55 * (845.6 + 8.118 * kg5)) * popf3539
LLER += nutf3539
totalPop += popf3539
sexAge = "3539_F"
groupList.append(sexAge)
#The male 35-39 age group
elif "3539_M" in r:
height = (maleStatureInt / 175.7) * 176.8492
kg5 = 18.66 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popm3539 = array.sum()
nutm3539 = (1.55 * (873.1 + 11.472 * kg5)) * popm3539
LLER += nutm3539
totalPop += popm3539
sexAge = "3539_M"
groupList.append(sexAge)
#The female 40-44 age group
elif "4044_F" in r:
height = (femaleStatureInt / 161.8) * 163.3383
kg5 = 17.38 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popf4044 = array.sum()
nutf4044 = (1.55 * (845.6 + 8.118 * kg5)) * popf4044
LLER += nutf4044
totalPop += popf4044
sexAge = "4044_F"
groupList.append(sexAge)
#The male 40-44 age group
elif "4044_M" in r:
height = (maleStatureInt / 175.7) * 176.8492
kg5 = 18.66 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popm4044 = array.sum()
nutm4044 = (1.55 * (873.1 + 11.472 * kg5)) * popm4044
LLER += nutm4044
totalPop += popm4044
sexAge = "4044_M"
groupList.append(sexAge)
#The female 45-49 age group
elif "4549_F" in r:
height = (femaleStatureInt / 161.8) * 163.3383
kg5 = 17.38 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popf4549 = array.sum()
nutf4549 = (1.55 * (845.6 + 8.118 * kg5)) * popf4549
LLER += nutf4549
totalPop += popf4549
sexAge = "4549_F"
groupList.append(sexAge)
#The male 45-49 age group
elif "4549_M" in r:
height = (maleStatureInt / 175.7) * 176.8492
kg5 = 18.66 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popm4549 = array.sum()
nutm4549 = (1.55 * (873.1 + 11.472 * kg5)) * popm4549
LLER += nutm4549
totalPop += popm4549
sexAge = "4549_M"
groupList.append(sexAge)
#The female 50-54 age group
elif "5054_F" in r:
height = (femaleStatureInt / 161.8) * 163.3383
kg5 = 17.38 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popf5054 = array.sum()
nutf5054 = (1.55 * (845.6 + 8.118 * kg5)) * popf5054
LLER += nutf5054
totalPop += popf5054
sexAge = "5054_F"
groupList.append(sexAge)
#The male 50-54 age group
elif "5054_M" in r:
height = (maleStatureInt / 175.7) * 176.8492
kg5 = 18.66 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popm5054 = array.sum()
nutm5054 = (1.55 * (873.1 + 11.472 * kg5)) * popm5054
LLER += nutm5054
totalPop += popm5054
sexAge = "5054_M"
groupList.append(sexAge)
#The female 55-59 age group
elif "5559_F" in r:
height = (femaleStatureInt / 161.8) * 163.3383
kg5 = 17.38 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popf5559 = array.sum()
nutf5559 = (1.55 * (845.6 + 8.118 * kg5)) * popf5559
LLER += nutf5559
totalPop += popf5559
sexAge = "5559_F"
groupList.append(sexAge)
#The male 55-59 age group
elif "5559_M" in r:
height = (maleStatureInt / 175.7) * 176.8492
kg5 = 18.66 * ((height / 100)**2)
array = arcpy.RasterToNumPyArray(r, "", "", "", 0)
popm5559 = array.sum()
nutm5559 = (1.55 * (873.1 + 11.472 * kg5)) * popm5559
LLER += | |
<reponame>veot/ifcb-features
# The following includes a modified version of the phasecong function from
# phasepack.
# It skips several steps that are not used in IFCB segmentation
# Original license reproduced below
# MIT License:
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "as is", without warranty of any kind.
# Original MATLAB version by <NAME>
# <http://www.csse.uwa.edu.au/~pk/research/matlabfns/PhaseCongruency/phasecong3.m>
# Python translation by <NAME>
# <alistair muldal<EMAIL> ox ac uk>
# IFCB-specific optimizations and utilites by <NAME> @ WHOI 2016
import numpy as np
from scipy.fftpack import fftshift, ifftshift
from phasepack.tools import rayleighmode as _rayleighmode
from phasepack.tools import lowpassfilter as _lowpassfilter
# Try and use the faster Fourier transform functions from the pyfftw module if
# available
from phasepack.tools import fft2, ifft2
def faster_phasecong(
img,
nscale=5,
norient=6,
minWaveLength=3,
mult=2.1,
sigmaOnf=0.55,
k=2.0,
cutOff=0.5,
g=10.0,
noiseMethod=-1,
):
"""
Function for computing phase congruency on an image. This is a contrast-
invariant edge and corner detector.
Arguments:
-----------
<Name> <Default> <Description>
img N/A The input image
nscale 5 Number of wavelet scales, try values 3-6
norient 6 Number of filter orientations.
minWaveLength 3 Wavelength of smallest scale filter.
mult 2.1 Scaling factor between successive filters.
sigmaOnf 0.55 Ratio of the standard deviation of the Gaussian
describing the log Gabor filter's transfer function
in the frequency domain to the filter center
frequency.
k 2.0 No. of standard deviations of the noise energy
beyond the mean at which we set the noise threshold
point. You may want to vary this up to a value of
10 or 20 for noisy images.
cutOff 0.5 The fractional measure of frequency spread below
which phase congruency values get penalized.
g 10 Controls the 'sharpness' of the transition in the
sigmoid function used to weight phase congruency
for frequency spread.
noiseMethod -1 Parameter specifies method used to determine
noise statistics.
-1 use median of smallest scale filter responses
-2 use mode of smallest scale filter responses
>=0 use this value as the fixed noise threshold
Returns:
---------
M Maximum moment of phase congruency covariance, which can be used as
a measure of edge strength
m Minimum moment of phase congruency covariance, which can be used as
a measure of corner strength
The convolutions are done via the FFT. Many of the parameters relate to
the specification of the filters in the frequency plane. The values do
not seem to be very critical and the defaults are usually fine. You may
want to experiment with the values of 'nscales' and 'k', the noise
compensation factor.
Notes on filter settings to obtain even coverage of the spectrum
sigmaOnf .85 mult 1.3
sigmaOnf .75 mult 1.6 (filter bandwidth ~1 octave)
sigmaOnf .65 mult 2.1
sigmaOnf .55 mult 3 (filter bandwidth ~2 octaves)
For maximum speed the input image should have dimensions that correspond
to powers of 2, but the code will operate on images of arbitrary size.
See also: phasecongmono, which uses monogenic filters for improved
speed, but does not return m, PC or EO.
References:
------------
<NAME>, "Image Features From Phase Congruency". Videre: A Journal of
Computer Vision Research. MIT Press. Volume 1, Number 3, Summer 1999
http://mitpress.mit.edu/e-journals/Videre/001/v13.html
<NAME>, "Phase Congruency Detects Corners and Edges". Proceedings
DICTA 2003, Sydney Dec 10-12
"""
if img.dtype not in ["float32", "float64"]:
img = np.float64(img)
imgdtype = "float64"
else:
imgdtype = img.dtype
if img.ndim == 3:
img = img.mean(2)
rows, cols = img.shape
epsilon = 1e-4 # used to prevent /0.
IM = fft2(img) # Fourier transformed image
zeromat = np.zeros((rows, cols), dtype=imgdtype)
# matrices for covariance data
covx2 = zeromat.copy()
covy2 = zeromat.copy()
covxy = zeromat.copy()
# Pre-compute some stuff to speed up filter construction
# Set up X and Y matrices with ranges normalised to +/- 0.5
if cols % 2:
xvals = np.arange(-(cols - 1) / 2.0, ((cols - 1) / 2.0) + 1) / float(cols - 1)
else:
xvals = np.arange(-cols / 2.0, cols / 2.0) / float(cols)
if rows % 2:
yvals = np.arange(-(rows - 1) / 2.0, ((rows - 1) / 2.0) + 1) / float(rows - 1)
else:
yvals = np.arange(-rows / 2.0, rows / 2.0) / float(rows)
x, y = np.meshgrid(xvals, yvals, sparse=True)
# normalised distance from centre
radius = np.sqrt(x * x + y * y)
# polar angle (-ve y gives +ve anti-clockwise angles)
theta = np.arctan2(-y, x)
# Quadrant shift radius and theta so that filters are constructed with 0
# frequency at the corners
radius = ifftshift(radius) # need to use ifftshift to bring 0 to (0,0)
theta = ifftshift(theta)
# Get rid of the 0 radius value at the 0 frequency point (now at top-left
# corner) so that taking the log of the radius will not cause trouble.
radius[0, 0] = 1.0
sintheta = np.sin(theta)
costheta = np.cos(theta)
del x, y, theta
# Construct a bank of log-Gabor filters at different spatial scales
# Filters are constructed in terms of two components.
# 1) The radial component, which controls the frequency band that the
# filter responds to
# 2) The angular component, which controls the orientation that the filter
# responds to.
# The two components are multiplied together to construct the overall
# filter.
# Construct the radial filter components... First construct a low-pass
# filter that is as large as possible, yet falls away to zero at the
# boundaries. All log Gabor filters are multiplied by this to ensure no
# extra frequencies at the 'corners' of the FFT are incorporated as this
# seems to upset the normalisation process when calculating phase
# congrunecy.
# Updated filter parameters 6/9/2013: radius .45, 'sharpness' 15
lp = _lowpassfilter((rows, cols), 0.45, 15)
logGaborDenom = 2.0 * np.log(sigmaOnf) ** 2.0
logGabor = []
wavelengths = minWaveLength * mult ** np.arange(nscale)
for wavelength in wavelengths:
# centre of frequency filter
fo = 1.0 / wavelength
# log Gabor
logRadOverFo = np.log(radius / fo)
tmp = np.exp(-(logRadOverFo * logRadOverFo) / logGaborDenom)
# apply low-pass filter
tmp = tmp * lp
# set the value at the 0 frequency point of the filter back to
# zero (undo the radius fudge).
tmp[0, 0] = 0.0
logGabor.append(tmp)
# MAIN LOOP
# for each orientation...
# Construct the angular filter spread function
angls = np.arange(norient) * (np.pi / norient)
for angl in angls:
# For each point in the filter matrix calculate the angular distance
# from the specified filter orientation. To overcome the angular wrap-
# around problem sine difference and cosine difference values are first
# computed and then the arctan2 function is used to determine angular
# distance.
# difference in sine and cosine
sin_angl = np.sin(angl)
cos_angl = np.cos(angl)
ds = sintheta * cos_angl - costheta * sin_angl
dc = costheta * cos_angl + sintheta * sin_angl
# absolute angular difference
dtheta = np.abs(np.arctan2(ds, dc))
# Scale theta so that cosine spread function has the right wavelength
# and clamp to pi.
np.clip(dtheta * norient / 2.0, a_min=0, a_max=np.pi, out=dtheta)
# The spread function is cos(dtheta) between -pi and pi. We add 1, and
# then divide by 2 so that the value ranges 0-1
spread = (np.cos(dtheta) + 1.0) / 2.0
# Initialize accumulators
sumE_ThisOrient = zeromat.copy()
sumO_ThisOrient = zeromat.copy()
sumAn_ThisOrient = zeromat.copy()
Energy = zeromat.copy()
EOscale = []
# for each scale...
for ss in range(nscale):
# Multiply radial and angular components to get filter
filt = logGabor[ss] * spread
# Convolve image with even and odd filters
thisEO = ifft2(IM * filt)
# Even + odd filter response amplitude
An = np.abs(thisEO)
# Sum of amplitudes for even | |
picking is enabled and all remaining command fields are
ignored (valid only in the GUI). A component name may also be
substituted for NP1 (NP2 and NINC are ignored).
dx, dy, dz
Keypoint location increments in the active coordinate system (DR,
Dθ, DZ for cylindrical, DR, Dθ, DΦ for spherical).
kinc
Keypoint increment between generated sets. If zero, the lowest
available keypoint numbers are assigned [NUMSTR].
noelem
Specifies if elements and nodes are also to be generated:
0 - Generate nodes and point elements associated with the original keypoints, if
they exist.
1 - Do not generate nodes and elements.
imove
Specifies whether keypoints will be moved or newly defined:
0 - Generate additional keypoints as requested with the ITIME argument.
1 - Move original keypoints to new position retaining the same keypoint numbers
(ITIME, KINC, and NOELEM are ignored). Valid only if the old
keypoints are no longer needed at their original positions.
Corresponding meshed items are also moved if not needed at
their original position.
Notes
-----
Generates additional keypoints (and corresponding mesh) from a given
keypoint pattern. The MAT, TYPE, REAL, and ESYS attributes are based
upon the keypoints in the pattern and not upon the current settings.
Generation is done in the active coordinate system. Keypoints in the
pattern may have been defined in any coordinate system. However, solid
modeling in a toroidal coordinate system is not recommended.
"""
command = f"KGEN,{itime},{np1},{np2},{ninc},{dx},{dy},{dz},{kinc},{noelem},{imove}"
return self.run(command, **kwargs)
def kl(self, nl1="", ratio="", nk1="", **kwargs) -> int:
    """Generate a keypoint at a specified location on an existing line.

    APDL Command: KL

    Parameters
    ----------
    nl1
        Number of the line. A negative value reverses the direction of
        the line as interpreted for RATIO.
    ratio
        Ratio of line length at which to locate the keypoint, between
        0.0 and 1.0. Defaults to 0.5 (the line midpoint).
    nk1
        Number assigned to the generated keypoint (defaults to the
        lowest available keypoint number [NUMSTR]).

    Returns
    -------
    int
        Keypoint number of the generated keypoint, parsed from the
        command response (``None`` if it cannot be parsed).

    Examples
    --------
    Create a keypoint halfway along a line from (0, 0, 0) to (10, 0, 0)

    >>> knum0 = mapdl.k("", 0, 0, 0)
    >>> knum1 = mapdl.k("", 10, 0, 0)
    >>> lnum = mapdl.l(knum0, knum1)
    >>> kmid = mapdl.kl(lnum, 0.5)
    """
    response = self.run(f"KL,{nl1},{ratio},{nk1}", **kwargs)
    if not response:
        return None
    # MAPDL echoes the new keypoint number as "KEYPOINT <n>"
    match = re.search(r'KEYPOINT\s+(\d+)\s+', response)
    if match:
        return int(match.group(1))
def klist(self, np1="", np2="", ninc="", lab="", **kwargs):
    """List the defined keypoints or hard points.

    APDL Command: KLIST

    Parameters
    ----------
    np1, np2, ninc
        List keypoints from NP1 to NP2 (defaults to NP1) in steps of
        NINC (defaults to 1). If NP1 = ALL (default), NP2 and NINC are
        ignored and all selected keypoints [KSEL] are listed. If
        NP1 = P, graphical picking is enabled and the remaining fields
        are ignored (GUI only). A component name may be substituted
        for NP1.
    lab
        Coordinate listing key. Blank lists all keypoint information;
        COORD suppresses all but the coordinates (shown at higher
        precision); HPT lists only hard point information.

    Notes
    -----
    Keypoints are listed in the active display coordinate system
    [DSYS]. An attribute (TYPE, MAT, REAL, or ESYS) listed as zero is
    unassigned; a positive value was assigned with KATT (kept when the
    mesh is cleared); a negative value came from the attribute pointer
    active during meshing (reset to zero when the mesh is cleared).

    This command is valid in any processor.
    """
    command = ",".join(["KLIST", str(np1), str(np2), str(ninc), str(lab)])
    return self.run(command, **kwargs)
def kmodif(self, npt="", x="", y="", z="", **kwargs):
    """Modify the coordinates of an existing keypoint.

    APDL Command: KMODIF

    Parameters
    ----------
    npt
        Keypoint whose coordinates are modified. NPT = ALL modifies all
        selected keypoints [KSEL]; NPT = P enables graphical picking
        (GUI only, remaining fields ignored). A component name may be
        substituted for NPT.
    x, y, z
        Replacement coordinate values, interpreted in the active
        coordinate system (R, theta, Z for cylindrical; R, theta, phi
        for spherical). X = P uses graphical picking (Y and Z ignored).
        A blank field retains the previous value; Y = P is not allowed.

    Notes
    -----
    Lines, areas, and volumes attached to the modified keypoint (if
    any) must all be selected and will be redefined using the active
    coordinate system. Solid modeling in a toroidal coordinate system
    is not recommended.

    Caution: redefined entities may be removed from any defined
    components and assemblies, and nodes and elements are automatically
    cleared from redefined keypoints, lines, areas, or volumes.

    KMODIF moves keypoints without validating underlying entities. To
    merge keypoints and update higher order entities, issue NUMMRG
    instead.
    """
    return self.run(f"KMODIF,{npt},{x},{y},{z}", **kwargs)
def kmove(self, npt="", kc1="", x1="", y1="", z1="", kc2="", x2="", y2="",
          z2="", **kwargs):
    """Calculate and move a keypoint to an intersection.

    APDL Command: KMOVE

    Parameters
    ----------
    npt
        Keypoint to move. NPT = P enables graphical picking (GUI only,
        remaining fields ignored). A component name may be substituted
        for NPT.
    kc1
        First coordinate system number. Defaults to 0 (global
        Cartesian).
    x1, y1, z1
        One or two values locating the keypoint in the first system.
        Use "U" for unknown value(s) to be calculated and "E" to use an
        existing coordinate value. Fields are R1, theta1, Z1 for
        cylindrical, or R1, theta1, phi1 for spherical.
    kc2
        Second coordinate system number.
    x2, y2, z2
        Two or one value(s) locating the keypoint in the second system,
        with "U" and "E" interpreted as above. Fields are R2, theta2,
        Z2 for cylindrical, or R2, theta2, phi2 for spherical.

    Notes
    -----
    The keypoint must have been previously defined (at an approximate
    location) or left undefined (in which case it is internally defined
    at the SOURCE location). The actual location is calculated from the
    intersection of three surfaces implied by three coordinate
    constants in two different coordinate systems; the program computes
    the remaining three constants. Solid modeling in a toroidal
    coordinate system is not recommended. See the MOVE command for
    surface and intersection details. All arguments except KC1 must be
    input. Use *REPEAT after KMOVE to move a series of keypoints.
    """
    fields = (npt, kc1, x1, y1, z1, kc2, x2, y2, z2)
    command = "KMOVE," + ",".join(str(field) for field in fields)
    return self.run(command, **kwargs)
def knode(self, npt="", node="", **kwargs) -> int:
    """Define a keypoint at an existing node location.

    APDL Command: KNODE

    Parameters
    ----------
    npt
        Arbitrary reference number for the keypoint. If zero, the
        lowest available number is assigned [NUMSTR].
    node
        Node number defining the global X, Y, Z keypoint location. A
        component name may also be substituted for NODE.

    Returns
    -------
    int
        Keypoint number of the generated keypoint, parsed from the
        command response (``None`` if it cannot be parsed).

    Examples
    --------
    Create a keypoint at a node at (1, 2, 3)

    >>> nnum = mapdl.n('', 1, 2, 3)
    >>> knum1 = mapdl.knode('', nnum)
    >>> knum1
    1
    """
    response = self.run(f"KNODE,{npt},{node}", **kwargs)
    if not response:
        return None
    # MAPDL echoes the result as "KEYPOINT NUMBER = <n>"
    found = re.search(r'KEYPOINT NUMBER =\s+(\d+)', response)
    if found:
        return int(found.group(1))
def kplot(self, np1="", np2="", ninc="", lab="", **kwargs):
"""Displays the selected keypoints.
APDL Command: KPLOT
Parameters
----------
np1, np2, ninc
Display keypoints from NP1 to NP2 (defaults | |
start positions, run lengths, run values
"""
where = np.flatnonzero
if not isinstance(array, h5py.Dataset):
array = np.asarray(array)
n = len(array)
if n == 0:
return (np.array([], dtype=int),
np.array([], dtype=int),
np.array([], dtype=array.dtype))
if chunksize is None:
chunksize = n
starts, values = [], []
last_val = np.nan
for i in range(0, n, chunksize):
x = array[i:i+chunksize]
locs = where(x[1:] != x[:-1]) + 1
if x[0] != last_val:
locs = np.r_[0, locs]
starts.append(i + locs)
values.append(x[locs])
last_val = x[-1]
starts = np.concatenate(starts)
lengths = np.diff(np.r_[starts, n])
values = np.concatenate(values)
return starts, lengths, values
def get_unicode_utcnow():
    """Return the current UTC time as an ISO-8601 text string.

    Under Python 2, ``datetime.utcnow().isoformat()`` may return a byte
    string; decode it so callers always receive unicode text.
    """
    stamp = datetime.utcnow().isoformat()
    if isinstance(stamp, type(u'')):
        return stamp
    return stamp.decode('utf-8')
def write_zooms_for_higlass(h5res):
    """
    NOT CURRENTLY USED
    Add the max-zoom attribute needed for higlass, but only if each
    resolution is exactly double the next finer one (successively
    divisible by two). Otherwise, warn that this file will not be
    higlass compatible.

    Params:
    <h5res> open h5py group containing a 'resolutions' subgroup
    """
    # Fixes vs. previous revision: the parameter is h5res (the old code
    # read an undefined name `f`), and group keys are strings, so they
    # must be cast to int before any arithmetic (string * 2 repeats the
    # string and made the compatibility test always fail).
    resolutions = sorted((int(r) for r in h5res['resolutions'].keys()),
                         reverse=True)
    # higlass compatible <=> every resolution is exactly twice the next
    # (finer) one in this descending list
    higlass_compat = all(res == 2 * nxt
                         for res, nxt in zip(resolutions, resolutions[1:]))
    if not higlass_compat:
        print_stderr('!!! WARNING: This hic file is not higlass compatible! Will not add [max-zoom] attribute.')
        return
    print('... INFO: This hic file is higlass compatible! Adding [max-zoom] attribute.')
    max_zoom = len(resolutions) - 1
    # Assign max-zoom attribute
    h5res.attrs['max-zoom'] = max_zoom
    print('... max-zoom: {}'.format(max_zoom))
    # Make links to zoom levels: zoom i points at the i-th coarsest resolution
    for i, res in enumerate(resolutions):
        print('... zoom {}: {}'.format(i, res))
        h5res[str(i)] = h5py.SoftLink('/resolutions/{}'.format(res))
def print_formatted_updates(updates, writefile):
    """Display pending updates for the user to confirm.

    ``updates`` is populated by prepare_hic2cool_updates; each entry is
    a dict with 'title', 'effect', and 'detail' keys. ``writefile`` is
    the path the upgraded file will be written to.
    """
    header = '### Updates found. Will upgrade hic2cool file to version %s' % __version__
    print(header)
    print('### This is what will change:')
    for upd in updates:
        summary = '- %s\n - Effect: %s\n - Detail: %s' % (
            upd['title'], upd['effect'], upd['detail'])
        print(summary)
    print('### Will write to %s\n### Continue? [y/n]' % writefile)
def run_hic2cool_updates(updates, infile, writefile):
    """Apply the given updates to a hic2cool file.

    Copies ``infile`` to ``writefile`` first when the paths differ, runs
    each update's function against the copy, then stamps generated-by /
    update-date metadata on every resolution group (or on the file root
    for single-resolution files).
    """
    if infile != writefile:
        # work on a copy so the original stays untouched
        shutil.copy(infile, writefile)
    print('### Updating...')
    for update in updates:
        print('... Running: %s' % update['title'])
        update['function'](writefile)
        print('... Finished: %s' % update['title'])
    # now update the generated-by attr and add update-date
    generated_by = 'hic2cool-' + __version__
    update_date = get_unicode_utcnow()
    with h5py.File(writefile, 'r+') as handle:
        if 'resolutions' in handle:
            for res in handle['resolutions']:
                res_group = handle['resolutions'][res]
                res_group.attrs['generated-by'] = generated_by
                res_group.attrs['update-date'] = update_date
                print('... Updated metadata for resolution %s' % res)
        else:
            handle.attrs['generated-by'] = generated_by
            handle.attrs['update-date'] = update_date
            print('... Updated metadata')
    print('### Finished! Output written to: %s' % writefile)
def hic2cool_convert(infile, outfile, resolution=0, nproc=1, show_warnings=False, silent=False):
    """
    Main function that coordinates the reading of header and footer from infile
    and uses that information to parse the hic matrix.
    Opens outfile and writes in form of .cool file
    Params:
    <infile> str .hic filename
    <outfile> str .cool output filename
    <resolution> int bp bin size. If 0, use all. Defaults to 0.
        Final .cool structure will change depending on this param (see README)
    <show_warnings> bool. If True, print out WARNING messages
    <silent> bool. If true, hide standard output
    <nproc> number of processes to use
    """
    unit = 'BP'  # only using base pair unit for now
    resolution = int(resolution)
    # Global hic normalization types used
    global NORMS
    NORMS = []
    global WARN
    WARN = False
    req = open(infile, 'rb')
    # one extra read handle per process; presumably consumed by parse_hic
    # workers for concurrent reads — verify against parse_hic
    global reqarr
    reqarr = []
    for i in range(0, nproc):
        reqarr.append(open(infile, 'rb'))
    # memory-map the whole .hic file read-only for random access
    global mmap_buf
    mmap_buf = mmap.mmap(req.fileno(), 0, access=mmap.ACCESS_READ)
    used_chrs, resolutions, masteridx, genome, metadata = read_header(req)
    pair_footer_info, expected, factors, norm_info = read_footer(req, mmap_buf, masteridx)
    # expected/factors unused for now
    del expected
    del factors
    # used to hold chr_chr key intersections missing from the hic file
    # NOTE(review): warn_chr_keys appears unused in this function — confirm
    warn_chr_keys = []
    if not silent:  # print hic header info for command line usage
        chr_names = [used_chrs[key][1] for key in used_chrs.keys()]
        print('##########################')
        print('### hic2cool / convert ###')
        print('##########################')
        print('### Header info from hic')
        print('... Chromosomes: ', chr_names)
        print('... Resolutions: ', resolutions)
        print('... Normalizations: ', NORMS)
        print('... Genome: ', genome)
    # ensure user input binsize is a resolution supported by the hic file
    if resolution != 0 and resolution not in resolutions:
        error_str = (
            '!!! ERROR. Given binsize (in bp) is not a supported resolution in '
            'this file.\nPlease use 0 (all resolutions) or use one of: ' +
            str(resolutions))
        force_exit(error_str, req)
    use_resolutions = resolutions if resolution == 0 else [resolution]
    multi_res = len(use_resolutions) > 1
    # do some formatting on outfile filename
    # .mcool is the 4DN supported multi-res format, but allow .multi.cool too
    if outfile[-11:] == '.multi.cool':
        if not multi_res:
            outfile = ''.join([outfile[:-11] + '.cool'])
    elif outfile[-6:] == '.mcool':
        if not multi_res:
            outfile = ''.join([outfile[:-6] + '.cool'])
    elif outfile[-5:] == '.cool':
        if multi_res:
            outfile = ''.join([outfile[:-5] + '.mcool'])
    else:
        # unexpected file ending. just append .mcool or .cool
        if multi_res:
            outfile = ''.join([outfile + '.mcool'])
        else:
            outfile = ''.join([outfile + '.cool'])
    # check if the desired path exists. try to remove, if so
    if os.path.exists(outfile):
        try:
            os.remove(outfile)
        except OSError:
            error_string = ("!!! ERROR. Output file path %s already exists. This"
                " can cause issues with the hdf5 structure. Please remove that"
                " file or choose a different output name." % (outfile))
            force_exit(error_string, req)
        # NOTE(review): WARN was reset to False above and is not set between
        # there and here — this warning may never fire; confirm intent
        if WARN:
            print_stderr('!!! WARNING: removed pre-existing file: %s' % (outfile))
    print('### Converting')
    pool = Pool(processes=nproc)
    for binsize in use_resolutions:
        t_start = time.time()
        # initialize cooler file. return per resolution bin offset maps
        chr_offset_map, chr_bins = initialize_res(outfile, req, mmap_buf, unit, used_chrs,
            genome, metadata, binsize, norm_info, multi_res, show_warnings)
        covered_chr_pairs = []
        for chr_a in used_chrs:
            # accumulate all chr_a-vs-* pixel records before writing once
            total_chunk = np.zeros(shape=0, dtype=CHUNK_DTYPE)
            if used_chrs[chr_a][1].lower() == 'all':
                continue
            for chr_b in used_chrs:
                if used_chrs[chr_b][1].lower() == 'all':
                    continue
                c1 = min(chr_a, chr_b)
                c2 = max(chr_a, chr_b)
                chr_key = str(c1) + "_" + str(c2)
                # since matrices are upper triangular, no need to cover c1-c2
                # and c2-c1 reciprocally
                if chr_key in covered_chr_pairs:
                    continue
                tmp_chunk = parse_hic(req, pool, nproc, chr_key, unit, binsize,
                    pair_footer_info, chr_offset_map, chr_bins,
                    used_chrs, show_warnings)
                total_chunk = np.concatenate((total_chunk, tmp_chunk), axis=0)
                del tmp_chunk
                covered_chr_pairs.append(chr_key)
            # write at the end of every chr_a
            write_pixels_chunk(outfile, binsize, total_chunk, multi_res)
            del total_chunk
        # finalize to remove chunks and write a bit of metadata
        finalize_resolution_cool(outfile, binsize, multi_res)
        t_parse = time.time()
        elapsed_parse = t_parse - t_start
        if not silent:
            print('... Resolution %s took: %s seconds.' % (binsize, elapsed_parse))
    req.close()
    for i in range(0, nproc):
        reqarr[i].close()
    pool.close()
    pool.join()
    if not silent:
        if WARN and not show_warnings:
            print('... Warnings were found in this run. Run with -v to display them.')
        print('### Finished! Output written to: %s' % outfile)
        if multi_res:
            print('... This file is higlass compatible.')
        else:
            print('... This file is single resolution and NOT higlass compatible. Run with `-r 0` for multi-resolution.')
def hic2cool_extractnorms(infile, outfile, exclude_mt=False, show_warnings=False, silent=False):
"""
Find all normalization vectors in the given hic file at all resolutions and
attempts to add them to the given cooler file. Does not add any metadata
to the cooler file. TODO: should we add `extract-norms-date` attr?
Params:
<infile> str .hic filename
<outfile> str .cool output filename
<exclude_mt> bool. If True, ignore MT contacts. Defaults to False.
<show_warnings> bool. If True, print out WARNING messages
<silent> bool. If true, hide standard output
"""
unit = 'BP' # only using base pair unit for now
# Global hic normalization types used
global NORMS
NORMS = []
global WARN
WARN = False
req = open(infile, 'rb')
buf = mmap.mmap(req.fileno(), 0, access=mmap.ACCESS_READ)
used_chrs, resolutions, masteridx, genome, metadata = read_header(req)
pair_footer_info, expected, factors, norm_info = read_footer(req, buf, masteridx)
# expected/factors unused for now
del expected
del factors
chr_names = [used_chrs[key][1] for key in used_chrs.keys()]
if not silent: # print hic header info for command line usage
print('################################')
print('### hic2cool / extract-norms ###')
print('################################')
print('Header info from hic:')
print('... Chromosomes: ', chr_names)
print('... Resolutions: ', resolutions)
print('... Normalizations: ', NORMS)
print('... Genome: ', genome)
if exclude_mt: # remove mitchondrial chr by name if this flag is set
# try to | |
<filename>src/train_utils.py
import os
import random
import tempfile
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy
import numpy as np
import tensorflow as tf
from sklearn.metrics import roc_curve, auc
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras import layers
from yawn_train.src.model_config import IMAGE_PAIR_SIZE
# plot diagnostic learning curves
# https://medium.com/edureka/tensorflow-image-classification-19b63b7bfd95
# https://www.tensorflow.org/hub/tutorials/tf2_text_classification
# https://keras.io/examples/vision/image_classification_from_scratch/
# https://www.kaggle.com/darthmanav/dog-vs-cat-classification-using-cnn
# Input Layer: It represent input image data. It will reshape image into single diminsion array. Example your image is 100x100=10000, it will convert to (100,1) array.
# Conv Layer: This layer will extract features from image.
# Pooling Layer: This layer reduces the spatial volume of input image after convolution.
# Fully Connected Layer: It connect the network from a layer to another layer
# Output Layer: It is the predicted values layer.
def gray_to_rgb(img):
    """Expand a grayscale image array of shape (H, W, 1) to (H, W, 3)
    by repeating the single channel along the channel axis."""
    return np.repeat(img, repeats=3, axis=2)
def plot_data_generator_first_20(train_generator):
    """Show the first 20 images of a Keras data generator in a 5x4 grid."""
    images = [tf.keras.preprocessing.image.load_img(path)
              for path in train_generator.filepaths[:20]]
    columns = 4
    rows = 5
    fig = plt.gcf()
    fig.set_size_inches(columns * 4, rows * 4)
    # subplot positions are 1-based, matching the 1..20 image order
    for position, img in enumerate(images, start=1):
        plt.subplot(rows, columns, position)
        plt.imshow(img)
    plt.show()
def predict_image(model, input_img, grayscale: bool):
    """Load one image, scale pixels to [0, 1], and print the model's
    'opened' score for it.

    ``input_img`` is a path; ``grayscale`` selects the color mode used
    when loading. The image is resized to IMAGE_PAIR_SIZE.
    """
    color_mode = 'grayscale' if grayscale else 'rgb'
    loaded_img = keras.preprocessing.image.load_img(
        input_img,
        target_size=IMAGE_PAIR_SIZE,
        color_mode=color_mode
    )
    # scale pixel values to [0, 1]
    img_array = keras.preprocessing.image.img_to_array(loaded_img)
    img_array = img_array.astype(np.float32) / 255.0
    batch = tf.expand_dims(img_array, 0)  # model expects a batch axis
    score = model.predict(batch)[0]
    print(
        "This image (%s) is %.2f %% opened."
        % (input_img, 100 * score)
    )
def show_pred_actual_lables(fig_dst, predictions, test_labels, test_images, class_names, class_indices):
    """Plot a random sample of test images next to their prediction bars.

    Correct predictions are rendered in blue and incorrect ones in red;
    the resulting figure is written to *fig_dst* and also shown on screen.
    """
    n_rows, n_cols = 10, 3
    sample_count = n_rows * n_cols
    plt.figure(figsize=(2 * 2 * n_cols, 2 * n_rows))
    for slot in range(sample_count):
        pick = random.choice(range(len(predictions)))
        # left cell: the image with its predicted/true labels
        plt.subplot(n_rows, 2 * n_cols, 2 * slot + 1)
        correct = plot_image(pick, predictions[pick], test_labels, test_images,
                             class_names, class_indices)
        # right cell: the confidence bar, colored by correctness
        plt.subplot(n_rows, 2 * n_cols, 2 * slot + 2)
        plot_value_array(predictions[pick], correct)
    plt.tight_layout()
    plt.savefig(fig_dst)
    plt.show()
def plot_image(i, predictions_item, true_label_id, images, class_names, class_indices) -> bool:
    """Draw image *i* with its predicted label; return True when the prediction is correct.

    The xlabel shows file name, predicted class, confidence percentage and
    the true class — blue for a correct prediction, red otherwise.
    """
    actual_id, image_path = true_label_id[i], images[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    file_name = os.path.basename(image_path)
    plt.imshow(mpimg.imread(image_path), cmap="gray")
    confidence = np.max(predictions_item)
    # binary decision on the top confidence
    # NOTE(review): 0.2 looks low as an 'opened' cutoff for a sigmoid output — confirm
    is_mouth_opened = True if confidence >= 0.2 else False
    # class ids taken from the input data's class_indices mapping
    predicted_id = class_indices['opened' if is_mouth_opened else 'closed']
    correct = predicted_id == actual_id
    color = 'blue' if correct else 'red'
    plt.xlabel("{}, {} {:2.0f}% ({})".format(file_name,
                                             class_names[predicted_id],
                                             100 * confidence,
                                             class_names[actual_id]),
               color=color)
    return correct
def plot_value_array(predictions_array, is_correct_prediction: bool):
    """Draw a single-bar confidence chart: blue when the prediction was correct, red otherwise."""
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    top_confidence = np.max(predictions_array)
    bars = plt.bar(range(1), top_confidence, color="#777777")
    plt.ylim([0, 1])
    top_index = np.argmax(predictions_array)
    bars[top_index].set_color('blue' if is_correct_prediction else 'red')
def _render_curves(epochs, curves, title, ylabel, save_path):
    """Plot (values, fmt, label) curves against *epochs*, save to *save_path* and show.

    Shared boilerplate (minor ticks, grids, labels, legend) for the three
    diagnostic plots produced by summarize_diagnostics.
    """
    for values, fmt, label in curves:
        plt.plot(epochs, values, fmt, label=label)
    plt.minorticks_on()
    plt.grid(which='major')
    plt.grid(which='minor', linestyle=':')
    plt.title(title)
    plt.xlabel('Epochs')
    plt.ylabel(ylabel)
    plt.legend()
    plt.savefig(save_path)
    plt.show()
def summarize_diagnostics(
        history_dict, plot_accuracy_path, plot_loss_path, plot_lr_path):
    """Plot learning-rate, loss and accuracy curves from a Keras History dict.

    :param history_dict: History.history-style dict with 'accuracy',
        'val_accuracy', 'loss', 'val_loss' and optionally 'lr' lists.
    :param plot_accuracy_path: file path for the accuracy figure.
    :param plot_loss_path: file path for the loss figure.
    :param plot_lr_path: file path for the learning-rate figure (only
        written when 'lr' is present in *history_dict*).
    """
    acc = history_dict['accuracy']
    val_acc = history_dict['val_accuracy']
    loss = history_dict['loss']
    val_loss = history_dict['val_loss']
    epochs = range(1, len(acc) + 1)
    if 'lr' in history_dict:
        # 'lr' is only recorded when an LR callback was active
        _render_curves(epochs,
                       [(history_dict['lr'], 'b', 'Learning rate')],
                       'Learning rate', 'LR', plot_lr_path)
    plt.clf()  # clear figure
    # Epochs vs training ('b' = blue) and validation ('r' = red) loss
    _render_curves(epochs,
                   [(loss, 'b', 'Training loss'),
                    (val_loss, 'r', 'Validation loss')],
                   'Training and validation loss (Cross Entropy Loss)',
                   'Loss', plot_loss_path)
    plt.clf()  # clear figure
    # Epochs vs training and validation accuracy
    _render_curves(epochs,
                   [(acc, 'b', 'Training acc'),
                    (val_acc, 'r', 'Validation acc')],
                   'Training and validation accuracy (Classification Accuracy)',
                   'Accuracy', plot_accuracy_path)
def recall_m(y_true, y_pred):
    """Recall metric: TP / (TP + FN), on rounded, [0, 1]-clipped tensors."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    # K.epsilon() guards against division by zero
    return tp / (actual_positives + K.epsilon())
def precision_m(y_true, y_pred):
    """Precision metric: TP / (TP + FP), on rounded, [0, 1]-clipped tensors."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # K.epsilon() guards against division by zero
    return tp / (predicted_positives + K.epsilon())
def f1_m(y_true, y_pred):
    """F1 metric: harmonic mean of precision_m and recall_m."""
    p = precision_m(y_true, y_pred)
    r = recall_m(y_true, y_pred)
    return 2 * ((p * r) / (p + r + K.epsilon()))
# This is a sample of a scheduler I used in the past
def lr_scheduler(epoch, lr):
    """Decay *lr* by 0.85^(epoch // decay_step); epoch 0 is left unchanged.

    NOTE(review): the factor is applied to the already-decayed *lr* Keras
    passes in, so the decay compounds across epochs — confirm intended.
    """
    decay_rate = 0.85
    decay_step = 1
    # guard clauses: no decay at epoch 0 or between decay steps
    if epoch == 0 or epoch % decay_step != 0:
        return lr
    return lr * pow(decay_rate, np.floor(epoch / decay_step))
# Define the Required Callback Function
class printlearningrate(tf.keras.callbacks.Callback):
    """Keras callback that prints the optimizer's learning rate after each epoch."""

    def on_epoch_end(self, epoch, logs=None):
        """Print the current LR (epoch is 0-based; printed 1-based).

        :param epoch: 0-based epoch index supplied by Keras.
        :param logs: unused metrics dict; default changed from the mutable
            ``{}`` (shared across calls) to ``None`` — backward compatible.
        """
        optimizer = self.model.optimizer
        lr = K.eval(optimizer.lr)
        epoch_count = epoch + 1
        # NOTE(review): '{:.2f}' prints 0.00 for typical small rates (e.g. 1e-3);
        # consider '{:.2e}' if more precision is wanted.
        print('\n', "Epoch:", epoch_count, ', LR: {:.2f}'.format(lr))
# Taken from https://github.com/AvinashNath2/Image-Classification-using-Keras
def create_compiled_model_lite(input_shape, opt=None) -> keras.Model:
    """Build and compile a small 3-conv-block binary classifier.

    :param input_shape: input image shape, e.g. (height, width, channels).
    :param opt: optimizer; defaults to a fresh Adam(lr=0.001). Created lazily
        because an ``opt=Adam(...)`` default is evaluated once at import time,
        so every call would share one optimizer instance (and its state).
    :return: compiled keras.Model (sigmoid output, binary_crossentropy loss).
    """
    if opt is None:
        opt = keras.optimizers.Adam(lr=0.001)
    model = keras.Sequential()
    model.add(layers.Convolution2D(32, (3, 3), input_shape=input_shape))
    model.add(layers.Activation('relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Convolution2D(32, (3, 3)))
    model.add(layers.Activation('relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Convolution2D(64, (3, 3)))
    model.add(layers.Activation('relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(64))
    model.add(layers.Activation('relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(1))
    model.add(layers.Activation('sigmoid'))
    # compile model
    model.compile(optimizer=opt, loss='binary_crossentropy',
                  metrics=['accuracy', f1_m, precision_m, recall_m])
    return model
def create_compiled_model_mobilenet2(input_shape, opt=None) -> keras.Model:
    """Build a binary classifier on a frozen ImageNet MobileNetV2 base.

    :param input_shape: input image shape; MobileNetV2 with imagenet weights
        expects 3 color channels.
    :param opt: optimizer; defaults to a fresh Adam(lr=0.001). Created lazily
        because an ``opt=Adam(...)`` default is evaluated once at import time,
        so every call would share one optimizer instance (and its state).
    :return: compiled keras.Model (sigmoid output, binary_crossentropy loss).
    """
    if opt is None:
        opt = keras.optimizers.Adam(lr=0.001)
    model = keras.Sequential()
    model.add(keras.applications.MobileNetV2(
        include_top=False, weights="imagenet", input_shape=input_shape))
    model.add(tf.keras.layers.GlobalAveragePooling2D())
    model.add(layers.Dense(1, activation='sigmoid'))
    model.layers[0].trainable = False  # Freeze the convolutional base (before compile)
    # compile model
    model.compile(optimizer=opt, loss='binary_crossentropy',
                  metrics=['accuracy', f1_m, precision_m, recall_m])
    return model
def create_compiled_model_vgg16(input_shape, opt=None) -> keras.Model:
    """Build a binary classifier on a frozen ImageNet VGG16 base.

    :param input_shape: input image shape; VGG16 with imagenet weights
        expects 3 color channels.
    :param opt: optimizer; defaults to a fresh Adam(lr=0.001). Created lazily
        because an ``opt=Adam(...)`` default is evaluated once at import time,
        so every call would share one optimizer instance (and its state).
    :return: compiled keras.Model (sigmoid output, binary_crossentropy loss).
    """
    if opt is None:
        opt = keras.optimizers.Adam(lr=0.001)
    vgg16_base = tf.keras.applications.VGG16(input_shape=input_shape, include_top=False, weights='imagenet')
    model = keras.Sequential()
    model.add(vgg16_base)
    model.add(tf.keras.layers.GlobalAveragePooling2D())
    model.add(layers.Dense(1, activation='sigmoid'))
    model.layers[0].trainable = False  # Freeze the convolutional base (before compile)
    # compile model
    model.compile(optimizer=opt, loss='binary_crossentropy',
                  metrics=['accuracy', f1_m, precision_m, recall_m])
    return model
def create_flowernet(input_shape, opt=None) -> keras.Model:
    """Build and compile a 4-conv-block binary classifier ('flowernet').

    :param input_shape: input image shape, e.g. (100, 100, 1).
    :param opt: optimizer; defaults to a fresh Adam(lr=0.001). Created lazily
        because an ``opt=Adam(...)`` default is evaluated once at import time,
        so every call would share one optimizer instance (and its state).
    :return: compiled keras.Model (sigmoid output, binary_crossentropy loss).
    """
    if opt is None:
        opt = keras.optimizers.Adam(lr=0.001)
    model = tf.keras.models.Sequential()
    model.add(layers.Conv2D(16, (3, 3),
                            input_shape=input_shape,  # dimensions = 100X100, color channel = B&W
                            activation='relu'))
    model.add(layers.MaxPooling2D(2, 2))
    model.add(layers.Conv2D(32, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D(2, 2))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D(2, 2))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D(2, 2))
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    # compile model
    model.compile(optimizer=opt, loss='binary_crossentropy',
                  metrics=['accuracy', f1_m, precision_m, recall_m])
    return model
def create_alexnet(input_shape, opt=None) -> keras.Model:
    """Build and compile an AlexNet-style binary classifier (5 conv, 3 dense layers).

    :param input_shape: input image shape, e.g. (100, 100, 1).
    :param opt: optimizer; defaults to a fresh Adam(lr=0.001). Created lazily
        because an ``opt=Adam(...)`` default is evaluated once at import time,
        so every call would share one optimizer instance (and its state).
    :return: compiled keras.Model (sigmoid output, binary_crossentropy loss).
        NOTE(review): metrics here are only ['accuracy'], unlike the sibling
        factories which also track f1/precision/recall — confirm intended.
    """
    if opt is None:
        opt = keras.optimizers.Adam(lr=0.001)
    model_alexnet = keras.Sequential()
    # 1st Convolutional Layer
    model_alexnet.add(layers.Conv2D(32, (3, 3),
                                    input_shape=input_shape,  # dimensions = 100X100, color channel = B&W
                                    padding='same',
                                    activation='relu'))
    # pooling
    model_alexnet.add(layers.MaxPooling2D(pool_size=(2, 2), padding='same'))
    model_alexnet.add(layers.BatchNormalization())
    # 2nd Convolutional Layer
    model_alexnet.add(layers.Conv2D(64, (3, 3),
                                    padding='same',
                                    activation='relu'))
    # pooling
    model_alexnet.add(layers.MaxPooling2D(pool_size=(2, 2), padding='same'))
    model_alexnet.add(layers.BatchNormalization())
    # 3rd Convolutional Layer
    model_alexnet.add(layers.Conv2D(64, (3, 3),
                                    padding='same',
                                    activation='relu'))
    model_alexnet.add(layers.BatchNormalization())
    # 4th Convolutional Layer
    model_alexnet.add(layers.Conv2D(128, (3, 3),
                                    padding='same',
                                    activation='relu'))
    model_alexnet.add(layers.BatchNormalization())
    # 5th Convolutional Layer
    model_alexnet.add(layers.Conv2D(128, (3, 3),
                                    padding='same',
                                    activation='relu'))
    # pooling
    model_alexnet.add(layers.MaxPooling2D(pool_size=(3, 3), padding='same'))
    model_alexnet.add(layers.BatchNormalization())
    # Flatten
    model_alexnet.add(layers.Flatten())
    # 1st Dense Layer
    model_alexnet.add(layers.Dense(128,
                                   activation='relu', kernel_initializer='glorot_uniform'))
    model_alexnet.add(layers.Dropout(0.10))
    model_alexnet.add(layers.BatchNormalization())
    # 2nd Dense Layer
    model_alexnet.add(layers.Dense(256,
                                   activation='relu', kernel_initializer='glorot_uniform'))
    model_alexnet.add(layers.Dropout(0.20))
    model_alexnet.add(layers.BatchNormalization())
    # 3rd Dense Layer
    model_alexnet.add(layers.Dense(512,
                                   activation='relu', kernel_initializer='glorot_uniform'))
    model_alexnet.add(layers.Dropout(0.2))
    model_alexnet.add(layers.BatchNormalization())
    # output layer
    model_alexnet.add(layers.Dense(1, activation='sigmoid'))
    # Compile
    model_alexnet.compile(loss='binary_crossentropy', optimizer=opt,
                          metrics=['accuracy'])
    return model_alexnet
def create_compiled_model(input_shape, opt=None) -> keras.Model:
    """Build and compile the default 3-block CNN binary classifier.

    :param input_shape: input image shape, e.g. (height, width, channels).
    :param opt: optimizer; defaults to a fresh Adam(lr=0.001). Created lazily
        because an ``opt=Adam(...)`` default is evaluated once at import time,
        so every call would share one optimizer instance (and its state).
    :return: compiled keras.Model (sigmoid output, binary_crossentropy loss).
    """
    # Note that when using the delayed-build pattern (no input shape specified),
    # the model gets built the first time you call `fit`, `eval`, or `predict`,
    # or the first time you call the model on some input data.
    if opt is None:
        opt = keras.optimizers.Adam(lr=0.001)
    model = keras.Sequential()
    model.add(layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same',
                            input_shape=input_shape))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Dropout(0.2))  # Layer 1
    model.add(layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Dropout(0.2))  # Layer 2
    model.add(layers.Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Dropout(0.2))  # Layer 3
    model.add(layers.Flatten())  # Fully connected layer
    model.add(layers.Dense(128, activation='relu', kernel_initializer='he_uniform'))  # Fully connected layer
    model.add(layers.Dropout(0.5))  # Fully connected layer
    model.add(layers.Dense(1, activation='sigmoid'))  # Fully connected layer
    # compile model
    model.compile(optimizer=opt, loss='binary_crossentropy',
                  metrics=['accuracy', f1_m, precision_m, recall_m])
    return model
def evaluate_model(interpreter, test_images, test_labels):
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
floating_model = interpreter.get_input_details()[0]['dtype'] == np.float32
# Run predictions on ever y image in the "test" dataset.
prediction_confs = []
class_indices = {'closed': 0, 'opened': 1}
for i, test_image in enumerate(test_images):
if i % 1000 == 0:
print('Evaluated on {n} results so far.'.format(n=i))
# Pre-processing: add batch dimension and convert to float32 to match with
# the model's input data format.
# load image by path
loaded_img = keras.preprocessing.image.load_img(
test_image, target_size=IMAGE_PAIR_SIZE, color_mode="grayscale"
)
img_array = keras.preprocessing.image.img_to_array(loaded_img)
img_array = img_array.astype('float32')
if floating_model:
# Normalize to [0, 1]
image_frame = img_array / 255.0
images_data = np.expand_dims(image_frame, 0).astype(np.float32) # or [img_data]
else: # 0.00390625 * q
images_data = np.expand_dims(img_array, 0).astype(np.uint8) # or [img_data]
interpreter.set_tensor(input_index, images_data)
# Run inference.
interpreter.invoke()
# Post-processing: remove batch dimension and find the digit with highest
# probability.
output = interpreter.tensor(output_index)
pred_confidence = np.argmax(output()[0])
is_mouth_opened = True if pred_confidence >= 0.2 else False
# classes taken from input data
predicted_label_id = class_indices['opened' if is_mouth_opened else 'closed']
prediction_confs.append(predicted_label_id)
print('\n')
# Compare prediction results with |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.