input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
other extra goodies': 'remera, catering y otros artículos promocionales',
't-shirt, catering, closing party, pro listing (micro-sponsor: logo in badge and web site), and other extra goodies': 'remera, catering, fiesta de cierre, listado profesional (micro-sponsor: logo en credencial y sitio web) y otros artículos promocionales',
'Table': 'Tabla',
'Table name': 'Nombre de la tabla',
'talk': 'charla',
'Talk Info': 'Información sobre la charla',
'Talk Proposal': 'Propuesta de charla',
'Talk Proposals': 'Propuestas de charlas',
'Talks': 'Charlas',
'test': 'test',
'test %(key)s': 'test %(key)s',
'test /%(key)s': 'test /%(key)s',
'text': 'text',
'Thanks for joining the partakers list': 'Thanks for joining the partakers list',
'The activity %(activity)s received a comment': 'The activity %(activity)s received a comment',
'The activity %(activity)s received a comment by %(user)s:\n%(comment)s': 'The activity %(activity)s received a comment by %(user)s:\n%(comment)s',
'The activity %(activity)s was confirmed': 'The activity %(activity)s was confirmed',
'The Financial Aid request process is described here:': 'El proceso de solicitud de Ayuda Financiera es descrito aqui:',
'The map below shows the home location of all attendees who agreed to make their information public.': 'El mapa muestra el lugar de origen de todos los asistentes que aceptaron hacer pública su información.\r\n',
'The payment process has failed.': 'El proceso de pago ha fallado.',
'There are errors in your form below': 'Su formulario contiene errores',
'This information will be encoded on your badge and can be provided to sponsors and exhibitors in the expo hall. These fields are optional unless otherwise noted. Mailing address information is required to send receipts for PSF donations.': 'Esta información será codificada en su identificación y puede ser provista a los patrocinadores y expositores en el salón. Estos campos son opcionales a menos que se especifique lo contrario. La dirección de correspondencia es requerida para enviar los recibos de donación a PSF.',
'this invoice': 'esta factura',
'Thursday': 'Jueves',
'Time': 'Hora',
'Time extension': 'Time extension',
'Time left': 'Time left',
'Time to Pay!': 'Hora de efectuar el pago!',
'time: %s': 'time: %s',
'Timeline': 'Timeline',
'Timestamp': 'Fecha y hora',
'Timetable': 'Cronograma',
'TIP: To change the sort order of the tables, click over the column headers': 'TIP: To change the sort order of the tables, click over the column headers',
'Title': 'Título',
'To': 'Para',
'to your payment': 'a su pago',
'Toggle Editor': 'Toggle Editor',
'Total Amount Billed': 'Monto total facturado',
'Total Amount Received': 'Monto total recibido',
'Total Amount Still Due': 'Monto que todavía adeuda',
'Track': 'Track',
'Transfer cancelled': 'Transferencia cancelada',
'Transfers Balance From': 'Transfiere Balance De',
'Traveling': 'Viajando',
'True': 'True',
'Tryton': 'Tryton',
'Tuesday': 'Martes',
'Tutorial': 'Tutorial',
'tutorial': 'tutorial',
'Tutorials': 'Tutoriales',
'Tutorials Only (early), $80': 'Solo Tutoriales (temprana), $80',
'Tutorials Only (on site), $120': 'Solo Tutoriales (en el sitio), $120',
'Tutorials Only (regular), $100': 'Solo Tutoriales (regular), $100',
'Tutorials+food': 'Tutorials+food',
'Tweet feature disabled (user not logged in)': 'Tweet feature disabled (user not logged in)',
'Twitter username': 'Nombre usuario Twitter',
'Type': 'Tipo',
'Type in the box the tokens of the people you want to pay the balance from. You can insert multiple tokens separated by a comma. They can find their tokens on the [PAY NOW] page.': 'Escriba en el espacio las fichas de las personas por las cuales desea pagar. Puede escribir varias fichas separadas por comas. Ellos pueden encontrar sus fichas en la página [PAGUE AHORA].',
'Type of notification': 'Type of notification',
'Type of operation': 'Type of operation',
'Unable to download because:': 'Unable to download because:',
'Unable to download tweets:': 'Unable to download tweets:',
'unable to parse csv file': 'imposible convertir archivo CSV',
'unable to retrieve data': 'imposible recuperar datos',
'Unable to send email': 'Imposible enviar email',
'Unconfirmed activities are shown shaded (grey) until author confirm scheduled date, time and room.': 'Unconfirmed activities are shown shaded (grey) until author confirm scheduled date, time and room.',
'Unconfirmed activities are shown shaded until author confirm scheduled date, time and room.': 'Las actividades no confirmadas se muestran en gris hasta que el autor confirme fecha, hora y salón.',
'Unconfirmed activities are shown shaded until author confirms scheduled date, time and room.': 'Las actividades no confirmadas se muestran en gris hasta que el autor confirme fecha, hora y salón.',
'Unsure': 'No estoy seguro',
'Update my project application': 'Update my project application',
'Update Record': 'Actualice el registro',
'Update result': 'Update result',
'Update this Activity Proposal': 'Actualizar esta Propuesta de Actividad',
'Update this Talk Proposal': 'Actualice esta propuesta de charla',
'Updated %s operations': 'Updated %s operations',
'Upload': 'Upload',
'USD': 'USD',
'user': 'user',
'User': 'Usuario',
'User %(created_signature)s on %(created_on)s says: %(body)s': 'El usuario %(created_signature)s el %(created_on)s dijo: %(body)s',
'User %(id)s is impersonating %(other_id)s': 'User %(id)s is impersonating %(other_id)s',
'User %(id)s Logged-in': 'Sesión Iniciada Usuario %(id)s',
'User %(id)s Logged-out': 'Sesión Cerrada Usuario %(id)s',
'User %(id)s Password reset': 'User %(id)s Password reset',
'User %(id)s Password retrieved': 'Constraseña recuperada para usuario %(id)s',
'User %(id)s Registered': 'Usuario %(id)s Registrado',
'User %(id)s Username retrieved': 'User %(id)s Username retrieved',
'User %(id)s Verification email sent': 'Usuario %(id)s Verificación por email enviada',
'User ID': 'ID Usuario',
'User name': 'User name',
'User Votes': 'User Votes',
'Username': 'Username',
'Value': 'Value',
'value already in database or empty': 'value already in database or empty',
'value not allowed': 'value not allowed',
'value not in database': 'value not in database',
'Value or record reference': 'Value or record reference',
'vegan': 'vegan',
'vegetarian': 'vegetariano',
'Venue': 'Lugar',
'Verify Password': 'Verificar contraseña',
'Video': 'Video',
'View': 'Vista',
'Viewing page version: %s': 'Viendo versión de página: %s',
'Volunteer': 'Voluntariado',
'Voting': 'Votar',
'Voting is disabled': 'Voting is disabled',
'Voting is not allowed yet': 'Voting is not allowed yet',
'Voto Aceptado!': 'Voto Aceptado!',
'Warning': 'Advertencia',
'web2conf': 'web2conf',
'web2conf Confirmación de Inscripción': 'web2conf Confirmación de Inscripción',
'web2py': 'web2py',
'Wednesday': 'Miércoles',
'Welcome': 'Welcome',
'Welcome to PyCon': 'Bienvenido a PyCon',
'Welcome to web2py': 'Bienvenido a web2py',
"Whats's included?": '¿Que incluye?',
'WIKI format: ': 'WIKI format: ',
"women's/large": 'mujer/grande',
"women's/medium": 'mujer/mediana',
"women's/small": 'mujer/pequeña',
'Workshop': 'Workshop',
'workshop': 'workshop',
"Write a comment for the project's owner": "Write a comment for the project's owner",
"Write a comment or for the project's owner": "Write a comment or for the project's owner",
'xlarge': 'xlarge',
'xxlarge': 'xxlarge',
'xxxlarge': 'xxxlarge',
'Yes. Give them your "payment token":': 'Sí. Entrégueles su ficha de pago:',
'You can only choose one tutorial per each session': 'You can only choose one tutorial per each session',
'You can only choose tutorial per each session': 'Usted solo puede escoger un tutorial por sesión.',
'You can pay register somebody else here and transfer their balance. Make sure the email address is correct or they will be unable to change tutorials of update profile. You can register multiple attendees one at the time.': 'Aqui usted puede registrar a alguien más y transferir su balance. Asegure que la dirección de correo es correcta o ellos no podrán cambiar tutoriales o actualizar el perfil. Usted puede registrar multiples asistentes uno a la vez.',
'You can pay somebody else\'s conference fees by transferring their balance. The transfer is pending until you pay your conference fees. To transfer the balance type below the "payment token" for the registrants, separated by a comma': 'Usted puede pagar los costos de conferencia de otro transfiriendo su balance. La transferencia quedará pendiente hasta que efectue el pago. Para transferir el balance coloque a continuación la ficha de pago de cada asistente, separadas por comas',
'You can register somebody else here and transfer their balance. Be sure their email address is correct - it is required to verify registration and to log on. You can register multiple attendees one at the time.': 'Aqui usted puede registrar a alguien más y transferir su balance. Asegure que la dirección de correo es correcta, es necesaria para verificar el registro y para conectarse. Usted puede registrar multiples asistentes uno a la vez.',
'You can tweet here': 'You can tweet here',
'You can use markmin syntax here': 'You can use markmin syntax here',
'You dismissed the project': 'You dismissed the project',
'You dismissed the project Crawley': 'You dismissed the project Crawley',
'You dismissed the project Tryton': 'You dismissed the project Tryton',
'You dismissed the project web2py': 'You dismissed the project web2py',
'You have %s payments generated, click here to see the status': 'Ud. tiene %s pagos generados, presione aquí para ver su estado',
'You have a credit of': 'Usted tiene un crédito por',
'You have not paid for your registration; the cost is': 'Usted no ha pagado su registro, el costo es:',
'You have successfully finished the payment process. Thanks you.': 'You have successfully finished the payment process. Thanks you.',
'You joined': 'You joined',
'Your Activities': 'Sus Actividades',
'Your activity %(activity)s has been confirmed': 'Su actividad %(activity)s ha sido confirmada',
'Your activity %(activity)s has been confirmed.\nYou can access the current activity information at %(link)s': 'Su actividad %(activity)s ha sido confirmada. Puede acceder a la información actual | |
<reponame>GLOMICON/emp
#!/usr/bin/env python
from __future__ import division
__author__ = "<NAME>"
__copyright__ = "Copyright 2012, The QIIME project"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__license__ = "GPL"
__version__ = "1.5.0-dev"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
"""Contains functions used in the most_wanted_otus.py script."""
from collections import defaultdict
from itertools import cycle
from operator import itemgetter
from os import makedirs
from os.path import basename, join, normpath, splitext
from pickle import dump
from tempfile import NamedTemporaryFile
from pylab import axes, figlegend, figure, legend, pie, savefig
from biom.parse import parse_biom_table
from cogent import DNA, LoadSeqs
from cogent.app.blast import blast_seqs, Blastall
from cogent.app.formatdb import build_blast_db_from_fasta_path
from cogent.parse.blast import BlastResult
from cogent.parse.fasta import MinimalFastaParser
from cogent.util.misc import remove_files
from qiime.colors import data_colors, data_color_order
from qiime.parse import parse_mapping_file_to_dict
from qiime.util import (add_filename_suffix, parse_command_line_parameters,
get_options_lookup, make_option, qiime_system_call)
from qiime.workflow.util import generate_log_fp, WorkflowError, WorkflowLogger
html_header = '<html lang="en"><head><meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <title>Most Wanted OTUs</title><link rel="stylesheet" type="text/css" href="most_wanted_otus.css"></head><body>'
html_footer = '</body></html>'
def generate_most_wanted_list(output_dir, otu_table_fps, rep_set_fp, gg_fp,
        nt_fp, mapping_fp, mapping_category, top_n, min_abundance,
        max_abundance, min_categories, num_categories_to_plot,
        max_gg_similarity, max_nt_similarity, e_value, word_size,
        merged_otu_table_fp, suppress_taxonomic_output, jobs_to_start,
        command_handler, status_update_callback, force):
    """Run the full "most wanted OTUs" workflow and write the reports.

    Filters the input OTU tables down to novel candidate OTUs (via
    _get_most_wanted_filtering_commands), executes those commands through
    command_handler, picks the top_n least-nt-similar OTUs from the BLAST
    results, and writes TSV, FASTA, and HTML reports (plus per-OTU abundance
    pie charts) into output_dir.

    Parameters (all file paths are strings):
        output_dir -- directory to create and write all output into
        otu_table_fps -- list of input OTU table filepaths
        rep_set_fp -- representative set FASTA filepath
        gg_fp -- Greengenes reference filepath
        nt_fp -- NCBI nt BLAST database filepath
        mapping_fp / mapping_category -- metadata mapping file and the
            category to collapse samples by
        top_n -- number of most-wanted OTUs to report
        min_abundance / max_abundance / min_categories -- candidate filters
        num_categories_to_plot -- max slices per abundance pie chart
        max_gg_similarity / max_nt_similarity -- similarity cutoffs
        e_value / word_size -- BLAST parameters
        merged_otu_table_fp -- pre-merged OTU table to reuse, or None
        suppress_taxonomic_output -- omit taxonomy columns when True
        jobs_to_start -- parallel job count for uclust/BLAST steps
        command_handler / status_update_callback -- QIIME workflow plumbing
        force -- overwrite an existing output_dir instead of raising

    Raises WorkflowError if output_dir exists and force is False.
    """
    try:
        makedirs(output_dir)
    except OSError:
        # Directory already exists; only proceed if the user forced overwrite.
        if not force:
            raise WorkflowError("Output directory '%s' already exists. Please "
                    "choose a different directory, or force overwrite with -f."
                    % output_dir)
    logger = WorkflowLogger(generate_log_fp(output_dir))
    commands, blast_results_fp, rep_set_cands_failures_fp, \
            master_otu_table_ms_fp = _get_most_wanted_filtering_commands(
            output_dir, otu_table_fps,
            rep_set_fp, gg_fp, nt_fp, mapping_fp, mapping_category,
            min_abundance, max_abundance, min_categories, max_gg_similarity,
            e_value, word_size, merged_otu_table_fp, jobs_to_start)
    # Execute the commands, but keep the logger open because
    # we're going to write additional status updates as we process the data.
    command_handler(commands, status_update_callback, logger,
                    close_logger_on_success=False)
    commands = []
    # We'll sort the BLAST results by percent identity (ascending) and pick the
    # top n.
    logger.write("Reading in BLAST results, sorting by percent identity, "
                 "and picking the top %d OTUs.\n\n" % top_n)
    top_n_mw = _get_top_n_blast_results(open(blast_results_fp, 'U'), top_n,
            max_nt_similarity)
    # Read in our filtered down candidate seqs file and latest filtered and
    # collapsed OTU table. We'll need to compute some stats on these to include
    # in our report.
    logger.write("Reading in filtered candidate sequences and latest filtered "
                 "and collapsed OTU table.\n\n")
    mw_seqs = _get_rep_set_lookup(open(rep_set_cands_failures_fp, 'U'))
    master_otu_table_ms = parse_biom_table(open(master_otu_table_ms_fp, 'U'))
    # Write results out to tsv and HTML table.
    logger.write("Writing most wanted OTUs results to TSV and HTML "
                 "tables.\n\n")
    output_img_dir = join(output_dir, 'img')
    try:
        makedirs(output_img_dir)
    except OSError:
        # It already exists, which is okay since we already know we are in
        # 'force' mode from above.
        pass
    tsv_lines, html_table_lines, mw_fasta_lines, plot_fps, plot_data_fps = \
            _format_top_n_results_table(top_n_mw,
            mw_seqs, master_otu_table_ms, output_img_dir, mapping_category,
            suppress_taxonomic_output, num_categories_to_plot)
    # TSV report.
    mw_tsv_rel_fp = 'most_wanted_otus.txt'
    mw_tsv_fp = join(output_dir, mw_tsv_rel_fp)
    mw_tsv_f = open(mw_tsv_fp, 'w')
    mw_tsv_f.write(tsv_lines)
    mw_tsv_f.close()
    # FASTA file of the most-wanted sequences.
    mw_fasta_rel_fp = 'most_wanted_otus.fasta'
    mw_fasta_fp = join(output_dir, mw_fasta_rel_fp)
    mw_fasta_f = open(mw_fasta_fp, 'w')
    mw_fasta_f.write(mw_fasta_lines)
    mw_fasta_f.close()
    # HTML report, with download links above and below the table.
    html_dl_links = ('<a href="%s" target="_blank">Download table in tab-'
            'separated value (TSV) format</a><br /><a href="%s" '
            'target="_blank">Download OTU sequence data in FASTA format</a>' %
            (mw_tsv_rel_fp, mw_fasta_rel_fp))
    html_lines = '%s<div>%s<br /><br />%s<br />%s</div>%s' % (html_header, html_dl_links,
        html_table_lines, html_dl_links, html_footer)
    mw_html_f = open(join(output_dir, 'most_wanted_otus.html'), 'w')
    mw_html_f.write(html_lines)
    mw_html_f.close()
    logger.close()
def _get_most_wanted_filtering_commands(output_dir, otu_table_fps, rep_set_fp,
        gg_fp, nt_fp, mapping_fp, mapping_category, min_abundance,
        max_abundance, min_categories, max_gg_similarity, e_value, word_size,
        merged_otu_table_fp, jobs_to_start):
    """Build the list of QIIME shell commands for the candidate-OTU pipeline.

    Nothing is executed here; each entry in the returned command list is a
    [(description, command_string)] pair to be run later by a command handler.
    The pipeline: filter out GG reference OTUs, keep OTUs within the abundance
    range, drop samples missing from the mapping file, collapse by
    mapping_category, merge the per-table results (skipped entirely when
    merged_otu_table_fp is given), filter by min_categories, subset the rep
    set, run uclust against GG, and BLAST the uclust failures against nt.

    Returns a 4-tuple:
        (commands, blast_results_fp, rep_set_cands_failures_fp,
         master_otu_table_ms_fp)
    where the three filepaths name outputs that will exist only after the
    commands have been executed.
    """
    commands = []
    otu_tables_to_merge = []
    if merged_otu_table_fp is None:
        for otu_table_fp in otu_table_fps:
            # First filter to keep only new (non-GG) OTUs.
            novel_otu_table_fp = join(output_dir, add_filename_suffix(otu_table_fp,
                    '_novel'))
            commands.append([('Filtering out all GG reference OTUs',
                    'filter_otus_from_otu_table.py -i %s -o %s -e %s' %
                    (otu_table_fp, novel_otu_table_fp, gg_fp))])
            # Next filter to keep only abundant otus in the specified range
            # (looking only at extremely abundant OTUs has the problem of yielding
            # too many that are similar to stuff in the nt database).
            novel_abund_otu_table_fp = join(output_dir,
                    add_filename_suffix(novel_otu_table_fp, '_min%d_max%d' %
                    (min_abundance, max_abundance)))
            commands.append([('Filtering out all OTUs that do not fall within the '
                    'specified abundance threshold',
                    'filter_otus_from_otu_table.py -i %s -o %s -n %d -x %d' %
                    (novel_otu_table_fp, novel_abund_otu_table_fp, min_abundance,
                     max_abundance))])
            # Remove samples from the table that aren't in the mapping file.
            novel_abund_filtered_otu_table_fp = join(output_dir,
                    add_filename_suffix(novel_abund_otu_table_fp,
                    '_known_samples'))
            commands.append([('Filtering out samples that are not in the mapping '
                    'file',
                    'filter_samples_from_otu_table.py -i %s -o %s '
                    '--sample_id_fp %s' % (novel_abund_otu_table_fp,
                    novel_abund_filtered_otu_table_fp, mapping_fp))])
            # Next, collapse by mapping_category.
            otu_table_by_samp_type_fp = join(output_dir,
                    add_filename_suffix(novel_abund_filtered_otu_table_fp, '_%s' %
                    mapping_category))
            commands.append([('Collapsing OTU table by %s' % mapping_category,
                    'summarize_otu_by_cat.py -c %s -o %s -m %s -i %s' %
                    (novel_abund_filtered_otu_table_fp, otu_table_by_samp_type_fp,
                     mapping_category, mapping_fp))])
            otu_tables_to_merge.append(otu_table_by_samp_type_fp)
        # Merge all collapsed OTU tables.
        master_otu_table_fp = join(output_dir,
                'master_otu_table_novel_min%d_max%d_%s.biom' %
                (min_abundance, max_abundance, mapping_category))
        commands.append([('Merging collapsed OTU tables',
                'merge_otu_tables.py -i %s -o %s' %
                (','.join(otu_tables_to_merge), master_otu_table_fp))])
    else:
        # Caller supplied a pre-merged table; skip all of the per-table work.
        master_otu_table_fp = merged_otu_table_fp
    # Filter to contain only otus in the specified minimum number of sample
    # types.
    master_otu_table_ms_fp = join(output_dir, add_filename_suffix(
            master_otu_table_fp, '_ms%d' % min_categories))
    commands.append([('Filtering OTU table to include only OTUs that appear '
            'in at least %d sample groups' % min_categories,
            'filter_otus_from_otu_table.py -i %s -o %s -s %d' %
            (master_otu_table_fp, master_otu_table_ms_fp, min_categories))])
    # Now that we have a filtered down OTU table of good candidate OTUs, filter
    # the corresponding representative set to include only these candidate
    # sequences.
    rep_set_cands_fp = join(output_dir,
            add_filename_suffix(rep_set_fp, '_candidates'))
    commands.append([('Filtering representative set to include only the '
            'latest candidate OTUs',
            'filter_fasta.py -f %s -o %s -b %s' %
            (rep_set_fp, rep_set_cands_fp, master_otu_table_ms_fp))])
    # Find the otus that don't hit GG at a certain maximum similarity
    # threshold.
    uclust_output_dir = join(output_dir, 'most_wanted_candidates_%s_%s' %
            (basename(gg_fp), str(max_gg_similarity)))
    commands.append([('Running uclust to get list of sequences that don\'t '
            'hit the maximum GG similarity threshold',
            'parallel_pick_otus_uclust_ref.py -i %s -o %s -r %s -s %s -O %d' %
            (rep_set_cands_fp, uclust_output_dir, gg_fp,
             str(max_gg_similarity), jobs_to_start))])
    # Filter the rep set to only include the failures from uclust.
    rep_set_cands_failures_fp = join(output_dir,
            add_filename_suffix(rep_set_cands_fp, '_failures'))
    commands.append([('Filtering candidate sequences to only include uclust '
            'failures',
            'filter_fasta.py -f %s -s %s -o %s' %
            (rep_set_cands_fp, join(uclust_output_dir,
             splitext(basename(rep_set_cands_fp))[0] + '_failures.txt'),
             rep_set_cands_failures_fp))])
    # BLAST the failures against nt.
    blast_output_dir = join(output_dir, 'blast_output')
    commands.append([('BLASTing filtered candidate sequences against nt '
            'database',
            'parallel_blast.py -i %s -o %s -r %s -D -e %f -w %d -O %d' %
            (rep_set_cands_failures_fp, blast_output_dir, nt_fp, e_value,
             word_size, jobs_to_start))])
    # parallel_blast.py names its tabular output after the input FASTA file.
    blast_results_fp = join(blast_output_dir,
            splitext(basename(rep_set_cands_failures_fp))[0] +
            '_blast_out.txt')
    return commands, blast_results_fp, rep_set_cands_failures_fp, \
           master_otu_table_ms_fp
def _get_top_n_blast_results(blast_results_f, top_n, max_nt_similarity):
"""blast_results should only contain a single hit per query sequence"""
result = []
seen_otus = {}
for line in blast_results_f:
# Skip headers and comments.
line = line.strip()
if line and not line.startswith('#'):
otu_id, subject_id, percent_identity = line.split('\t')[:3]
percent_identity = float(percent_identity)
# Skip otus that are too similar to their subject, and skip
# duplicate query hits.
if ((percent_identity / 100.0) <= max_nt_similarity and
otu_id not in seen_otus):
result.append((otu_id, subject_id, percent_identity))
seen_otus[otu_id] = True
return sorted(result, key=itemgetter(2))[:top_n]
def _get_rep_set_lookup(rep_set_f):
    """Build a lookup of sequence ID -> sequence from a FASTA file.

    Only the first whitespace-delimited token of each FASTA label is used as
    the key; any trailing description on the label line is discarded.
    """
    return dict((label.strip().split()[0], sequence)
                for label, sequence in MinimalFastaParser(rep_set_f))
def _format_top_n_results_table(top_n_mw, mw_seqs, master_otu_table_ms,
output_img_dir, mapping_category,
suppress_taxonomic_output,
num_categories_to_plot):
tsv_lines = ''
html_lines = ''
mw_fasta_lines = ''
plot_fps = []
plot_data_fps = []
tsv_lines += '#\tOTU ID\tSequence\t'
if not suppress_taxonomic_output:
tsv_lines += 'Greengenes taxonomy\t'
tsv_lines += 'NCBI nt closest match\tNCBI nt % identity\n'
html_lines += ('<table id="most_wanted_otus_table" border="border">'
'<tr><th>#</th><th>OTU</th>')
if not suppress_taxonomic_output:
html_lines += '<th>Greengenes taxonomy</th>'
html_lines += ('<th>NCBI nt closest match</th>'
'<th>Abundance by %s</th></tr>' % mapping_category)
for mw_num, (otu_id, subject_id, percent_identity) in enumerate(top_n_mw):
# Grab all necessary information to be included in our report.
seq = mw_seqs[otu_id]
mw_fasta_lines += '>%s\n%s\n' % (otu_id, seq)
# Splitting code taken from
# http://code.activestate.com/recipes/496784-split-string-into-n-
# size-pieces/
split_seq = [seq[i:i+40] for i in range(0, len(seq), 40)]
if not suppress_taxonomic_output:
tax = master_otu_table_ms.ObservationMetadata[
master_otu_table_ms.getObservationIndex(otu_id)]['taxonomy']
gb_id = subject_id.split('|')[3]
ncbi_link = 'http://www.ncbi.nlm.nih.gov/nuccore/%s' % gb_id
# Compute the abundance of each most wanted OTU in each sample
# grouping and create a pie chart to go in the HTML table.
samp_types = master_otu_table_ms.SampleIds
counts = master_otu_table_ms.observationData(otu_id)
plot_data = _format_pie_chart_data(samp_types, counts,
num_categories_to_plot)
# Piechart code based on:
# http://matplotlib.sourceforge.net/examples/pylab_examples/
# pie_demo.html
# http://www.saltycrane.com/blog/2006/12/example-pie-charts-using-
# python-and/
figure(figsize=(8,8))
ax = axes([0.1, 0.1, 0.8, 0.8])
patches = pie(plot_data[0], colors=plot_data[2], shadow=True)
# We need a relative path to the image.
pie_chart_filename = 'abundance_by_%s_%s.png' % (mapping_category,
otu_id)
pie_chart_rel_fp = join(basename(normpath(output_img_dir)),
pie_chart_filename)
pie_chart_abs_fp = join(output_img_dir, pie_chart_filename)
savefig(pie_chart_abs_fp, transparent=True)
plot_fps.append(pie_chart_abs_fp)
# Write out pickled data for easy plot editing post-creation.
plot_data_fp = join(output_img_dir, 'abundance_by_%s_%s.p' %
(mapping_category, otu_id))
dump(plot_data, open(plot_data_fp, 'wb'))
plot_data_fps.append(plot_data_fp)
tsv_lines += '%d\t%s\t%s\t' % (mw_num + 1, otu_id, seq)
if not suppress_taxonomic_output:
| |
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'pyEtude.ui'
##
## Created by: Qt User Interface Compiler version 5.15.0
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import (QCoreApplication, QDate, QDateTime, QMetaObject,
QObject, QPoint, QRect, QSize, QTime, QUrl, Qt)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont,
QFontDatabase, QIcon, QKeySequence, QLinearGradient, QPalette, QPainter,
QPixmap, QRadialGradient)
from PySide2.QtWidgets import *
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
if not MainWindow.objectName():
MainWindow.setObjectName(u"MainWindow")
MainWindow.setEnabled(True)
MainWindow.resize(728, 471)
sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QSize(728, 471))
MainWindow.setMaximumSize(QSize(728, 471))
MainWindow.setMouseTracking(False)
MainWindow.setStyleSheet(u"QWidget {\n"
" background-color: #262626;\n"
" border: 0px solid #444444;\n"
" padding: 0px;\n"
" color: #FFFFFF;\n"
" selection-background-color: #444444;\n"
" selection-color: #FFFFFF;\n"
"}\n"
"\n"
"QWidget:disabled {\n"
" background-color: #252424;\n"
" color: #787878;\n"
" selection-background-color: #14506E;\n"
" selection-color: #787878;\n"
"}\n"
"\n"
"/* QWidget::item:selected {\n"
" background-color: none;\n"
"}\n"
"\n"
"QWidget::item:hover {\n"
" background-color: #148CD2;\n"
" color: #32414B;\n"
"} */\n"
"\n"
"/* QMainWindow ------------------------------------------------------------\n"
"\n"
"This adjusts the splitter in the dock widget, not qsplitter\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qmainwindow\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QMainWindow::separator {\n"
" background-color: #32414B;\n"
" border: 0px solid #19232D;\n"
" spacing: 0px;\n"
" padding: 2px;\n"
"}\n"
"\n"
"QMainWindow::separato"
"r:hover {\n"
" background-color: #505F69;\n"
" border: 0px solid #148CD2;\n"
"}\n"
"\n"
"QMainWindow::separator:horizontal {\n"
" width: 5px;\n"
" margin-top: 2px;\n"
" margin-bottom: 2px;\n"
" image: url(\":/qss_icons/rc/toolbar_separator_vertical.png\");\n"
"}\n"
"\n"
"QMainWindow::separator:vertical {\n"
" height: 5px;\n"
" margin-left: 2px;\n"
" margin-right: 2px;\n"
" image: url(\":/qss_icons/rc/toolbar_separator_horizontal.png\");\n"
"}\n"
"\n"
"/* QToolTip ---------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qtooltip\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QToolTip {\n"
" background-color: #2b2b2b;\n"
" border: 1px solid #767676;\n"
" color: #FFFFFF;\n"
" /* Remove padding, for fix combo box tooltip */\n"
" padding: 0px;\n"
" /* Reducing transparency to read better */\n"
" opacity: 230;\n"
"}\n"
"\n"
"/* QStatusBar ------------------------------------------"
"-------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qstatusbar\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QStatusBar {\n"
" border: 1px solid #32414B;\n"
" /* Fixes Spyder #9120, #9121 */\n"
" background: #32414B;\n"
"}\n"
"\n"
"QStatusBar QToolTip {\n"
" background-color: #148CD2;\n"
" border: 1px solid #19232D;\n"
" color: #19232D;\n"
" /* Remove padding, for fix combo box tooltip */\n"
" padding: 0px;\n"
" /* Reducing transparency to read better */\n"
" opacity: 230;\n"
"}\n"
"\n"
"QStatusBar QLabel {\n"
" /* Fixes Spyder #9120, #9121 */\n"
" background: transparent;\n"
"}\n"
"\n"
"/* QCheckBox --------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qcheckbox\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QCheckBox {\n"
" background-color: #19232D;\n"
" color: #F0F0F0;\n"
" spacing: 4px;\n"
""
" outline: none;\n"
" padding-top: 4px;\n"
" padding-bottom: 4px;\n"
"}\n"
"\n"
"QCheckBox:focus {\n"
" border: none;\n"
"}\n"
"\n"
"QCheckBox QWidget:disabled {\n"
" background-color: #19232D;\n"
" color: #787878;\n"
"}\n"
"\n"
"QCheckBox::indicator {\n"
" margin-left: 4px;\n"
" margin-right: 4px;\n"
" height: 13px;\n"
" width: 13px;\n"
" border: 1px solid #605e5c;\n"
" background: #323130;\n"
"}\n"
"\n"
"QCheckBox::indicator:checked {\n"
" border: 1px solid #605e5c;\n"
" background: #484644;\n"
"}\n"
"\n"
"/* QGroupBox --------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qgroupbox\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QGroupBox {\n"
" font-weight: bold;\n"
" border: 1px solid #444444;\n"
" border-radius: 4px;\n"
" padding: 4px;\n"
" margin-top: 16px;\n"
"}\n"
"\n"
"QGroupBox::title {\n"
" subcontrol-origin: margin;\n"
" subcontrol-position: top left;\n"
" l"
"eft: 3px;\n"
" padding-left: 3px;\n"
" padding-right: 5px;\n"
" padding-top: 8px;\n"
" padding-bottom: 16px;\n"
"}\n"
"\n"
"QGroupBox::indicator {\n"
" margin-left: 2px;\n"
" height: 12px;\n"
" width: 12px;\n"
" background-color: #262626;\n"
" border: 1px solid #605e5c;\n"
" color: #F0F0F0;\n"
"}\n"
"\n"
"QGroupBox::indicator:checked {\n"
" background-color: #484644;\n"
" border: 3px solid #605e5c;\n"
"}\n"
"\n"
"/* QRadioButton -----------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qradiobutton\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QRadioButton {\n"
" background-color: #19232D;\n"
" color: #F0F0F0;\n"
" spacing: 4px;\n"
" padding: 0px;\n"
" border: none;\n"
" outline: none;\n"
"}\n"
"\n"
"QRadioButton:focus {\n"
" border: none;\n"
"}\n"
"\n"
"QRadioButton:disabled {\n"
" background-color: #19232D;\n"
" color: #787878;\n"
" border: none;\n"
" outline: none;\n"
""
"}\n"
"\n"
"QRadioButton QWidget {\n"
" background-color: #19232D;\n"
" color: #F0F0F0;\n"
" spacing: 0px;\n"
" padding: 0px;\n"
" outline: none;\n"
" border: none;\n"
"}\n"
"\n"
"QRadioButton::indicator {\n"
" border: none;\n"
" outline: none;\n"
" margin-left: 4px;\n"
" height: 16px;\n"
" width: 16px;\n"
"}\n"
"\n"
"QRadioButton::indicator:unchecked {\n"
" image: url(\":/qss_icons/rc/radio_unchecked.png\");\n"
"}\n"
"\n"
"QRadioButton::indicator:unchecked:hover, QRadioButton::indicator:unchecked:focus, QRadioButton::indicator:unchecked:pressed {\n"
" border: none;\n"
" outline: none;\n"
" image: url(\":/qss_icons/rc/radio_unchecked_focus.png\");\n"
"}\n"
"\n"
"QRadioButton::indicator:unchecked:disabled {\n"
" image: url(\":/qss_icons/rc/radio_unchecked_disabled.png\");\n"
"}\n"
"\n"
"QRadioButton::indicator:checked {\n"
" border: none;\n"
" outline: none;\n"
" image: url(\":/qss_icons/rc/radio_checked.png\");\n"
"}\n"
"\n"
"QRadioButton::indicator:checked:hover, QRadioButton::indicator:chec"
"ked:focus, QRadioButton::indicator:checked:pressed {\n"
" border: none;\n"
" outline: none;\n"
" image: url(\":/qss_icons/rc/radio_checked_focus.png\");\n"
"}\n"
"\n"
"QRadioButton::indicator:checked:disabled {\n"
" outline: none;\n"
" image: url(\":/qss_icons/rc/radio_checked_disabled.png\");\n"
"}\n"
"\n"
"/* QMenuBar ---------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qmenubar\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QMenuBar {\n"
" background-color: #32414B;\n"
" padding: 2px;\n"
" border: 1px solid #19232D;\n"
" color: #F0F0F0;\n"
"}\n"
"\n"
"QMenuBar:focus {\n"
" border: 1px solid #148CD2;\n"
"}\n"
"\n"
"QMenuBar::item {\n"
" background: transparent;\n"
" padding: 4px;\n"
"}\n"
"\n"
"QMenuBar::item:selected {\n"
" padding: 4px;\n"
" background: transparent;\n"
" border: 0px solid #32414B;\n"
"}\n"
"\n"
"QMenuBar::item:pressed {\n"
" padding: 4px;\n"
" border"
": 0px solid #32414B;\n"
" background-color: #148CD2;\n"
" color: #F0F0F0;\n"
" margin-bottom: 0px;\n"
" padding-bottom: 0px;\n"
"}\n"
"\n"
"/* QMenu ------------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qmenu\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QMenu {\n"
" border: 0.5px solid #787878;\n"
" color: #F0F0F0;\n"
" margin: 0px;\n"
"}\n"
"\n"
"QMenu::separator {\n"
" height: 1px;\n"
" background-color: #444444;\n"
"}\n"
"\n"
"QMenu::item {\n"
" background-color: #262626;\n"
" padding: 4px 4px 4px 4px;\n"
" /* Reserve space for selection border */\n"
" border: 1px transparent #32414B;\n"
"}\n"
"\n"
"QMenu::item:selected {\n"
" color: #F0F0F0;\n"
" background-color: #605e5c;\n"
"}\n"
"\n"
"/* QAbstractItemView ------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qcombobox\n"
""
"\n"
"--------------------------------------------------------------------------- */\n"
"QAbstractItemView {\n"
" alternate-background-color: #19232D;\n"
" color: #F0F0F0;\n"
" border: 1px solid #32414B;\n"
" border-radius: 4px;\n"
"}\n"
"\n"
"QAbstractItemView QLineEdit {\n"
" padding: 2px;\n"
"}\n"
"\n"
"/* QAbstractScrollArea ----------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qabstractscrollarea\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QAbstractScrollArea {\n"
" background-color: #262626;\n"
" border: 1px solid #969696;\n"
" border-radius: 4px;\n"
" padding: 2px;\n"
" /* fix #159 */\n"
" min-height: 1.25em;\n"
" /* fix #159 */\n"
" color: #F0F0F0;\n"
"}\n"
"\n"
"QAbstractScrollArea:disabled {\n"
" color: #262626;\n"
"}\n"
"\n"
"/* QScrollArea ------------------------------------------------------------\n"
"\n"
"----------------------------------------------------------------"
"----------- */\n"
"QScrollArea QWidget QWidget:disabled {\n"
" background-color: #262626;\n"
"}\n"
"\n"
"/* QScrollBar -------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qscrollbar\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"\n"
"QScrollBar:vertical {\n"
" background-color: #484644;\n"
" width: 16px;\n"
" margin: 16px 2px 16px 2px;\n"
" border: 1px solid #444444;\n"
" border-radius: 4px;\n"
"}\n"
"\n"
"QScrollBar::handle:vertical {\n"
" background-color: #787878;\n"
" border: 1px solid #787878;\n"
" min-height: 8px;\n"
" border-radius: 4px;\n"
"}\n"
"\n"
"QScrollBar::handle:vertical:hover {\n"
" background-color: #969696;\n"
" border: 1px solid #484644;\n"
" border-radius: 4px;\n"
" min-height: 8px;\n"
"}\n"
"\n"
"QScrollBar::add-line:vertical {\n"
" margin: 3px 0px 3px 0px;\n"
" border-image: url(\":/qss_icons/rc/arrow_down_disabled.png\");\n"
" height: 12px;\n"
" w"
"idth: 12px;\n"
" subcontrol-position: bottom;\n"
" subcontrol-origin: margin;\n"
"}\n"
"\n"
"QScrollBar::add-line:vertical:hover, QScrollBar::add-line:vertical:on {\n"
" border-image: url(\":/qss_icons/rc/arrow_down.png\");\n"
" height: 12px;\n"
" width: 12px;\n"
" subcontrol-position: bottom;\n"
" subcontrol-origin: margin;\n"
"}\n"
"\n"
"QScrollBar::sub-line:vertical {\n"
" margin: 3px 0px 3px 0px;\n"
" border-image: url(\":/qss_icons/rc/arrow_up_disabled.png\");\n"
" height: 12px;\n"
" width: 12px;\n"
" subcontrol-position: top;\n"
" subcontrol-origin: margin;\n"
"}\n"
"\n"
"QScrollBar::sub-line:vertical:hover, QScrollBar::sub-line:vertical:on {\n"
" border-image: url(\":/qss_icons/rc/arrow_up.png\");\n"
" height: 12px;\n"
" width: 12px;\n"
" subcontrol-position: top;\n"
" subcontrol-origin: margin;\n"
"}\n"
"QScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical {\n"
" background: none;\n"
"}\n"
"QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {\n"
" background: non"
"e;\n"
"}\n"
"\n"
"/* QTextEdit --------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-specific-widgets\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QTextEdit {\n"
" background-color: #19232D;\n"
" color: #F0F0F0;\n"
" border: 1px solid #32414B;\n"
"}\n"
"\n"
"QTextEdit:hover {\n"
" border: 1px solid #148CD2;\n"
" color: #F0F0F0;\n"
"}\n"
"\n"
"QTextEdit:selected {\n"
" background: none;\n"
" color: #32414B;\n"
"}\n"
"\n"
"/* QPlainTextEdit ---------------------------------------------------------\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QPlainTextEdit {\n"
" background-color: #19232D;\n"
" color: #F0F0F0;\n"
" border-radius: 4px;\n"
" border: 1px solid #32414B;\n"
"}\n"
"\n"
"QPlainTextEdit:hover {\n"
" border: 1px solid #148CD2;\n"
" color: #F0F0F0;\n"
"}\n"
"\n"
"QPlainTextEdit:selected {\n"
" background: none;\n"
" color"
": #32414B;\n"
"}\n"
"\n"
"/* QSizeGrip --------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qsizegrip\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QSizeGrip {\n"
" background: transparent;\n"
" width: 12px;\n"
" height: 12px;\n"
" image: url(\":/qss_icons/rc/window_grip.png\");\n"
"}\n"
"\n"
"/* QStackedWidget ---------------------------------------------------------\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QStackedWidget {\n"
" padding: 2px;\n"
" border: 1px solid #32414B;\n"
" border: 1px solid #19232D;\n"
"}\n"
"\n"
"/* QToolBar ---------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qtoolbar\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QToolBar {\n"
" background-color: #32414B;\n"
" border-bottom: 1px "
"solid #19232D;\n"
" padding: 2px;\n"
" font-weight: bold;\n"
"}\n"
"\n"
"QToolBar QToolButton {\n"
" background-color: #32414B;\n"
" border: 1px solid #32414B;\n"
"}\n"
"\n"
"QToolBar QToolButton:hover {\n"
" border: 1px solid #148CD2;\n"
"}\n"
"\n"
"QToolBar QToolButton:checked {\n"
" border: 1px solid #19232D;\n"
" background-color: #19232D;\n"
"}\n"
"\n"
"QToolBar QToolButton:checked:hover {\n"
" border: 1px solid #148CD2;\n"
"}\n"
"\n"
"QToolBar::handle:horizontal {\n"
" width: 16px;\n"
" image: url(\":/qss_icons/rc/toolbar_move_horizontal.png\");\n"
"}\n"
"\n"
"QToolBar::handle:vertical {\n"
" height: 16px;\n"
" image: url(\":/qss_icons/rc/toolbar_move_horizontal.png\");\n"
"}\n"
"\n"
"QToolBar::separator:horizontal {\n"
" width: 16px;\n"
" image: url(\":/qss_icons/rc/toolbar_separator_horizontal.png\");\n"
"}\n"
"\n"
"QToolBar::separator:vertical {\n"
" height: 16px;\n"
" image: url(\":/qss_icons/rc/toolbar_separator_vertical.png\");\n"
"}\n"
"\n"
"QToolButton#qt_toolbar_ext_button {\n"
""
" background: #32414B;\n"
" border: 0px;\n"
" color: #F0F0F0;\n"
" image: url(\":/qss_icons/rc/arrow_right.png\");\n"
"}\n"
"\n"
"/* QAbstractSpinBox -------------------------------------------------------\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QAbstractSpinBox {\n"
" background-color: #19232D;\n"
" border: 1px solid #32414B;\n"
" color: #F0F0F0;\n"
" /* This fixes 103, 111 */\n"
" padding-top: 2px;\n"
" /* This fixes 103, 111 */\n"
" padding-bottom: 2px;\n"
" padding-left: 4px;\n"
" padding-right: 4px;\n"
" border-radius: 4px;\n"
" /* min-width: 5px; removed to fix 109 */\n"
"}\n"
"\n"
"QAbstractSpinBox:up-button {\n"
" background-color: transparent #19232D;\n"
" subcontrol-origin: border;\n"
" subcontrol-position: top right;\n"
" border-left: 1px solid #32414B;\n"
" margin: 1px;\n"
"}\n"
"\n"
"QAbstractSpinBox::up-arrow, QAbstractSpinBox::up-arrow:disabled, QAbstractSpinBox::up-arrow:off {\n"
" image: url(\":/qss_icons/rc/arrow_up_disab"
"led.png\");\n"
" height: 12px;\n"
" width: 12px;\n"
"}\n"
"\n"
"QAbstractSpinBox::up-arrow:hover {\n"
" image: url(\":/qss_icons/rc/arrow_up.png\");\n"
"}\n"
"\n"
"QAbstractSpinBox:down-button {\n"
" background-color: transparent #19232D;\n"
" subcontrol-origin: border;\n"
" subcontrol-position: bottom right;\n"
" border-left: 1px solid #32414B;\n"
" margin: 1px;\n"
"}\n"
"\n"
"QAbstractSpinBox::down-arrow, QAbstractSpinBox::down-arrow:disabled, QAbstractSpinBox::down-arrow:off {\n"
" image: url(\":/qss_icons/rc/arrow_down_disabled.png\");\n"
" height: 12px;\n"
" width: 12px;\n"
"}\n"
"\n"
"QAbstractSpinBox::down-arrow:hover {\n"
" image: url(\":/qss_icons/rc/arrow_down.png\");\n"
"}\n"
"\n"
"QAbstractSpinBox:hover {\n"
" border: 1px solid #148CD2;\n"
" color: #F0F0F0;\n"
"}\n"
"\n"
"QAbstractSpinBox:selected {\n"
" background: none;\n"
" color: #32414B;\n"
"}\n"
"\n"
"/* ------------------------------------------------------------------------ */\n"
"/* DISPLAYS --------------------------------"
"------------------------------- */\n"
"/* ------------------------------------------------------------------------ */\n"
"/* QLabel -----------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qframe\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QLabel {\n"
" background-color: #262626;\n"
" border: 0px solid #32414B;\n"
" padding: 2px;\n"
" margin: 0px;\n"
" color: #F0F0F0;\n"
"}\n"
"\n"
"QLabel::disabled {\n"
" background-color: #262626;\n"
" border: 0px solid #32414B;\n"
" color: #787878;\n"
"}\n"
"\n"
"/* QTextBrowser -----------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qabstractscrollarea\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QTextBrowser {\n"
" background-color: #19232D;\n"
" border: 1px solid #32414B;\n"
" color: #F0F0F0;\n"
" border-radius: 4px;\n"
""
"}\n"
"\n"
"QTextBrowser:disabled {\n"
" background-color: #19232D;\n"
" border: 1px solid #32414B;\n"
" color: #787878;\n"
" border-radius: 4px;\n"
"}\n"
"\n"
"QTextBrowser:hover, QTextBrowser:!hover, QTextBrowser::selected, QTextBrowser::pressed {\n"
" border: 1px solid #32414B;\n"
"}\n"
"\n"
"/* QGraphicsView ----------------------------------------------------------\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QGraphicsView {\n"
" background-color: #19232D;\n"
" border: 1px solid #32414B;\n"
" color: #F0F0F0;\n"
" border-radius: 4px;\n"
"}\n"
"\n"
"QGraphicsView:disabled {\n"
" background-color: #19232D;\n"
" border: 1px solid #32414B;\n"
" color: #787878;\n"
" border-radius: 4px;\n"
"}\n"
"\n"
"QGraphicsView:hover, QGraphicsView:!hover, QGraphicsView::selected, QGraphicsView::pressed {\n"
" border: 1px solid #32414B;\n"
"}\n"
"\n"
"/* QCalendarWidget --------------------------------------------------------\n"
"\n"
"-------------------------------"
"-------------------------------------------- */\n"
"QCalendarWidget QAbstractItemView {\n"
" alternate-background-color: #484644;\n"
" color: #F0F0F0;\n"
" border: 1px solid #32414B;\n"
" border-radius: 4px;\n"
"}\n"
"\n"
"QCalendarWidget QWidget {\n"
" background-color: #262626;\n"
" border: 0px solid #444444;\n"
" padding: 0px;\n"
" color: #FFFFFF;\n"
" selection-background-color: #444444;\n"
" selection-color: #FFFFFF;\n"
"}\n"
"\n"
"QCalendarWidget QWidget::item:selected {\n"
" background-color: #1464A0;\n"
"}\n"
"\n"
"QCalendarWidget QWidget::item:hover {\n"
" background-color: #148CD2;\n"
" color: #32414B;\n"
"}\n"
"\n"
"QCalendarWidget {\n"
" border: 1px solid #32414B;\n"
" border-radius: 4px;\n"
"}\n"
"\n"
"/* QLCDNumber -------------------------------------------------------------\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QLCDNumber {\n"
" background-color: #19232D;\n"
" color: #F0F0F0;\n"
"}\n"
"\n"
"QLCD"
"Number:disabled {\n"
" background-color: #19232D;\n"
" color: #787878;\n"
"}\n"
"\n"
"/* QProgressBar -----------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qprogressbar\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QProgressBar {\n"
" background-color: #19232D;\n"
" border: 1px solid #32414B;\n"
" color: #F0F0F0;\n"
" border-radius: 4px;\n"
" text-align: center;\n"
"}\n"
"\n"
"QProgressBar:disabled {\n"
" background-color: #19232D;\n"
" border: 1px solid #32414B;\n"
" color: #787878;\n"
" border-radius: 4px;\n"
" text-align: center;\n"
"}\n"
"\n"
"QProgressBar::chunk {\n"
" background-color: none;\n"
" color: #19232D;\n"
" border-radius: 4px;\n"
"}\n"
"\n"
"QProgressBar::chunk:disabled {\n"
" background-color: #14506E;\n"
" color: #787878;\n"
" border-radius: 4px;\n"
"}\n"
"\n"
"/* ------------------------------------------------------------------------ */\n"
"/* BUTTONS --"
"-------------------------------------------------------------- */\n"
"/* ------------------------------------------------------------------------ */\n"
"/* QPushButton ------------------------------------------------------------\n"
"\n"
"https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qpushbutton\n"
"\n"
"--------------------------------------------------------------------------- */\n"
"QPushButton {\n"
" background-color: #484644;\n"
" border: 1px solid #605e5c;\n"
" color: #FFFFFF;\n"
" border-radius: 4px;\n"
" padding: 3px;\n"
" outline: none;\n"
"}\n"
"\n"
"QPushButton:disabled {\n"
" background-color: #323130;\n"
" border: 1px solid #32414B;\n"
" color: #787878;\n"
" border-radius: 4px;\n"
" padding: 3px;\n"
"}\n"
"\n"
"QPushButton:checked {\n"
" background-color: #32414B;\n"
" border: 1px solid #32414B;\n"
" border-radius: 4px;\n"
" padding: 3px;\n"
" outline: none;\n"
"}\n"
"\n"
"QPushButton:checked:disabled {\n"
" background-color: #19232D;\n"
" border: 1px solid #32414B;\n"
""
" color: #787878;\n"
" border-radius: 4px;\n"
" padding: 3px;\n"
" outline: none;\n"
"}\n"
"\n"
"QPushButton:checked:selected {\n"
" background: none;\n"
" color: #32414B;\n"
"}\n"
"\n"
"QPushButton:checked:hover {\n"
" border: 1px solid #148CD2;\n"
" color: #F0F0F0;\n"
"}\n"
"\n"
"QPushButton::menu-indicator {\n"
" subcontrol-origin: padding;\n"
" subcontrol-position: center right;\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: #323130;\n"
"}\n"
"QPushButton:pressed:hover {\n"
" background-color: #323130;\n"
"}\n"
"QPushButton:hover {\n"
" background-color: #605e5c;\n"
"}\n"
"\n"
"QPushButton:selected {\n"
" background: none;\n"
" color: #32414B;\n"
" border: 1px solid #444444;\n"
"}\n"
"\n"
"/* QToolButton | |
"""
Tests different implementations of solve functions.
"""
from __future__ import print_function
from itertools import product
from unittest import TestCase, skipIf
import numpy as np
from numpy.testing import run_module_suite, assert_allclose
from pkg_resources import parse_version
import gulinalg
class TestSolveTriangular(TestCase):
"""
Test A * x = B and it's variants where A is a triangular matrix.
Since names are abbreviated, here is what they mean:
LO - A is a Lower triangular matrix.
UP - A is a Upper diagonal matrix.
TRANS N - No tranpose, T - Transpose, C - Conjuagte Transpose
DIAG N - A is non-unit triangular, U - A is unit triangular
B - By default B is a matrix, otherwise we specify it in test name.
"""
def test_LO_TRANS_N_DIAG_N_B_VECTOR(self):
"""Test A * x = B where A is a lower triangular matrix"""
a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])
b = np.array([4, 2, 4, 2])
x = gulinalg.solve_triangular(a, b)
assert_allclose(np.dot(a, x), b, atol=1e-15)
def test_UP_TRANS_N_DIAG_N(self):
"""Test A * x = B where A is a upper triangular matrix"""
a = np.array([[1, 2, 3, 4], [0, 2, 3, 4], [0, 0, 3, 4], [0, 0, 0, 4]])
b = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]])
x = gulinalg.solve_triangular(a, b, UPLO='U')
assert_allclose(np.dot(a, x), b, atol=1e-15)
def test_UP_TRANS_T_DIAG_N(self):
"""Test A.T * x = B where A is a upper triangular matrix"""
a = np.array([[1, 2, 3, 4], [0, 2, 3, 4], [0, 0, 3, 4], [0, 0, 0, 4]])
b = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]])
x = gulinalg.solve_triangular(a, b, UPLO='U', transpose_type='T')
assert_allclose(np.dot(a.T, x), b, atol=1e-15)
def test_UP_TRANS_C_DIAG_N(self):
"""Test A.H * x = B where A is a upper triangular matrix"""
a = np.array([[1 + 2j, 2 + 2j], [0, 1 + 1j]])
b = np.array([[1 + 0j, 0], [0, 1 + 0j]])
ref = np.array([[0.2+0.4j, -0.0+0.j], [-0.4-0.8j, 0.5+0.5j]])
x = gulinalg.solve_triangular(a, b, UPLO='U', transpose_type='C')
assert_allclose(x, ref, atol=1e-15)
def test_UP_TRANS_N_DIAG_U(self):
"""
Test A * x = B where A is a upper triangular matrix and diagonal
elements are considered unit diagonal.
"""
a = np.array([[1, 2, 3, 4], [0, 2, 3, 4], [0, 0, 3, 4], [0, 0, 0, 4]])
b = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]])
res = gulinalg.solve_triangular(a, b, UPLO='U', unit_diagonal=True)
# DIAG='U' assumes that diagonal elements are 1.
a_unit_diag = np.array([[1, 2, 3, 4], [0, 1, 3, 4],
[0, 0, 1, 4], [0, 0, 0, 1]])
ref = gulinalg.solve_triangular(a_unit_diag, b, UPLO='U')
assert_allclose(res, ref, atol=1e-15)
def test_UP_TRANS_T_DIAG_U(self):
"""
Test A.T * x = B where A is a upper triangular matrix and diagonal
elements are considered unit diagonal.
"""
a = np.array([[1, 2, 3, 4], [0, 2, 3, 4], [0, 0, 3, 4], [0, 0, 0, 4]])
b = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]])
res = gulinalg.solve_triangular(
a, b, UPLO='U', transpose_type='T', unit_diagonal=True)
# DIAG='U' assumes that diagonal elements are 1.
a_unit_diag = np.array([[1, 2, 3, 4], [0, 1, 3, 4],
[0, 0, 1, 4], [0, 0, 0, 1]])
ref = gulinalg.solve_triangular(
a_unit_diag, b, UPLO='U', transpose_type='T')
assert_allclose(res, ref, atol=1e-15)
def test_UP_TRANS_C_DIAG_U(self):
"""
Test A.H * x = B where A is a upper triangular matrix and diagonal
elements are considered unit diagonal.
"""
a = np.array([[1 + 2j, 2 + 2j], [0, 1 + 1j]])
b = np.array([[1, 0], [0, 1]])
res = gulinalg.solve_triangular(
a, b, UPLO='U', transpose_type='C', unit_diagonal=True)
# DIAG='U' assumes that diagonal elements are 1.
a_unit_diag = np.array([[1, 2 + 2j], [0, 1]])
ref = gulinalg.solve_triangular(
a_unit_diag, b, UPLO='U', transpose_type='C')
assert_allclose(res, ref, atol=1e-15)
def test_fortran_layout_matrix(self):
"""Input matrices have fortran layout"""
a = np.asfortranarray([[1, 2, 3, 4], [0, 2, 3, 4],
[0, 0, 3, 4], [0, 0, 0, 4]])
b = np.asfortranarray([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]])
res = gulinalg.solve_triangular(
a, b, UPLO='U', transpose_type='T', unit_diagonal=True)
# DIAG='U' assumes that diagonal elements are 1.
a_unit_diag = np.asfortranarray([[1, 2, 3, 4], [0, 1, 3, 4],
[0, 0, 1, 4], [0, 0, 0, 1]])
ref = gulinalg.solve_triangular(
a_unit_diag, b, UPLO='U', transpose_type='T'
)
assert_allclose(res, ref, atol=1e-15)
def test_input_matrix_non_contiguous(self):
"""Input matrix is not a contiguous matrix"""
a = np.asfortranarray(
[[[1, 2, 3, 4], [0, 2, 3, 4], [0, 0, 3, 4], [0, 0, 0, 4]],
[[1, 2, 3, 4], [0, 2, 3, 4], [0, 0, 3, 4], [0, 0, 0, 4]]])[0]
b = np.ascontiguousarray([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]])
assert not a.flags.c_contiguous and not a.flags.f_contiguous
x = gulinalg.solve_triangular(a, b, UPLO='U')
assert_allclose(np.dot(a, x), b, atol=1e-15)
@skipIf(parse_version(np.__version__) < parse_version('1.13'),
"Prior to 1.13, numpy low level iterators didn't support removing "
"empty axis. So gufunc couldn't be called with empty inner loop")
def test_m_and_n_zero(self):
"""Corner case of solving where m = 0 and n = 0"""
a = np.ascontiguousarray(np.random.randn(0, 0))
b = np.ascontiguousarray(np.random.randn(0, 0))
x = gulinalg.solve_triangular(a, b, UPLO='U')
assert x.shape == (0, 0)
assert_allclose(np.dot(a, x), b, atol=1e-15)
@skipIf(parse_version(np.__version__) < parse_version('1.13'),
"Prior to 1.13, numpy low level iterators didn't support removing "
"empty axis. So gufunc couldn't be called with empty inner loop")
def test_m_zero(self):
"""Corner case of solving where m = 0"""
a = np.ascontiguousarray(np.random.randn(0, 0))
b = np.ascontiguousarray(np.random.randn(0, 2))
x = gulinalg.solve_triangular(a, b, UPLO='U')
assert x.shape == (0, 2)
assert_allclose(np.dot(a, x), b, atol=1e-15)
@skipIf(parse_version(np.__version__) < parse_version('1.13'),
"Prior to 1.13, numpy low level iterators didn't support removing "
"empty axis. So gufunc couldn't be called with empty inner loop")
def test_n_zero(self):
"""Corner case of solving where n = 0"""
a = np.ascontiguousarray(np.random.randn(2, 2))
b = np.ascontiguousarray(np.random.randn(2, 0))
x = gulinalg.solve_triangular(a, b, UPLO='U')
assert x.shape == (2, 0)
assert_allclose(np.dot(a, x), b, atol=1e-15)
def test_size_one_matrices(self):
"""Corner case of decomposing where m = 1 and n = 1"""
a = np.ascontiguousarray(np.random.randn(1, 1))
b = np.ascontiguousarray(np.random.randn(1, 1))
x = gulinalg.solve_triangular(a, b, UPLO='U')
assert x.shape == (1, 1)
assert_allclose(np.dot(a, x), b, atol=1e-15)
def test_vector(self):
"""test vectorized solve triangular"""
e = np.array([[1, 2, 3, 4], [0, 2, 3, 4], [0, 0, 3, 4], [0, 0, 0, 4]])
a = np.stack([e for _ in range(10)])
b = np.stack([np.array([[1, 0, 0], [0, 1, 0],
[0, 0, 1], [0, 0, 0]]) for _ in range(10)])
for workers in [1, -1]:
x = gulinalg.solve_triangular(a, b, UPLO='U', workers=workers)
res = np.stack([np.dot(a[i], x[i]) for i in range(len(a))])
assert_allclose(res, b, atol=1e-15)
@skipIf(parse_version(np.__version__) < parse_version('1.13'),
"Prior to 1.13, numpy low level iterators didn't support removing "
"empty axis. So gufunc couldn't be called with empty inner loop")
def test_vector_m_and_n_zero(self):
"""Corner case of solving where m = 0 and n = 0"""
a = np.ascontiguousarray(np.random.randn(10, 0, 0))
b = np.ascontiguousarray(np.random.randn(10, 0, 0))
x = gulinalg.solve_triangular(a, b, UPLO='U')
assert x.shape == (10, 0, 0)
res = np.stack([np.dot(a[i], x[i]) for i in range(len(a))])
assert_allclose(res, b, atol=1e-15)
@skipIf(parse_version(np.__version__) < parse_version('1.13'),
"Prior to 1.13, numpy low level iterators didn't support removing "
"empty axis. So gufunc couldn't be called with empty inner loop")
def test_vector_m_zero(self):
"""Corner case of solving where m = 0"""
a = np.ascontiguousarray(np.random.randn(10, 0, 0))
b = np.ascontiguousarray(np.random.randn(10, 0, 2))
x = gulinalg.solve_triangular(a, b, UPLO='U')
assert x.shape == (10, 0, 2)
res = np.stack([np.dot(a[i], x[i]) for i in range(len(a))])
assert_allclose(res, b, atol=1e-15)
@skipIf(parse_version(np.__version__) < parse_version('1.13'),
"Prior to 1.13, numpy low level iterators didn't support removing "
"empty axis. So gufunc couldn't be called with empty inner loop")
def test_vector_n_zero(self):
"""Corner case of solving where n = 0"""
a = np.ascontiguousarray(np.random.randn(10, 2, 2))
b = np.ascontiguousarray(np.random.randn(10, 2, 0))
x = gulinalg.solve_triangular(a, b, UPLO='U')
assert x.shape == (10, 2, 0)
res = np.stack([np.dot(a[i], x[i]) for i in range(len(a))])
assert_allclose(res, b, atol=1e-15)
def test_vector_size_one_matrices(self):
"""Corner case of solving where m = 1 and n = 1"""
a = np.ascontiguousarray(np.random.randn(10, 1, 1))
b = np.ascontiguousarray(np.random.randn(10, 1, 1))
x = gulinalg.solve_triangular(a, b, UPLO='U')
assert x.shape == (10, 1, 1)
res = np.stack([np.dot(a[i], x[i]) for i in | |
import base64
import json
import random
import re
import sys
import time
import urllib
from typing import List
from bs4 import BeautifulSoup
import requests
import requests_cache
from mediawiki import mediawiki
import mwparserfromhell as mwph
from requests.adapters import HTTPAdapter
from ceimport import chunks, logger
def make_throttle_hook(min_delay_ms=500, max_delay_ms=3000):
    """Return a requests response hook that throttles live (uncached) requests.

    The returned hook sleeps for a random delay between ``min_delay_ms`` and
    ``max_delay_ms`` milliseconds whenever the response did not come from the
    local cache, so live requests to the remote server are rate limited while
    cached replays stay fast.  (The previous docstring referred to a
    nonexistent ``timeout`` argument; the bounds are now real parameters with
    the old hard-coded values as defaults.)

    Args:
        min_delay_ms: minimum sleep in milliseconds (default 500).
        max_delay_ms: maximum sleep in milliseconds (default 3000).

    Returns:
        A hook callable suitable for ``session.hooks = {'response': hook}``.
    """
    def hook(response, *args, **kwargs):
        # `from_cache` is set by requests_cache; it is absent on live
        # responses, hence the getattr default.
        if not getattr(response, 'from_cache', False):
            print('sleeping')
            time.sleep(random.randint(min_delay_ms, max_delay_ms) / 1000.0)
        return response
    return hook
# Shared HTTP session used by every function in this module: responses are
# cached on disk, and the throttle hook sleeps before uncached requests so
# we do not hammer the remote servers.
session = requests_cache.CachedSession()
session.hooks = {'response': make_throttle_hook()}
# Retry transient network failures up to 5 times on both schemes.
adapter = HTTPAdapter(max_retries=5)
session.mount("https://", adapter)
session.mount("http://", adapter)
def get_titles_in_category(mw, category):
    """Return the page titles that belong to a MediaWiki category.

    Args:
        mw: a MediaWiki object pointing to an API
        category: the category title to get page titles from

    Returns:
        The list of page titles (subcategory names are discarded).
    """
    # categorymembers returns (pages, subcategories); keep only the pages.
    members = mw.categorymembers(category, results=None, subcategories=True)
    return members[0]
def read_source(source: str) -> str:
    """Fetch a URL and return its body as text.

    Args:
        source: the URL of the page to load

    Returns:
        the contents of the page, or None when the server answers with an
        HTTP error status
    """
    # Present a browser user agent; some hosts refuse default clients.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
    response = session.get(source, headers=headers)
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError:
        return None
    return response.text
def get_page_title(source):
    """Return the text of a page's <title> tag.

    Returns None when the page cannot be fetched or has no <title> tag.
    """
    html = read_source(source)
    if html is None:
        return None
    title_tag = BeautifulSoup(html, features="lxml").find("title")
    return title_tag.text if title_tag else None
def special_link_to_download_url(special_link, download_id):
    """Resolve an IMSLP special download link to the direct file URL.

    Requests the link with the disclaimer cookie set and redirects disabled,
    then returns the redirect target from the Location header.

    Args:
        special_link: the Special:... URL to resolve.
        download_id: unused; kept for backward compatibility with callers.
            TODO confirm whether it should participate in building the URL.

    Returns:
        The redirect target URL, or None if the server did not redirect.
    """
    # BUG FIX: the previous version requested the empty string ("") instead
    # of the special link, so it could never resolve anything, and indexing
    # headers['Location'] raised KeyError when no redirect was returned.
    r = session.get(special_link, cookies={"imslpdisclaimeraccepted": "yes"},
                    allow_redirects=False)
    return r.headers.get('Location')
def get_pages_for_category(mw, category_name, page_name=None, num_pages=None):
    """List the page titles of a category, optionally filtered or truncated.

    Args:
        mw: a MediaWiki object pointing to an API
        category_name: category whose member pages are listed
        page_name: if given, return only this page (raises if absent)
        num_pages: if given, cap the number of returned titles

    Returns:
        A list of page titles.

    Raises:
        Exception: if page_name is given but not in the category.
    """
    print("Getting pages for category {}".format(category_name))
    titles = get_titles_in_category(mw, category_name)
    if page_name:
        if page_name in titles:
            return [page_name]
        raise Exception("Asked for page '{}' but it's not here".format(page_name))
    if num_pages:
        print("Limiting number of pages to {}".format(num_pages))
        titles = titles[:num_pages]
    return titles
def category_pagelist(category_name: str):
    """Return every page title in the given IMSLP category."""
    api = mediawiki.MediaWiki(url='https://imslp.org/api.php', rate_limit=True)
    return get_pages_for_category(api, category_name)
def get_wiki_content_for_pages(pages: List[str]):
    """Use the mediawiki api to load Wikitext for a list of pages.

    Args:
        pages: up to 50 page titles (the MediaWiki API limit per request).

    Returns:
        A list of ``{"title": ..., "content": ...}`` dicts, one per page
        that exists and has at least one revision.  Returns [] when the
        API response body cannot be decoded as JSON.

    Raises:
        ValueError: if more than 50 pages are requested.
    """
    if len(pages) > 50:
        raise ValueError("can only do up to 50 pages")
    query = "|".join(pages)
    params = {
        "action": "query",
        "prop": "revisions",
        "titles": query,
        "rvslots": "*",
        "rvprop": "content",
        "formatversion": "2",
        "format": "json"
    }
    url = 'https://imslp.org/api.php'
    r = session.get(url, params=params)
    r.raise_for_status()
    try:
        j = r.json()
    except ValueError:
        return []
    # The imslp api returns a dictionary where page ids are the keys
    # (this is different to the cpdl one). Content lives at
    # ["query"]["pages"][<pageid>]["revisions"][0]["*"].
    # Renamed from `pages` to avoid shadowing the parameter.
    result_pages = j.get("query", {}).get("pages", {})
    ret = []
    for pageid, page in result_pages.items():
        if pageid == "-1" and "missing" in page:
            # BUG FIX: this previously fell through (`pass`) and kept
            # processing the missing page; skip it instead. TODO: log it.
            continue
        title = page["title"]
        revisions = page.get("revisions")
        if revisions:
            text = revisions[0].get("*")
            ret.append({"title": title, "content": text})
    return ret
def api_all_pages():
    """Download the list of all composition pages in IMSLP, including the
    category which represents the work's composer.

    Pages are fetched 1000 at a time from the IMSLP worklist API and the
    accumulated list is written to ``all-imslp-pages.json``.
    """
    base_url = "https://imslp.org/imslpscripts/API.ISCR.php?account=worklist/disclaimer=accepted/sort=id/type=2/start={}/retformat=json"
    hasnext = True
    start = 0
    alldata = []
    while hasnext:
        url = base_url.format(start)
        r = session.get(url)
        j = r.json()
        metadata = j.get('metadata', {})
        # BUG FIX: when the response carried no metadata, `hasnext` was
        # never updated and the loop ran forever; stop in that case.
        if metadata:
            hasnext = metadata.get('moreresultsavailable', False)
        else:
            hasnext = False
        # Each batch is keyed "0".."999"; collect the entries that exist.
        for i in range(1000):
            data = j.get(str(i))
            if data:
                alldata.append(data)
        start += 1000
    with open("all-imslp-pages.json", "w") as fp:
        json.dump(alldata, fp)
def parse_imslp_date(year, month, day):
    """Format an imslp (year, month, day) triple as an ISO-8601 date string.

    A date is returned only when all three components are present and
    parseable as integers.  This prevents parsing items that only have some
    components (e.g. yyyy-mm) or approximate values (e.g. "c 1600"), both of
    which yield None.
    """
    if not (year and month and day):
        return None
    try:
        return "{:d}-{:02d}-{:02d}".format(int(year), int(month), int(day))
    except ValueError:
        return None
def imslp_api_raw_query(page_name):
    """Query the custom IMSLP API for parsed metadata about one page.

    The page name is url-quoted and then base64-encoded to form the request
    id.  Returns the decoded JSON document, or {} when the response body is
    not valid JSON (the body is printed for debugging in that case).
    """
    encoded = base64.b64encode(
        urllib.parse.quote(page_name).encode("utf-8")).decode('utf-8')
    url = f"https://imslp.org/imslpscripts/API.ISCR.php?retformat=json/disclaimer=accepted/type=0/id={encoded}"
    r = session.get(url)
    try:
        return r.json()
    except ValueError:
        print(r.text)
        return {}
def api_composer_get_relations(composer_name):
    """Map a composer's wikidata authority links to external identifiers.

    Returns a dict with any of the keys worldcat, viaf, wikipedia,
    musicbrainz, isni, loc; {} when the composer is unknown.
    """
    response = imslp_api_raw_query(composer_name)
    if "0" not in response:
        return {}
    authorities = (response["0"]["intvals"]
                   .get("wikidata", {})
                   .get("authorities", []))
    relations = {}
    # Each authority row is (wiki link markup, url, identifier); which field
    # identifies the service differs per source, hence the mixed checks.
    for link, url, identifier in authorities:
        if identifier == "Worldcat":
            relations['worldcat'] = url
        elif link == "[[wikipedia:Virtual International Authority File|VIAF]]":
            relations['viaf'] = url
        elif identifier == "Wikipedia":
            relations['wikipedia'] = url
        elif link == "[[wikipedia:MusicBrainz|MusicBrainz]]":
            relations['musicbrainz'] = identifier
        elif link == "[[wikipedia:International Standard Name Identifier|ISNI]]":
            relations['isni'] = url
        elif link == "[[wikipedia:Library of Congress Control Number|LCCN]]":
            relations['loc'] = url
    return relations
def api_composer(composer_name):
    """
    Load a composer from IMSLP and return a dictionary adequate to create
    a Person on the CE.

    Arguments:
        composer_name: an imslp Category name for a composer

    Returns:
        A dict of Person fields, or {} when the composer is unknown to the
        API or the composer's page cannot be fetched.
    """
    response = imslp_api_raw_query(composer_name)
    if "0" not in response:
        return {}
    composer = response["0"]
    extvals = composer["extvals"]
    intvals = composer["intvals"]
    image = intvals.get("picturelinkraw")
    if image:
        image = f"https://imslp.org{image}"
    composer_source = composer["permlink"]
    # Make a second query to get the actual html title
    if read_source(composer_source) is None:
        return {}
    return {
        'contributor': 'https://imslp.org',
        'source': composer_source,
        'format_': 'text/html',
        'language': 'en',
        'title': get_page_title(composer_source),
        'name': intvals.get("normalname"),
        'gender': extvals.get("Sex"),
        'family_name': intvals.get("lastname"),
        'given_name': intvals.get("firstname"),
        'birth_date': parse_imslp_date(extvals.get("Born Year"),
                                       extvals.get("Born Month"),
                                       extvals.get("Born Day")),
        'death_date': parse_imslp_date(extvals.get("Died Year"),
                                       extvals.get("Died Month"),
                                       extvals.get("Died Day")),
        'image': image
    }
def api_work(work_name):
    """Load a work from IMSLP and return a dict adequate to load MusicComposition into CE
    There are two places where we can get metadata from:
    - one is the wikitext of the page
    - the other is the IMSLP API for a page, given the base64 of a title
    https://imslp.org/imslpscripts/API.ISCR.php?retformat=json/disclaimer=accepted/type=0/id=VmFyaWF0aW9ucyBhbmQgRnVndWUgaW4gRS1mbGF0IG1ham9yLCBPcC4zNSAoQmVldGhvdmVuLCBMdWR3aWcgdmFuKQ==

    Returns:
        {"work": <dict of MusicComposition fields, {} if the HTML page could
        not be fetched>, "composer": <parent category name, "" on failure>,
        "musicbrainz_work_id": <id or None>}
    """
    url = "https://imslp.org/wiki/" + work_name.replace(" ", "_")
    html_page = read_source(url)
    api_page = imslp_api_raw_query(work_name.replace("_", " "))
    api_page = api_page.get('0', {})
    wikitext = get_wiki_content_for_pages([work_name])
    # NOTE(review): assumes get_wiki_content_for_pages returned at least one
    # item; wikitext[0] raises IndexError otherwise — confirm callers.
    parsed = mwph.parse(wikitext[0]["content"])
    musicbrainz_work_id = None
    templates = parsed.filter_templates()
    for t in templates:
        if t.name == "MusBrnzW":
            # NOTE(review): t.params[0] is an mwparserfromhell Parameter
            # object, not a plain str — confirm downstream code stringifies.
            musicbrainz_work_id = t.params[0]
    # Maps the API's Language value (lowercased) to a BCP-47 code.
    language_mapping = {'english': 'en',
                        'german': 'de',
                        'spanish': 'es',
                        'french': 'fr',
                        'dutch': 'nl',
                        'catalan': 'ca'}
    if html_page is not None:
        title = get_page_title(url)
        inlanguage = None
        language = api_page.get('extvals', {}).get('Language')
        if language:
            inlanguage = language_mapping.get(language.lower())
            if inlanguage is None:
                print(f"No mapping for language {language}")
        name = api_page.get('extvals', {}).get('Work Title')
        # The API's 'parent' is the composer category of this work.
        composer = api_page.get('parent')
        work_dict = {
            'title': title,
            'name': name,
            'contributor': 'https://imslp.org',
            # 'language' is the language of the IMSLP page itself;
            # 'inlanguage' is the language of the work (may be None).
            'source': url,
            'format_': 'text/html',
            'language': 'en',
            'inlanguage': inlanguage
        }
    else:
        work_dict = {}
        composer = ""
    return {"work": work_dict,
            "composer": composer,
            "musicbrainz_work_id": musicbrainz_work_id}
def get_mediaobject_for_filename(work_wikitext, filename):
    """
    If we have a specific file that we want to import (looked up from a Special:ReverseLookup)
    then find that file in the provided wikitext and return information to create a MediaObject

    Returns an empty dict if the page structure or the file cannot be found.

    TODO: This shares a lot of common code with `files_for_work`
    """
    # Filename doesn't include File: prefix in the template
    if filename.startswith("File:"):
        filename = filename.replace("File:", "")
    parsed = mwph.parse(work_wikitext["content"])
    # A page should have one node, the #fte:imslppage template
    nodes = parsed.nodes
    if not nodes or str(nodes[0].name).strip() != "#fte:imslppage":
        logger.info("Cannot find #fte:imslppage node, skipping")
        return {}
    node = nodes[0]
    # One of the parameters in this template is ' *****FILES***** '
    files_param = None
    for param in node.params:
        if param.name == ' *****FILES***** ':
            files_param = param
            break
    if files_param:
        files = files_param.value
        file_node = None
        # Scan the FILES block for the #fte:imslpfile template that has a
        # "File Name N" parameter matching the requested filename.
        for node in files.nodes:
            is_file_node = False
            if hasattr(node, 'name') and node.name.strip() == "#fte:imslpfile":
                for fileparam in node.params:
                    if "File Name" in fileparam.name and str(fileparam.value).strip() == filename.strip():
                        file_node = node
                        is_file_node = True
                        break
            if is_file_node:
                break
        if file_node:
            # Map parameter name -> stripped text value,
            # e.g. "File Name 1" -> "PMLP...pdf".
            node_to_dict = {str(n.name): str(n.value).strip() for n in file_node.params}
            chosen_file = [n for n, v in node_to_dict.items() if v == filename]
            # Numeric suffix of the matching parameter ("File Name 3" -> "3");
            # used to pick the corresponding "File Description N" below.
            file_index = chosen_file[0].replace("File Name ", "")
            license = node_to_dict.get("Copyright")
            title = work_wikitext["title"].replace(" ", "_")
            url = "http://imslp.org/wiki/" + title
            this_file = node_to_dict[f"File Name {file_index}"]
            this_desc = node_to_dict[f"File Description {file_index}"]
            this_file = "File:" + this_file
            permalink = get_permalink_from_filename(title, this_file.replace("_", " "))
            file_url = "http://imslp.org/wiki/" + this_file
            file_title = get_page_title(file_url)
            # TODO: Person who published, transcribed work. Date of publication on imslp?
            file_dict = {
                'title': file_title,
                'name': this_file,
                'contributor': 'https://imslp.org',
                'source': url,
                'url': permalink,
                'format_': 'text/html',
                'language': 'en',
                'license': license,
                'description': this_desc,
            }
            return file_dict
    return {}
def files_for_work(work_wikitext):
"""Get MediaObject information for files relevant to the work
If the work has an xml file, get the xml and the pdf associated with it
Arguments:
work_wikitext: the result of get_wiki_content_for_pages of a work
"""
parsed = mwph.parse(work_wikitext["content"])
# A page should have one node, the | |
name of the final blob is kept.
"""
v1Name = _messages.StringField(1)
v2Blob = _messages.StringField(2, repeated=True)
v2Name = _messages.StringField(3)
class GerritSourceContext(_messages.Message):
  r"""A SourceContext referring to a Gerrit project.

  Fields:
    aliasContext: An alias, which may be a branch or tag.
    gerritProject: The full project name within the host. Projects may be
      nested, so "project/subproject" is a valid project name. The "repo name"
      is the hostURI/project.
    hostUri: The URI of a running Gerrit instance.
    revisionId: A revision (commit) ID.
  """

  # Positional integers are the proto field numbers; do not renumber.
  aliasContext = _messages.MessageField('AliasContext', 1)
  gerritProject = _messages.StringField(2)
  hostUri = _messages.StringField(3)
  revisionId = _messages.StringField(4)
class GitSourceContext(_messages.Message):
  r"""A GitSourceContext denotes a particular revision in a third party Git
  repository (e.g., GitHub).

  Fields:
    revisionId: Git commit hash.
    url: Git repository URL.
  """

  # Proto field numbers 1 and 2; keep stable for wire compatibility.
  revisionId = _messages.StringField(1)
  url = _messages.StringField(2)
class Hash(_messages.Message):
  r"""Container message for hash values.

  Fields:
    type: Required. The type of hash that was performed, e.g. "SHA-256".
    value: Required. The hash value.
  """

  type = _messages.StringField(1)
  value = _messages.BytesField(2)  # raw bytes field, not a hex string
class Identity(_messages.Message):
  r"""The unique identifier of the update.

  Fields:
    revision: The revision number of the update.
    updateId: The revision independent identifier of the update.
  """

  revision = _messages.IntegerField(1, variant=_messages.Variant.INT32)  # 32-bit on the wire
  updateId = _messages.StringField(2)
class ImageOccurrence(_messages.Message):
  r"""Details of the derived image portion of the DockerImage relationship.
  This image would be produced from a Dockerfile with FROM .

  Fields:
    baseResourceUrl: Output only. This contains the base image URL for the
      derived image occurrence.
    distance: Output only. The number of layers by which this image differs
      from the associated image basis.
    fingerprint: Required. The fingerprint of the derived image.
    layerInfo: This contains layer-specific metadata, if populated it has
      length "distance" and is ordered with [distance] being the layer
      immediately following the base image and [1] being the final layer.
  """

  baseResourceUrl = _messages.StringField(1)
  distance = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  fingerprint = _messages.MessageField('Fingerprint', 3)
  layerInfo = _messages.MessageField('Layer', 4, repeated=True)  # repeated: zero or more Layer entries
class InTotoProvenance(_messages.Message):
  r"""A InTotoProvenance object.

  Fields:
    builderConfig: required
    materials: The collection of artifacts that influenced the build including
      sources, dependencies, build tools, base images, and so on. This is
      considered to be incomplete unless metadata.completeness.materials is
      true. Unset or null is equivalent to empty.
    metadata: A Metadata attribute.
    recipe: Identifies the configuration used for the build. When combined
      with materials, this SHOULD fully describe the build, such that re-
      running this recipe results in bit-for-bit identical output (if the
      build is reproducible). required
  """

  builderConfig = _messages.MessageField('BuilderConfig', 1)
  materials = _messages.StringField(2, repeated=True)  # list of artifact URIs
  metadata = _messages.MessageField('Metadata', 3)
  recipe = _messages.MessageField('Recipe', 4)
class InTotoStatement(_messages.Message):
  r"""Spec defined at https://github.com/in-
  toto/attestation/tree/main/spec#statement The serialized InTotoStatement
  will be stored as Envelope.payload. Envelope.payloadType is always
  "application/vnd.in-toto+json".

  Fields:
    _type: Always "https://in-toto.io/Statement/v0.1".
    predicateType: "https://slsa.dev/provenance/v0.1" for SlsaProvenance.
    provenance: A InTotoProvenance attribute.
    slsaProvenance: A SlsaProvenance attribute.
    subject: A Subject attribute.
  """

  # Attribute name mirrors the spec's "_type" JSON key (leading underscore intended).
  _type = _messages.StringField(1)
  predicateType = _messages.StringField(2)
  provenance = _messages.MessageField('InTotoProvenance', 3)
  slsaProvenance = _messages.MessageField('SlsaProvenance', 4)
  subject = _messages.MessageField('Subject', 5, repeated=True)
class Jwt(_messages.Message):
  r"""A Jwt object.

  Fields:
    compactJwt: The compact encoding of a JWS, which is always three base64
      encoded strings joined by periods. For details, see:
      https://tools.ietf.org/html/rfc7515.html#section-3.1
  """

  compactJwt = _messages.StringField(1)
class Layer(_messages.Message):
  r"""Layer holds metadata specific to a layer of a Docker image.

  Fields:
    arguments: The recovered arguments to the Dockerfile directive.
    directive: Required. The recovered Dockerfile directive used to construct
      this layer. See https://docs.docker.com/engine/reference/builder/ for
      more information.
  """

  arguments = _messages.StringField(1)
  directive = _messages.StringField(2)
class ListOperationsResponse(_messages.Message):
  r"""The response message for Operations.ListOperations.

  Fields:
    nextPageToken: The standard List next-page token.
    operations: A list of operations that matches the specified filter in the
      request.
  """

  nextPageToken = _messages.StringField(1)
  operations = _messages.MessageField('Operation', 2, repeated=True)  # one page of results
class ListVulnerabilitiesResponseV1(_messages.Message):
  r"""ListVulnerabilitiesResponse contains a single page of vulnerabilities
  resulting from a scan.

  Fields:
    nextPageToken: A page token that can be used in a subsequent call to
      ListVulnerabilities to continue retrieving results.
    occurrences: The list of Vulnerability Occurrences resulting from a scan.
  """

  nextPageToken = _messages.StringField(1)
  occurrences = _messages.MessageField('Occurrence', 2, repeated=True)
class Location(_messages.Message):
  r"""An occurrence of a particular package installation found within a
  system's filesystem. E.g., glibc was found in `/var/lib/dpkg/status`.

  Fields:
    cpeUri: Required. The CPE URI in [CPE
      format](https://cpe.mitre.org/specification/) denoting the package
      manager version distributing a package.
    path: The path from which we gathered that this package/version is
      installed.
    version: The version installed at this location.
  """

  cpeUri = _messages.StringField(1)
  path = _messages.StringField(2)
  version = _messages.MessageField('Version', 3)
class Material(_messages.Message):
  r"""A Material object.

  Messages:
    DigestValue: A DigestValue object.

  Fields:
    digest: A DigestValue attribute.
    uri: A string attribute.
  """

  # MapUnrecognizedFields folds arbitrary JSON keys into the
  # additionalProperties list, modelling a proto map<string, string>.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class DigestValue(_messages.Message):
    r"""A DigestValue object.

    Messages:
      AdditionalProperty: An additional property for a DigestValue object.

    Fields:
      additionalProperties: Additional properties of type DigestValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a DigestValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  digest = _messages.MessageField('DigestValue', 1)
  uri = _messages.StringField(2)
class Metadata(_messages.Message):
  r"""Other properties of the build.

  Fields:
    buildFinishedOn: The timestamp of when the build completed.
    buildInvocationId: Identifies the particular build invocation, which can
      be useful for finding associated logs or other ad-hoc analysis. The
      value SHOULD be globally unique, per in-toto Provenance spec.
    buildStartedOn: The timestamp of when the build started.
    completeness: Indicates that the builder claims certain fields in this
      message to be complete.
    reproducible: If true, the builder claims that running the recipe on
      materials will produce bit-for-bit identical output.
  """

  # Timestamps are carried as strings, per the generated schema.
  buildFinishedOn = _messages.StringField(1)
  buildInvocationId = _messages.StringField(2)
  buildStartedOn = _messages.StringField(3)
  completeness = _messages.MessageField('Completeness', 4)
  reproducible = _messages.BooleanField(5)
class NonCompliantFile(_messages.Message):
  r"""Details about files that caused a compliance check to fail.

  Fields:
    displayCommand: Command to display the non-compliant files.
    path: display_command is a single command that can be used to display a
      list of non compliant files. When there is no such command, we can also
      iterate a list of non compliant file using 'path'. Empty if
      `display_command` is set.
    reason: Explains why a file is non compliant for a CIS check.
  """

  displayCommand = _messages.StringField(1)
  path = _messages.StringField(2)
  reason = _messages.StringField(3)
class Occurrence(_messages.Message):
r"""An instance of an analysis type that has been found on a resource.
Enums:
KindValueValuesEnum: Output only. This explicitly denotes which of the
occurrence details are specified. This field can be used as a filter in
list requests.
Fields:
attestation: Describes an attestation of an artifact.
build: Describes a verifiable build.
compliance: Describes a compliance violation on a linked resource.
createTime: Output only. The time this occurrence was created.
deployment: Describes the deployment of an artifact on a runtime.
discovery: Describes when a resource was discovered.
dsseAttestation: Describes an attestation of an artifact using dsse.
envelope: https://github.com/secure-systems-lab/dsse
image: Describes how this resource derives from the basis in the
associated note.
kind: Output only. This explicitly denotes which of the occurrence details
are specified. This field can be used as a filter in list requests.
name: Output only. The name of the occurrence in the form of
`projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`.
noteName: Required. Immutable. The analysis note associated with this
occurrence, in the form of `projects/[PROVIDER_ID]/notes/[NOTE_ID]`.
This field can be used as a filter in list requests.
package: Describes the installation of a package on the linked resource.
remediation: A description of actions that can be taken to remedy the
note.
resourceUri: Required. Immutable. A URI that represents the resource for
which the occurrence applies. For example,
`https://gcr.io/project/image@sha256:123abc` for a Docker image.
updateTime: Output only. The time this occurrence was last updated.
upgrade: Describes an available package upgrade on the linked resource.
vulnerability: Describes a security vulnerability.
"""
class KindValueValuesEnum(_messages.Enum):
r"""Output only. This explicitly denotes which of the occurrence details
are specified. This field can be used as a filter in list requests.
Values:
NOTE_KIND_UNSPECIFIED: Default value. This value is unused.
VULNERABILITY: The note and occurrence represent a package
vulnerability.
BUILD: The note and occurrence assert build provenance.
IMAGE: This represents an image basis relationship.
PACKAGE: This represents a package installed via a package manager.
DEPLOYMENT: The note and occurrence track deployment events.
DISCOVERY: The note and occurrence track the initial discovery status of
a resource.
ATTESTATION: This represents a logical "role" that can attest to
artifacts.
UPGRADE: This | |
RegressionModels.random_forest_regressor
elif model_name == "SVR":
train_model_fun = RegressionModels.support_vector_regressor
elif model_name == "AdaBoostRegressor":
train_model_fun = RegressionModels.ada_boost_regressor
elif model_name == "GradientBoostingRegressor":
train_model_fun = RegressionModels.gradient_boosting_regressor
elif model_name == "LogisticRegression":
train_model_fun = ClassificationModels.logistic_regression_classifier
elif model_name == "SVC":
train_model_fun = ClassificationModels.support_vector_classifier
elif model_name == "KNeighborsClassifier":
train_model_fun = ClassificationModels.k_neighbors_classifier
elif model_name == "DecisionTreeClassifier":
train_model_fun = ClassificationModels.decision_tree_classifier
elif model_name == "RandomForestClassifier":
train_model_fun = ClassificationModels.random_forest_classifier
elif model_name == "AdaBoostClassifier":
train_model_fun = ClassificationModels.ada_boost_classifier
elif model_name == "GradientBoostClassifier":
train_model_fun = ClassificationModels.gradient_boosting_classifier
else:
return 'Non-Implemented Action'
trained_model = train_model_fun(X, y, True, **model_params)
"""Save Final Model"""
save_project_model(trained_model, 'model.pkl')
query = f'''Update tblProjects Set Model_Trained=1 Where Id="{session.get('pid')}"'''
mysql.update_record(query)
logger.info('Final Training Done')
ProjectReports.insert_record_ml('Final Training Done')
return render_template('model_training/congrats.html')
elif session['project_type'] == 3:
X = df
model = load_project_model()
if model is None:
return render_template('model_training/model_result.html', action=action,
status="error",
msg="Model is not found, please train model again")
else:
model_params = {}
for key, value in model.get_params().items():
model_params[key] = value
if model_name == "KMeans":
train_model_fun = ClusteringModels.kmeans_clustering
elif model_name == "DBSCAN":
train_model_fun = ClusteringModels.dbscan_clustering
elif model_name == "AgglomerativeClustering":
train_model_fun = ClusteringModels.agglomerative_clustering
else:
return 'Non Implemented mtd'
trained_model, y_pred = train_model_fun(X, True, **model_params)
"""Save Trained Model"""
save_project_model(trained_model, 'model.pkl')
query = f'''Update tblProjects Set Model_Trained=1 Where Id="{session.get('pid')}"'''
mysql.update_record(query)
logger.info('Final Training Done')
ProjectReports.insert_record_ml('Final Training Done')
return render_template('model_training/congrats.html')
except Exception as e:
logger.error('Error in Model Training Submit')
ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
render_template('model_training/model_result.html', action=action, status="error",
msg="Model is not found, please train model again")
if action == "Scheduled_model":
path = os.path.join(from_root(), 'artifacts', 'model_temp.pkl')
pass
else:
return "Non Implemented Method"
else:
logger.critical('DataFrame has no data')
return redirect('/')
except Exception as e:
logger.error('Error in Model Training Submit')
ProjectReports.insert_record_ml('Error in Model Training', '', '', 0, str(e))
return render_template('500.html', exception=e)
@app_training.route('/congrats', methods=['GET', 'POST'])
def congrats():
    """Render the post-training congratulations page for the current project.

    Requires a project id in the session and a trained model on disk;
    otherwise redirects home or shows an error page.
    """
    try:
        if 'pid' in session:
            df = load_data()
            if df is not None:
                target = session['target_column']
                X = df.drop(target, axis=1)
                y = df[target]
                model = load_project_model()
                if model is None:
                    return render_template('model_training/model_result.html', status="error",
                                           msg="Model is not found, please train model again")
                else:
                    # BUG FIX: get_params() returns a dict; iterating it directly
                    # yields keys only and raised ValueError on unpacking.
                    for key, value in model.get_params().items():
                        # NOTE(review): exec on parameter names is fragile and the
                        # resulting locals are never read — candidate for removal.
                        exec(key + "=value")
                logger.info('Loaded Congrats Page')
                ProjectReports.insert_record_ml('Loaded Congrats Page')
                # GET and POST both render the same page.
                return render_template('model_training/congrats.html')
        # BUG FIX: previously fell through and returned None (a Flask error)
        # when no project/data was in the session; redirect home instead.
        logger.error('Project id or data not found, redirect to home page')
        return redirect('/')
    except Exception as e:
        logger.error('Error in Congrats Page')
        ProjectReports.insert_record_ml('Error in Congrats Page', '', '', 0, str(e))
        return render_template('500.html', exception=e)
@app_training.route('/prediction', methods=['GET', 'POST'])
def prediction():
    """GET: show the prediction upload page (only if a model is trained).

    POST: accept a csv/tsv/json upload, run the trained model on it and
    render the result table. The uploaded file is always removed afterwards.
    """
    try:
        if 'pid' in session:
            file_path = ""
            logger.info('Loaded Prediction Page')
            ProjectReports.insert_record_ml('Loaded Prediction Page')
            if request.method == "GET":
                # NOTE(review): pid comes from the server-side session, but a
                # parameterized query would still be safer than an f-string.
                is_trained = mysql.fetch_all(
                    f"SELECT * FROM tblProjects WHERE Id ={session.get('pid')} AND Model_Trained=1")
                if is_trained is None:
                    return render_template('model_training/prediction_page.html', status="error",
                                           msg="your model is not trained, please train model first")
                else:
                    return render_template('model_training/prediction_page.html', status="success")
            else:
                try:
                    ALLOWED_EXTENSIONS = ['csv', 'tsv', 'json']
                    # BUG FIX: request.files['file'] raised KeyError when the form
                    # posted no file, making the "no file" messages unreachable.
                    f = request.files.get('file')
                    msg = ""
                    if f is None or f.filename.strip() == '':
                        msg = 'Please select a file to upload'
                    elif '.' not in f.filename or \
                            f.filename.rsplit('.', 1)[1].lower() not in ALLOWED_EXTENSIONS:
                        # The '.' check guards rsplit() against extension-less names.
                        msg = 'This file format is not allowed, please select mentioned one'
                    if msg:
                        logger.error(msg)
                        return render_template('model_training/prediction_page.html', status="error", msg=msg)
                    filename = secure_filename(f.filename)
                    file_path = os.path.join(config_args['dir_structure']['upload_folder'], filename)
                    f.save(file_path)
                    if file_path.endswith('.csv'):
                        df = pd.read_csv(file_path)
                    elif file_path.endswith('.tsv'):
                        df = pd.read_csv(file_path, sep='\t')
                    elif file_path.endswith('.json'):
                        df = pd.read_json(file_path)
                    else:
                        msg = 'This file format is currently not supported'
                        logger.info(msg)
                        return render_template('model_training/prediction_page.html', status="error", msg=msg)
                    prediction = make_prediction(df)
                    data = prediction.to_html()
                    if len(data) > 0:
                        save_prediction_result(prediction)
                        return render_template('model_training/prediction_result.html', status="success", data=data)
                    else:
                        return render_template('model_training/prediction_result.html', status="error",
                                               msg="There is some issue, coudn't perform prediction. Please check your data")
                except Exception as e:
                    logger.error('Error in Prediction')
                    ProjectReports.insert_record_ml('Error in Prediction', '', '', 0, str(e))
                    return render_template('model_training/prediction_page.html', status="error", msg=str(e))
                finally:
                    # Clean up the upload even on failure; guard against a
                    # save() that never completed.
                    if file_path and os.path.exists(file_path):
                        os.remove(file_path)
        else:
            logger.error('Project id not found, redirect to home page')
            ProjectReports.insert_record_ml('Project id not found, redirect to home page', '', '', 0, 'Error')
            return redirect('/')
    except Exception as e:
        logger.error(e)
        return redirect('/')
@app_training.route('/download_prediction', methods=['POST'])
def download_prediction():
    """Return the stored prediction result for download.

    On any failure the error is logged and a JSON failure flag is returned.
    """
    try:
        return load_prediction_result()
    except Exception as exc:
        logger.error(exc)
        return jsonify({'success': False})
@app_training.route('/model_training/ann', methods=['GET'])
def ann_training():
    """Render the ANN designer page with the selectable hyper-parameter lists."""
    try:
        context = {
            'optimizers': OPTIMIZERS,
            'activation_functions': ACTIVATION_FUNCTIONS,
            'loss': REGRESSION_LOSS,
        }
        return render_template('model_training/ann.html', **context)
    except Exception as exc:
        logger.error(exc)
        return jsonify({'success': False})
def save_neural_network(checkpoint, name='model_temp.pth.tar'):
    """Persist a training checkpoint under artifacts/<project_name>/<name>.

    Args:
        checkpoint: any torch-serializable object (state dicts etc.).
        name: file name inside the project's artifacts directory.
    """
    path = os.path.join(from_root(), 'artifacts', session.get('project_name'))
    # BUG FIX: os.mkdir failed when parent dirs were missing and raced with
    # the exists() check; makedirs with exist_ok handles both.
    os.makedirs(path, exist_ok=True)
    file_name = os.path.join(path, name)
    torch.save(checkpoint, file_name)
def load_neural_network(checkpoint, name='model_temp.pth.tar'):
    """Load a checkpoint from artifacts/<project_name>/<name> and return it.

    BUG FIX: the original body was a copy-paste of save_neural_network and
    called torch.save, overwriting the stored checkpoint instead of reading it.

    Args:
        checkpoint: unused; retained so existing call sites keep working.
        name: file name inside the project's artifacts directory.

    Returns:
        The deserialized checkpoint object.
    """
    path = os.path.join(from_root(), 'artifacts', session.get('project_name'))
    file_name = os.path.join(path, name)
    return torch.load(file_name)
def create_layers(data=None, df=None, feature_map=None, typ=None):
    """Translate a list of front-end layer specs into torch.nn modules.

    Args:
        data: sequence of dicts, each with a 'type' key ('input', 'linear',
            'batch_normalization', 'dropout', 'output') plus 'units',
            'activation' or 'percentage' as appropriate for that type.
        df: training features; df.shape[1] fixes the input layer width.
        feature_map: class-label -> index mapping (classification only); its
            size fixes the output width. Defaults to a fresh empty dict.
        typ: 'Regression' or 'Classification'; 'cluestring' is rejected.

    Returns:
        A list of nn.Module instances in network order, or an error string
        for the unsupported clustering case.
    """
    # BUG FIX: the original signature used the shared mutable default
    # `feature_map={}`; use None and build a fresh dict per call.
    if feature_map is None:
        feature_map = {}
    layers = []
    activation = {'ReLU': nn.ReLU(),
                  'ELU': nn.ELU(),
                  'LeakyReLU': nn.LeakyReLU(),
                  'Softmax': nn.Softmax(),
                  'PReLU': nn.PReLU(),
                  'SELU': nn.SELU(),
                  'Tanh': nn.Tanh(),
                  'Softplus': nn.Softplus(),
                  'Softmin': nn.Softmin(),
                  'Sigmoid': nn.Sigmoid(),
                  'RReLU': nn.RReLU(),
                  }
    # Width of the most recently emitted layer; seeded from the first spec
    # (assumes data[0] is the input spec — matches how main() builds it).
    infer_in = data[0]['units']
    for i in data:
        if i['type'] == 'input':
            layers.append(nn.Linear(in_features=df.shape[1], out_features=i['units']))
            layers.append(activation[i['activation']])
        if i['type'] == 'linear':
            layers.append(nn.Linear(infer_in, i['units']))
            layers.append(activation[i['activation']])
            infer_in = i['units']
        if i['type'] == 'batch_normalization':
            layers.append(nn.BatchNorm1d(num_features=infer_in))
        if i['type'] == 'dropout':
            layers.append(nn.Dropout(p=i['percentage']))
        if i['type'] == 'output':
            if typ == 'Regression':
                # Single regression target.
                layers.append(nn.Linear(in_features=infer_in, out_features=1))
            if typ == 'Classification':
                # One logit per class.
                layers.append(nn.Linear(in_features=infer_in, out_features=len(feature_map.keys())))
            if typ == 'cluestring':
                return 'CLuestring cant be performed using Ann'
    return layers
class CustomTrainData(Dataset):
    """Tensor-backed dataset wrapping a training feature frame and its target."""

    def __init__(self, train_df, target):
        self.train_df = train_df
        self.target = target
        # Materialise tensors once up front so __getitem__ is cheap.
        self.n_sample = len(train_df)
        self.x = torch.from_numpy(train_df.to_numpy())
        self.y = torch.from_numpy(target.to_numpy())

    def __getitem__(self, index):
        # One (features, label) pair.
        return self.x[index], self.y[index]

    def __len__(self):
        return self.n_sample
class CustomTestData(Dataset):
    """Tensor-backed dataset wrapping a test feature frame and its target."""

    def __init__(self, test_df, target):
        self.test_df = test_df
        self.target = target
        # Convert once at construction time; indexing then returns views.
        self.n_sample = len(test_df)
        self.x = torch.from_numpy(test_df.to_numpy())
        self.y = torch.from_numpy(target.to_numpy())

    def __getitem__(self, index):
        # One (features, label) pair.
        return self.x[index], self.y[index]

    def __len__(self):
        return self.n_sample
def count_parameters(model):
    """Tabulate trainable parameter counts per named tensor of `model`.

    Returns:
        (table, total): a PrettyTable with one row per trainable tensor,
        and the total number of trainable parameters.
    """
    table = PrettyTable(["Modules", "Parameters"])
    total_params = 0
    for name, tensor in model.named_parameters():
        # Frozen tensors are excluded from the summary.
        if not tensor.requires_grad:
            continue
        count = tensor.numel()
        table.add_row([name, count])
        total_params += count
    return table, total_params
def trainTestSplit(df, target, size=0.25):
    """Split `df` into train/test feature and label sets.

    NOTE(review): `test_size=1 - size` means `size` is treated as the
    *training* fraction (size=0.25 -> 75% of rows in the test set).
    Confirm this matches the callers' intent before changing it.
    """
    features = df.drop(target, axis=1)
    labels = df[target]
    # Fixed seed keeps the split reproducible across runs.
    return train_test_split(features, labels, test_size=1 - size, random_state=101)
def main(Data=None, df=None, target=None, size=None, num_epoch=None, typ=None):
model_info = {}
model_metrice = {}
model_metrice_plot = {}
feature_map = {}
if typ == 'Classification':
for i in enumerate(df[target].unique()):
feature_map[i[1]] = i[0]
df[target] = df[target].replace(feature_map)
model_info['feature_map'] = feature_map
model_info['split_size'] = size
model_info['batch_size'] = 32
X_train, X_test, y_train, y_test = trainTestSplit(df, target, size=size)
# Data class creation
trainData = CustomTrainData(X_train, y_train)
testData = CustomTestData(X_test, y_test)
# Data loader creation
train_data_loader = DataLoader(trainData, batch_size=32, shuffle=True)
test_data_loader = DataLoader(testData, batch_size=32)
# Model Creation
model = nn.Sequential(*create_layers(Data['layerUnits'], X_train, feature_map, typ))
print(model)
# Optimizer and Loss ---- > front end
table, total_params = count_parameters(model)
model_info['table'] = table.get_html_string()
model_info['total_params'] = total_params
model_info['optimizer'] = Data['optimizers']
model_info['loss'] = Data['loss']
model_info['model'] = list(model)
optimizer_selection = {'Adam': torch.optim.Adam(model.parameters(), lr=float(Data['learningRate'])),
'AdaGrad': torch.optim.Adagrad(model.parameters(), lr=float(Data['learningRate'])),
'AdaMax': torch.optim.Adamax(model.parameters(), lr=float(Data['learningRate'])),
'RMSProps': torch.optim.RMSprop(model.parameters(), lr=float(Data['learningRate']))}
optimizer = optimizer_selection[Data['optimizers']]
if typ == "Classification":
loss_selection_classification = {'BCEWithLogitsLoss': nn.BCEWithLogitsLoss(), 'CrossEntropyLoss': nn.CrossEntropyLoss()}
loss_func = loss_selection_classification[Data['loss']]
if typ == "Regression":
loss_selection_regression = {'MAE': nn.L1Loss(), 'MSE': nn.MSELoss(), 'Huber Loss': nn.HuberLoss(),
'Smoth L1': nn.SmoothL1Loss()}
loss_func = loss_selection_regression[Data['loss']]
print(loss_func)
# Regression
# Train
if typ == "Regression":
loss_perEpoch = []
model.train()
num_epochs = num_epoch
for epooch in range(num_epochs):
for batch_idx, data in enumerate(train_data_loader):
features = data[0].float()
labels = data[1].float().reshape(features.shape[0],1)
# print(features.shape,labels.shape)
optimizer.zero_grad()
output = model(features)
loss = loss_func(output, labels)
loss.backward()
optimizer.step()
if batch_idx % 2 == 0:
loss_perEpoch.append(loss.item())
print(f'Epoch {epooch}/{num_epochs} Loss: {loss.item()}')
model_metrice['train_loss'] = loss_perEpoch[-1]
model_metrice_plot['train_loss'] = loss_perEpoch
model_metrice_plot['train_accuracy'] = [x for x in range(len(loss_perEpoch))]
# Test
model.eval()
test_loss = []
with torch.no_grad():
for idx, data in enumerate(test_data_loader):
features = data[0].float()
labels = data[1].float().reshape(features.shape[0],1)
output = model(features)
test_loss.append(loss_func(output, labels).item())
model_metrice['test_loss'] = np.mean(test_loss)
model_metrice['test_accuracy'] = None
model_metrice_plot['test_loss'] = test_loss
model_metrice_plot['test_accuracy'] = [x for x in range(len(test_loss))]
print("Test Loss :", np.mean(test_loss))
# Classification
if typ == 'Classification':
# Train
loss_perEpoch = []
train_acc = []
model.train()
num_epochs = num_epoch
for epooch in range(num_epochs):
for batch_idx, data in enumerate(train_data_loader):
features = data[0].float()
labels = data[1]
# print(features,labels)
optimizer.zero_grad()
output = model(features)
loss = loss_func(output, labels)
loss.backward()
optimizer.step()
if batch_idx % 8 == 0:
train_acc.append((torch.argmax(output, axis=1) == labels.squeeze().long()).float().mean())
loss_perEpoch.append(loss.item())
print(f'Epoch {epooch}/{num_epochs} Loss: {loss.item()}')
model_metrice['train_loss'] = loss_perEpoch[-1]
model_metrice_plot['train_loss'] = loss_perEpoch
model_metrice_plot['train_accuracy'] = train_acc
# Test
model.eval()
test_loss = []
test_acc = []
with torch.no_grad():
for idx, data in enumerate(test_data_loader):
features = data[0].float()
labels = data[1]
output = model(features)
test_acc.append((torch.argmax(output, axis=1) == labels.squeeze().long()).float().mean())
test_loss.append(loss_func(output, labels).item())
print("Test Loss :", np.mean(test_loss), " ", "Test Accuracy :", np.mean(test_acc))
model_metrice['test_accuracy'] = np.mean(test_acc)
model_metrice['test_loss'] = np.mean(test_loss)
model_metrice_plot['test_loss'] = test_loss
model_metrice_plot['test_accuracy'] = | |
<filename>upnp_inspector/mediaserver.py
# -*- coding: utf-8 -*-
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2009 - <NAME> <<EMAIL>>
# Copyright 2014 - <NAME> <<EMAIL>>
import mimetypes
mimetypes.init()
import pygtk
pygtk.require("2.0")
import gtk
from twisted.internet import reactor
from coherence import log
from coherence.upnp.core.utils import parse_xml
from ._resources import _geticon
# gtk store defines
# Column indices for the 9-column TreeStore built in TreeWidget.build_ui.
NAME_COLUMN = 0
ID_COLUMN = 1
UPNP_CLASS_COLUMN = 2
CHILD_COUNT_COLUMN = 3
UDN_COLUMN = 4
SERVICE_COLUMN = 5
ICON_COLUMN = 6
DIDL_COLUMN = 7
TOOLTIP_ICON_COLUMN = 8

# Map XML namespace URIs (in ElementTree's '{uri}tag' form) to the short
# prefixes used when displaying DIDL-Lite metadata.
namespaces = {'{http://purl.org/dc/elements/1.1/}': 'dc:',
              '{urn:schemas-upnp-org:metadata-1-0/upnp/}': 'upnp:',
              '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}': 'DIDL-Lite:',
              '{urn:schemas-dlna-org:metadata-1-0}': 'dlna:',
              '{http://www.pv.com/pvns/}': 'pv:'}
class ItemDetailsWidget(object):
    """Scrolled two-column (key, value) tree showing one item's metadata.

    Right-clicking a row offers clipboard copy, and for URL-bearing keys
    (DIDL-Lite:res, upnp:albumArtURI) an "Open URL" action as well.
    """

    def __init__(self):
        self.window = gtk.ScrolledWindow()
        self.window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        self.window.set_border_width(2)
        self.window.set_shadow_type(gtk.SHADOW_ETCHED_IN)
        # Two string columns: metadata key and its value.
        self.store = gtk.TreeStore(str, str)
        self.treeview = gtk.TreeView(self.store)
        self.column = gtk.TreeViewColumn()
        self.treeview.append_column(self.column)
        self.treeview.set_headers_visible(False)
        self.treeview.connect("button_press_event", self.button_action)
        text_cell = gtk.CellRendererText()
        self.column.pack_start(text_cell, False)
        self.column.set_attributes(text_cell, text=0)
        text_cell = gtk.CellRendererText()
        self.column.pack_start(text_cell, True)
        self.column.set_attributes(text_cell, text=1)
        self.window.set_size_request(400, 300)
        self.window.add(self.treeview)

    def open_url(self, url):
        # Imported lazily so the module loads even without a browser setup.
        import webbrowser
        webbrowser.open(url)

    def button_action(self, widget, event):
        """On right-click, pop up a copy/open context menu for the row."""
        x = int(event.x)
        y = int(event.y)
        path = widget.get_path_at_pos(x, y)
        if path is None:  # idiom fix: was `path == None`
            return True
        row_path, column, _, _ = path
        if event.button == 3:
            store = widget.get_model()
            # Renamed from `iter` to avoid shadowing the builtin.
            row_iter = store.get_iter(row_path)
            menu = gtk.Menu()
            key, = store.get(row_iter, 0)
            value, = store.get(row_iter, 1)
            clipboard = gtk.clipboard_get(gtk.gdk.SELECTION_CLIPBOARD)
            if key in ['DIDL-Lite:res', 'upnp:albumArtURI']:
                item = gtk.MenuItem("Copy URL")
                item.connect("activate", lambda w: clipboard.set_text(value))
                menu.append(item)
                item = gtk.MenuItem("Open URL")
                item.connect("activate", lambda w: self.open_url(value))
                menu.append(item)
            else:
                item = gtk.MenuItem("Copy value")
                item.connect("activate", lambda w: clipboard.set_text(value))
                menu.append(item)
            menu.show_all()
            menu.popup(None, None, None, event.button, event.time)
            return True
        return False
class TreeWidget(object):
def __init__(self, coherence, device,
             details_store=None,
             cb_item_dbl_click=None,
             cb_resource_chooser=None):
    """Build the media-server browse tree and start browsing `device`.

    Args:
        coherence: the Coherence UPnP framework instance.
        device: the media server device to browse.
        details_store: optional store receiving the selected item's details.
        cb_item_dbl_click: optional callback for double-clicked items.
        cb_resource_chooser: optional callback to pick among item resources.
    """
    self.details_store = details_store
    self.cb_item_dbl_click = cb_item_dbl_click
    self.cb_item_right_click = None
    self.cb_resource_chooser = cb_resource_chooser
    self.build_ui()
    self.coherence = coherence
    self.device = device
    # Populate the tree from the given device immediately.
    self.mediaserver_found(device)
def build_ui(self):
    """Create the scrolled tree view, its 9-column model, icons and signals."""
    self.window = gtk.ScrolledWindow()
    self.window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    self.folder_icon = _geticon('folder.png')
    self.audio_icon = _geticon('audio-x-generic.png')
    self.video_icon = _geticon('video-x-generic.png')
    self.image_icon = _geticon('image-x-generic.png')
    self.store = gtk.TreeStore(str, # 0: name or title
                               str, # 1: id, '0' for the device
                               str, # 2: upnp_class, 'root' for the device
                               int, # 3: child count, -1 if not available
                               str, # 4: device udn, '' for an item
                               str, # 5: service path, '' for a non container item
                               gtk.gdk.Pixbuf,  # 6: row icon
                               str, # 7: DIDLLite fragment, '' for a non upnp item
                               gtk.gdk.Pixbuf  # 8: cached tooltip icon
                               )
    self.treeview = gtk.TreeView(self.store)
    self.column = gtk.TreeViewColumn('Items')
    self.treeview.append_column(self.column)
    # Rows can be dragged out; the payload type is 'upnp/metadata'
    # (see drag_data_get_cb).
    self.treeview.enable_model_drag_source(
        gtk.gdk.BUTTON1_MASK, [('upnp/metadata', 0, 1)],
        gtk.gdk.ACTION_DEFAULT | gtk.gdk.ACTION_PRIVATE)
    self.treeview.connect("drag_data_get", self.drag_data_get_cb)
    # create a CellRenderers to render the data
    icon_cell = gtk.CellRendererPixbuf()
    text_cell = gtk.CellRendererText()
    self.column.pack_start(icon_cell, False)
    self.column.pack_start(text_cell, True)
    self.column.set_attributes(text_cell, text=0)
    self.column.add_attribute(icon_cell, "pixbuf", 6)
    self.treeview.connect("row-activated", self.browse)
    self.treeview.connect("row-expanded", self.row_expanded)
    self.treeview.connect("button_press_event", self.button_action)
    # Tooltip support is currently disabled (kept for reference).
    #self.treeview.set_property("has-tooltip", True)
    #self.treeview.connect("query-tooltip", self.show_tooltip)
    #self.tooltip_path = None
    self.we_are_scrolling = None
    #def end_scrolling():
    #    self.we_are_scrolling = None
    #def start_scrolling(w,e):
    #    if self.we_are_scrolling != None:
    #        self.we_are_scrolling.reset(800)
    #    else:
    #        self.we_are_scrolling = reactor.callLater(800, end_scrolling)
    #self.treeview.connect('scroll-event', start_scrolling)
    self.window.set_size_request(400, 300)
    self.window.add(self.treeview)
def drag_data_get_cb(self, treeview, context, selection, info, timestamp):
treeselection = treeview.get_selection()
model, iter = treeselection.get_selected()
didl = model.get_value(iter, DIDL_COLUMN)
#print "drag_data_get_cb", didl
selection.set('upnp/metadata', 8, didl)
return
def show_tooltip(self, widget, x, y, keyboard_mode, tooltip):
if self.we_are_scrolling != None:
return False
ret = False
try:
path = self.treeview.get_dest_row_at_pos(x, y)
iter = self.store.get_iter(path[0])
title, object_id, upnp_class, item = self.store.get(iter, NAME_COLUMN, ID_COLUMN, UPNP_CLASS_COLUMN, DIDL_COLUMN)
from coherence.upnp.core import DIDLLite
if upnp_class == 'object.item.videoItem':
self.tooltip_path = object_id
item = DIDLLite.DIDLElement.fromString(item).getItems()[0]
tooltip_icon, = self.store.get(iter, TOOLTIP_ICON_COLUMN)
if tooltip_icon != None:
tooltip.set_icon(tooltip_icon)
else:
tooltip.set_icon(self.video_icon)
for res in item.res:
protocol, network, content_format, additional_info = res.protocolInfo.split(':')
if(content_format == 'image/jpeg' and
'DLNA.ORG_PN=JPEG_TN' in additional_info.split(';')):
icon_loader = gtk.gdk.PixbufLoader()
icon_loader.write(urllib.urlopen(str(res.data)).read())
icon_loader.close()
icon = icon_loader.get_pixbuf()
tooltip.set_icon(icon)
self.store.set_value(iter, TOOLTIP_ICON_COLUMN, icon)
#print "got poster", icon
break
title = title.replace('&', '&')
try:
director = item.director.replace('&', '&')
except AttributeError:
director = ""
try:
description = item.description.replace('&', '&')
except AttributeError:
description = ""
tooltip.set_markup("<b>%s</b>\n"
"<b>Director:</b> %s\n"
"<b>Description:</b> %s" % (title,
director,
description))
ret = True
except TypeError:
#print traceback.format_exc()
pass
except Exception:
#print traceback.format_exc()
#print "something wrong"
pass
return ret
    def button_action(self, widget, event):
        """Mouse-button dispatcher for the tree view.

        Left click (button 1): mirror the clicked row's DIDL fragment into
        the optional details store.  Right click (button 3): either defer to
        cb_item_right_click, or pop up a context menu offering container
        refresh/download and "play on MediaRenderer" actions.

        Returns True to stop further signal propagation, 0 otherwise.
        """
        #print "TreeWidget button_action", widget, event, event.button
        x = int(event.x)
        y = int(event.y)
        path = widget.get_path_at_pos(x, y)
        if path == None:
            # Click landed outside any row; swallow the event.
            return True
        row_path, column, _, _ = path
        if event.button == 1 and self.details_store != None:
            store = widget.get_model()
            iter = store.get_iter(row_path)
            didl, = store.get(iter, DIDL_COLUMN)
            self.details_store.clear()
            #print didl
            et = parse_xml(didl, 'utf-8')
            et = et.getroot()
            def un_namespace(text):
                # Map a fully-qualified XML tag/attribute back to prefix form.
                for k, v in namespaces.items():
                    if text.startswith(k):
                        return text.replace(k, v)
                return text
            def append(item, row=None):
                # Recursively mirror a DIDL element (attributes first, then
                # children; <res> children get their own sub-rows).
                for k, v in item.attrib.items():
                    self.details_store.append(row, (un_namespace(k), v))
                for child in item:
                    new_row = self.details_store.append(row, (un_namespace(child.tag), child.text))
                    if un_namespace(child.tag) == 'DIDL-Lite:res':
                        append(child, new_row)
            for item in et:
                append(item)
        if event.button == 3:
            if self.cb_item_right_click != None:
                return self.cb_item_right_click(widget, event)
            else:
                store = widget.get_model()
                iter = store.get_iter(row_path)
                title, object_id, upnp_class = self.store.get(iter, NAME_COLUMN, ID_COLUMN, UPNP_CLASS_COLUMN)
                menu = None
                if upnp_class == 'root' or upnp_class.startswith('object.container'):
                    def refresh(treeview, path):
                        # Drop all children, re-browse, restore expansion.
                        # NOTE(review): uses the enclosing row_path, not the
                        # `path` parameter, when locating the iter — confirm
                        # this is intended.
                        expanded = treeview.row_expanded(path)
                        store = treeview.get_model()
                        iter = store.get_iter(row_path)
                        child = store.iter_children(iter)
                        while child:
                            store.remove(child)
                            child = store.iter_children(iter)
                        self.browse(treeview, path, None,
                                    starting_index=0, requested_count=0, force=True, expand=expanded)
                    menu = gtk.Menu()
                    item = gtk.MenuItem("Refresh container")
                    item.connect("activate", lambda x: refresh(widget, row_path))
                    menu.append(item)
                    def download_links(links, directory):
                        # Worker-thread body: fetch each "name,url" pair that
                        # is not already cached on disk.
                        import urllib, socket, os
                        socket.setdefaulttimeout(15)
                        for link in links:
                            name, url = link.split(",")
                            filepath = "{}/{}".format(directory, name)
                            if not os.path.exists(filepath):
                                urllib.urlretrieve(url, filepath)
                    def download_files(treeview, path):
                        # Collect "title,url" pairs for all non-container
                        # children, then download them on a background thread
                        # into ./upnp-cache/<device>/<container>/.
                        expanded = treeview.row_expanded(path)
                        store = treeview.get_model()
                        iter = store.get_iter(row_path)
                        child = store.iter_children(iter)
                        links = []
                        while child:
                            title, object_id, upnp_class, url, didl = self.store.get(child, NAME_COLUMN, ID_COLUMN, UPNP_CLASS_COLUMN, SERVICE_COLUMN, DIDL_COLUMN)
                            if("object.container" not in upnp_class):
                                links.append(title + "," + url)
                            store.remove(child)
                            child = store.iter_children(iter)
                        self.browse(treeview, path, None,
                                    starting_index=0, requested_count=0, force=True, expand=expanded)
                        import os
                        directory = os.getcwd() + "/upnp-cache/" + self.device.get_friendly_name() + "/" + self.store.get(iter, NAME_COLUMN)[0]
                        if not os.path.exists(directory):
                            os.makedirs(directory)
                        import thread
                        thread.start_new_thread(download_links, (links, directory))
                    # NOTE(review): this second gtk.Menu() discards the
                    # "Refresh container" menu built just above and rebuilds
                    # both entries — confirm the first build is dead code.
                    menu = gtk.Menu()
                    item = gtk.MenuItem("refresh container")
                    item2 = gtk.MenuItem("download files")
                    item.connect("activate", lambda x: refresh(widget, row_path))
                    item2.connect("activate", lambda x: download_files(widget, row_path))
                    menu.append(item)
                    menu.append(item2)
                if upnp_class != 'root':
                    url, didl = self.store.get(iter, SERVICE_COLUMN, DIDL_COLUMN)
                    if upnp_class.startswith('object.container'):
                        # For containers, look for a dlna-playcontainer URI
                        # among the item's http-get resources.
                        from coherence.upnp.core import DIDLLite
                        url = ''
                        item = DIDLLite.DIDLElement.fromString(didl).getItems()[0]
                        res = item.res.get_matching(['*:*:*:*'], protocol_type='http-get')
                        if len(res) > 0:
                            for r in res:
                                if r.data.startswith('dlna-playcontainer://'):
                                    url = r.data
                                    break
                    if url != '':
                        print "prepare to play", url
                    # Deferred-callback chain: Stop -> SetAVTransportURI ->
                    # Play on the chosen renderer's AVTransport service.
                    def handle_error(e):
                        print 'we have an error', e
                    def handle_result(r):
                        print "done", r
                    def start(r, service):
                        print "call start", service, service.device.get_friendly_name()
                        action = service.get_action('Play')
                        d = action.call(InstanceID=0, Speed=1)
                        d.addCallback(handle_result)
                        d.addErrback(handle_error)
                    def set_uri(r, service, url, didl):
                        print "call set", service, service.device.get_friendly_name(), url, didl
                        action = service.get_action('SetAVTransportURI')
                        d = action.call(InstanceID=0, CurrentURI=url,
                                        CurrentURIMetaData=didl)
                        d.addCallback(start, service)
                        d.addErrback(handle_error)
                        return d
                    def play(service, url, didl):
                        print "call stop", service, service.device.get_friendly_name()
                        action = service.get_action('Stop')
                        print action
                        d = action.call(InstanceID=0)
                        d.addCallback(set_uri, service, url, didl)
                        d.addErrback(handle_error)
                    if menu == None:
                        menu = gtk.Menu()
                    else:
                        menu.append(gtk.SeparatorMenuItem())
                    # Insensitive header item labelling the renderer section.
                    item = gtk.MenuItem("Play on MediaRenderer")
                    item.set_sensitive(False)
                    menu.append(item)
                    menu.append(gtk.SeparatorMenuItem())
                    for device in self.coherence.devices:
                        if device.get_device_type().split(':')[3].lower() == 'mediarenderer':
                            service = device.get_service_by_type('AVTransport')
                            if service != None:
                                item = gtk.MenuItem(device.get_friendly_name())
                                # gtk passes the extra connect() args into
                                # the lambda as s, u, d.
                                item.connect("activate", lambda x, s, u, d: play(s, u, d), service, url, didl)
                                menu.append(item)
                if menu != None:
                    menu.show_all()
                    menu.popup(None, None, None, event.button, event.time)
                    return True
        return 0
def handle_error(self, error):
print error
def device_has_action(self, udn, service, action):
try:
self.devices[udn][service]['actions'].index(action)
return True
except:
return False
    def state_variable_change(self, variable):
        """Handle ContentDirectory eventing.

        On a ContainerUpdateIDs event, locate each changed container in the
        tree store and reload its children (preserving expansion state).
        SystemUpdateID events also arrive here but fall through untouched.
        """
        #print variable.name, 'changed to', variable.value
        name = variable.name
        value = variable.value
        if name == 'ContainerUpdateIDs':
            # Value is a flat CSV of alternating (containerID, updateID) pairs.
            changes = value.split(',')
            while len(changes) > 1:
                container = changes.pop(0).strip()
                update_id = changes.pop(0).strip()
                def match_func(model, iter, data):
                    # data is a tuple containing column number, key
                    column, key = data
                    value = model.get_value(iter, column)
                    return value == key
                def search(model, iter, func, data):
                    #print "search", model, iter, data
                    # Depth-first walk over this iter's siblings and children.
                    while iter:
                        if func(model, iter, data):
                            return iter
                        result = search(model, model.iter_children(iter),
                                        func, data)
                        if result:
                            return result
                        iter = model.iter_next(iter)
                    return None
                row_count = 0
                for row in self.store:
                    iter = self.store.get_iter(row_count)
                    match_iter = search(self.store, self.store.iter_children(iter),
                                        match_func, (ID_COLUMN, container))
                    if match_iter:
                        print "heureka, we have a change in ", container,
                        print ", container needs a reload"
                        path = self.store.get_path(match_iter)
                        expanded = self.treeview.row_expanded(path)
                        # Remove all children, then re-browse the container.
                        child = self.store.iter_children(match_iter)
                        while child:
                            self.store.remove(child)
                            child = self.store.iter_children(match_iter)
                        self.browse(self.treeview, path, None,
                                    starting_index=0, requested_count=0,
                                    force=True, expand=expanded)
                        break
                    row_count += 1
    def mediaserver_found(self, device):
        """Seed the tree with the device's root container and subscribe to
        ContentDirectory update events."""
        service = device.get_service_by_type('ContentDirectory')
        def reply(response):
            # Build the synthetic 'root' row from the BrowseMetadata answer.
            item = self.store.append(None)
            self.store.set_value(item, NAME_COLUMN, 'root')
            self.store.set_value(item, ID_COLUMN, '0')
            self.store.set_value(item, UPNP_CLASS_COLUMN, 'root')
            self.store.set_value(item, CHILD_COUNT_COLUMN, -1)
            self.store.set_value(item, UDN_COLUMN, device.get_usn())
            self.store.set_value(item, ICON_COLUMN, self.folder_icon)
            self.store.set_value(item, DIDL_COLUMN, response['Result'])
            self.store.set_value(item, SERVICE_COLUMN, service)
            self.store.set_value(item, TOOLTIP_ICON_COLUMN, None)
            # Placeholder child so the root row shows an expander arrow.
            self.store.append(item, ('...loading...', '', 'placeholder',
                                     -1, '', '', None, '', None))
        action = service.get_action('Browse')
        d = action.call(ObjectID='0', BrowseFlag='BrowseMetadata',
                        StartingIndex=str(0), RequestedCount=str(0),
                        Filter='*', SortCriteria='')
        d.addCallback(reply)
        d.addErrback(self.handle_error)
        # Refresh the tree whenever the server reports container changes.
        service.subscribe_for_variable('ContainerUpdateIDs',
                                       callback=self.state_variable_change)
        service.subscribe_for_variable('SystemUpdateID',
                                       callback=self.state_variable_change)
def row_expanded(self, view, iter, row_path):
#print "row_expanded", view,iter,row_path
| |
:param str lease_id:
Required if the blob has an active lease.
:param int timeout:
The timeout parameter is expressed in seconds.
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('block', block)
_validate_not_none('block_id', block_id)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'block',
'blockid': _encode_base64(_to_str(block_id)),
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-lease-id': _to_str(lease_id)
}
request.body = _get_request_body_bytes_only('block', block)
if validate_content:
computed_md5 = _get_content_md5(request.body)
request.headers['Content-MD5'] = _to_str(computed_md5)
self._perform_request(request)
def put_block_list(
self, container_name, blob_name, block_list, content_settings=None,
metadata=None, validate_content=False, lease_id=None, if_modified_since=None,
if_unmodified_since=None, if_match=None, if_none_match=None,
timeout=None):
'''
Writes a blob by specifying the list of block IDs that make up the blob.
In order to be written as part of a blob, a block must have been
successfully written to the server in a prior Put Block operation.
You can call Put Block List to update a blob by uploading only those
blocks that have changed, then committing the new and existing blocks
together. You can do this by specifying whether to commit a block from
the committed block list or from the uncommitted block list, or to commit
the most recently uploaded version of the block, whichever list it may
belong to.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param block_list:
A list of :class:`~azure.storeage.blob.models.BlobBlock` containing the block ids and block state.
:type block_list: list of :class:`~azure.storage.blob.models.BlobBlock`
:param ~azure.storage.blob.models.ContentSettings content_settings:
ContentSettings object used to set properties on the blob.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: a dict mapping str to str
:param bool validate_content:
If true, calculates an MD5 hash of the block list content. The storage
service checks the hash of the block list content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https as https (the default)
will already validate. Note that this check is associated with
the block list content, and not with the content of the blob itself.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only if the resource's ETag matches the value specified.
:param str if_none_match:
An ETag value, or the wildcard character (*). Specify this header
to perform the operation only if the resource's ETag does not match
the value specified. Specify the wildcard character (*) to perform
the operation only if the resource does not exist, and fail the
operation if it does exist.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: ETag and last modified properties for the updated Block Blob
:rtype: :class:`~azure.storage.blob.models.ResourceProperties`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
_validate_not_none('block_list', block_list)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'blocklist',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-lease-id': _to_str(lease_id),
'If-Modified-Since': _datetime_to_utc_string(if_modified_since),
'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since),
'If-Match': _to_str(if_match),
'If-None-Match': _to_str(if_none_match),
}
_add_metadata_headers(metadata, request)
if content_settings is not None:
request.headers.update(content_settings._to_headers())
request.body = _get_request_body(
_convert_block_list_to_xml(block_list))
if validate_content:
computed_md5 = _get_content_md5(request.body)
request.headers['Content-MD5'] = _to_str(computed_md5)
response = self._perform_request(request)
return _parse_base_properties(response)
def get_block_list(self, container_name, blob_name, snapshot=None,
block_list_type=None, lease_id=None, timeout=None):
'''
Retrieves the list of blocks that have been uploaded as part of a
block blob. There are two block lists maintained for a blob:
Committed Block List:
The list of blocks that have been successfully committed to a
given blob with Put Block List.
Uncommitted Block List:
The list of blocks that have been uploaded for a blob using
Put Block, but that have not yet been committed. These blocks
are stored in Azure in association with a blob, but do not yet
form part of the blob.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of existing blob.
:param str snapshot:
Datetime to determine the time to retrieve the blocks.
:param str block_list_type:
Specifies whether to return the list of committed blocks, the list
of uncommitted blocks, or both lists together. Valid values are:
committed, uncommitted, or all.
:param str lease_id:
Required if the blob has an active lease.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: list committed and/or uncommitted blocks for Block Blob
:rtype: :class:`~azure.storage.blob.models.BlobBlockList`
'''
_validate_not_none('container_name', container_name)
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = _get_path(container_name, blob_name)
request.query = {
'comp': 'blocklist',
'snapshot': _to_str(snapshot),
'blocklisttype': _to_str(block_list_type),
'timeout': _int_to_str(timeout),
}
request.headers = {'x-ms-lease-id': _to_str(lease_id)}
response = self._perform_request(request)
return _convert_xml_to_block_list(response)
#----Convenience APIs-----------------------------------------------------
def create_blob_from_path(
self, container_name, blob_name, file_path, content_settings=None,
metadata=None, validate_content=False, progress_callback=None,
max_connections=2, max_retries=5, retry_wait=1.0,
lease_id=None, if_modified_since=None, if_unmodified_since=None,
if_match=None, if_none_match=None, timeout=None):
'''
Creates a new blob from a file path, or updates the content of an
existing blob, with automatic chunking and progress notifications.
:param str container_name:
Name of existing container.
:param str blob_name:
Name of blob to create or update.
:param str file_path:
Path of the file to upload as the blob content.
:param ~azure.storage.blob.models.ContentSettings content_settings:
ContentSettings object used to set blob properties.
:param metadata:
Name-value pairs associated with the blob as metadata.
:type metadata: a dict mapping str to str
:param bool validate_content:
If true, calculates an MD5 hash for each chunk of the blob. The storage
service checks the hash of the content that has arrived with the hash
that was sent. This is primarily valuable for detecting bitflips on
the wire if using http instead of https as https (the default) will
already validate. Note that this MD5 hash is not stored with the
blob.
:param progress_callback:
Callback for progress with signature function(current, total) where
current is the number of bytes transfered so far, and total is the
size of the blob, or None if the total size is unknown.
:type progress_callback: callback function in format of func(current, total)
:param int max_connections:
Maximum number of parallel connections to use when the blob size exceeds
64MB.
:param int max_retries:
Number of times to retry upload of blob chunk if an error occurs.
:param int retry_wait:
Sleep time in secs between retries.
:param str lease_id:
Required if the blob has an active lease.
:param datetime if_modified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only
if the resource has been modified since the specified time.
:param datetime if_unmodified_since:
A DateTime value. Azure expects the date value passed in to be UTC.
If timezone is included, any non-UTC datetimes will be converted to UTC.
If a date is passed in without timezone info, it is assumed to be UTC.
Specify this header to perform the operation only if
the resource has not been modified since the specified date/time.
:param str if_match:
An ETag value, or the wildcard character (*). Specify this header to perform
the operation only | |
Relative Elevation Angle"
ESDName = "Sensor Relative Elevation Angle"
UDSName = ""
_domain = (-(2 ** 31 - 1), 2 ** 31 - 1)
_range = (-180, 180)
units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class SlantRange(IEEE754ElementParser):
    # LDS tag 21: sensor-to-frame-centre distance; raw uint32 counts map
    # linearly onto 0..5,000,000 metres.
    key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 01 08 01 01 00 00 00")
    TAG = 21
    UDSKey = "06 0E 2B 34 01 01 01 01 07 01 08 01 01 00 00 00"
    LDSName = "Slant Range"
    ESDName = "Slant Range"
    UDSName = "Slant Range"
    _domain = (0, 2**32-1)
    _range = (0, +5e6)
    units = 'meters'
@UAVBasicUniversalMetadataSet.add_parser
class TargetWidth(IEEE754ElementParser):
    # LDS tag 22: target width within the sensor field of view, metres.
    key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 01 09 02 01 00 00 00")
    TAG = 22
    # Fix: UDSKey had been replaced by the placeholder "<KEY>"; it mirrors
    # the 16-byte universal label in `key`, as in every sibling parser.
    UDSKey = "06 0E 2B 34 01 01 01 01 07 01 09 02 01 00 00 00"
    LDSName = "Target Width"
    ESDName = "Target Width"
    UDSName = "Target Width"
    _domain = (0, 2**16-1)
    _range = (0, +10e3)
    units = 'meters'
@UAVBasicUniversalMetadataSet.add_parser
class FrameCenterLatitude(IEEE754ElementParser):
    # LDS tag 23: latitude of the frame centre, scaled int32 -> degrees.
    key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 01 02 01 03 02 00 00")
    TAG = 23
    # Fix: UDSKey had been replaced by the placeholder "<KEY>"; restored
    # from the universal label in `key`, as in every sibling parser.
    UDSKey = "06 0E 2B 34 01 01 01 01 07 01 02 01 03 02 00 00"
    LDSName = "Frame Center Latitude"
    ESDName = "Target Latitude"
    UDSName = "Frame Center Latitude"
    _domain = (-(2**31 - 1), 2**31 - 1)
    _range = (-90, 90)
    units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class FrameCenterLongitude(IEEE754ElementParser):
    # LDS tag 24: longitude of the frame centre, scaled int32 -> degrees.
    key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 01 02 01 03 04 00 00")
    TAG = 24
    UDSKey = "06 0E 2B 34 01 01 01 01 07 01 02 01 03 04 00 00"
    LDSName = "Frame Center Longitude"
    ESDName = "Target Longitude"
    UDSName = "Frame Center Longitude"
    _domain = (-(2**31 - 1), 2**31 - 1)
    _range = (-180, 180)
    units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class OffsetCornerLatitudePoint1(IEEE754ElementParser):
    # LDS tag 26: corner-point 1 latitude offset from frame centre (int16
    # scaled onto +/-0.075 degrees).
    key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 07 01 00")
    TAG = 26
    # Fix: UDSKey had been replaced by the placeholder "<KEY>"; restored
    # from the universal label in `key`, as in every sibling parser.
    UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 07 01 00"
    LDSName = "Offset Corner Latitude Point 1"
    ESDName = "SAR Latitude 4"
    UDSName = "Corner Latitude Point 1"
    _domain = (-(2**15 - 1), 2**15 - 1)
    _range = (-0.075, +0.075)
    units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class OffsetCornerLongitudePoint1(IEEE754ElementParser):
    # LDS tag 27: corner-point 1 longitude offset from frame centre (int16
    # scaled onto +/-0.075 degrees).
    key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 0B 01 00")
    TAG = 27
    UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 0B 01 00"
    LDSName = "Offset Corner Longitude Point 1"
    ESDName = "SAR Longitude 4"
    UDSName = "Corner Longitude Point 1"
    _domain = (-(2**15 - 1), 2**15 - 1)
    _range = (-0.075, 0.075)
    units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class OffsetCornerLatitudePoint2(IEEE754ElementParser):
    # LDS tag 28: corner-point 2 latitude offset from frame centre.
    key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 08 01 00")
    TAG = 28
    # Fix: UDSKey had been replaced by the placeholder "<KEY>"; restored
    # from the universal label in `key`, as in every sibling parser.
    UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 08 01 00"
    LDSName = "Offset Corner Latitude Point 2"
    ESDName = "SAR Latitude 1"
    UDSName = "Corner Latitude Point 2"
    _domain = (-(2**15 - 1), 2**15 - 1)
    _range = (-0.075, 0.075)
    units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class OffsetCornerLongitudePoint2(IEEE754ElementParser):
    # LDS tag 29: corner-point 2 longitude offset from frame centre.
    # Fix: the universal key had been mangled to a "<KEY>..." placeholder;
    # reconstructed from the corner-point key pattern used by the sibling
    # parsers (shared prefix 06 0E 2B 34 01 01 01 03 07 01 02 01, longitude
    # point 2 suffix 03 0C 01 00) — its surviving tail matched this value.
    key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 0C 01 00")
    TAG = 29
    UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 0C 01 00"
    LDSName = "Offset Corner Longitude Point 2"
    ESDName = "SAR Longitude 1"
    UDSName = "Corner Longitude Point 2"
    _domain = (-(2**15 - 1), 2**15 - 1)
    _range = (-0.075, 0.075)
    units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class OffsetCornerLatitudePoint3(IEEE754ElementParser):
    # LDS tag 30: corner-point 3 latitude offset from frame centre.
    key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 09 01 00")
    TAG = 30
    # Fix: UDSKey had been replaced by the placeholder "<KEY>"; restored
    # from the universal label in `key`, as in every sibling parser.
    UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 09 01 00"
    LDSName = "Offset Corner Latitude Point 3"
    ESDName = "SAR Latitude 2"
    UDSName = "Corner Latitude Point 3"
    _domain = (-(2**15 - 1), 2**15 - 1)
    _range = (-0.075, 0.075)
    units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class OffsetCornerLongitudePoint3(IEEE754ElementParser):
    # LDS tag 31: corner-point 3 longitude offset from frame centre.
    # Fix: both key fields had been mangled to "<KEY>..." placeholders;
    # reconstructed from the sibling corner-point keys (longitude point 3
    # suffix 03 0D 01 00) — the surviving tail matched this value.
    key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 0D 01 00")
    TAG = 31
    UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 0D 01 00"
    LDSName = "Offset Corner Longitude Point 3"
    ESDName = "SAR Longitude 2"
    UDSName = "Corner Longitude Point 3"
    _domain = (-(2**15 - 1), 2**15 - 1)
    _range = (-0.075, 0.075)
    units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class OffsetCornerLatitudePoint4(IEEE754ElementParser):
    # LDS tag 32: corner-point 4 latitude offset from frame centre (int16
    # scaled onto +/-0.075 degrees).
    key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 0A 01 00")
    TAG = 32
    UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 0A 01 00"
    LDSName = "Offset Corner Latitude Point 4"
    ESDName = "SAR Latitude 3"
    UDSName = "Corner Latitude Point 4"
    _domain = (-(2**15 - 1), 2**15 - 1)
    _range = (-0.075, 0.075)
    units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class OffsetCornerLongitudePoint4(IEEE754ElementParser):
    # LDS tag 33: corner-point 4 longitude offset from frame centre (int16
    # scaled onto +/-0.075 degrees).
    key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 0E 01 00")
    TAG = 33
    UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 0E 01 00"
    LDSName = "Offset Corner Longitude Point 4"
    ESDName = "SAR Longitude 3"
    UDSName = "Corner Longitude Point 4"
    _domain = (-(2**15 - 1), 2**15 - 1)
    _range = (-0.075, 0.075)
    units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class StartDateTime(StringElementParser):
    # Free-text start date/time (UTC), up to 127 characters.
    # NOTE(review): TAG 72 is also claimed by EventStartTime below — confirm
    # which parser should own LDS tag 72.
    key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 02 01 02 01 01 00 00")
    TAG = 72
    UDSKey = "06 0E 2B 34 01 01 01 01 07 02 01 02 01 01 00 00"
    LDSName = "Start Date Time - UTC"
    UDSName = "Start Date Time - UTC"
    min_length, max_length = 0, 127
@UAVBasicUniversalMetadataSet.add_parser
class EventStartTime(DateTimeElementParser):
    # Mission/event start timestamp (UTC).
    key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 02 01 02 07 01 00 00")
    TAG = 72
    # Fix: UDSKey had been replaced by the placeholder "<KEY>"; restored
    # from the universal label in `key`, as in every sibling parser.
    # NOTE(review): TAG 72 collides with StartDateTime above — confirm
    # which parser should own LDS tag 72.
    UDSKey = "06 0E 2B 34 01 01 01 01 07 02 01 02 07 01 00 00"
    LDSName = "Event Start Time - UTC"
    ESDName = "Mission Start Time, Date, and Date of Collection"
    UDSName = "Event Start Date Time - UTC"
@UAVBasicUniversalMetadataSet.add_parser
class RVTLocalSet(StringElementParser):
    # LDS tag 73: embedded Remote Video Terminal local set.
    # NOTE(review): this key is byte-identical to EventStartTime's key above
    # — looks like a copy/paste slip; verify against the RVT local-set label
    # in the standard.
    key = hexstr_to_bytes("06 0E 2B 34 01 01 01 01 07 02 01 02 07 01 00 00")
    TAG = 73
    UDSKey = "06 0E 2B 34 01 01 01 01 07 02 01 02 07 01 00 00"
    LDSName = "RVT Local Data Set"
    ESDName = ""
    UDSName = "Remote Video Terminal Local Set"
@UAVBasicUniversalMetadataSet.add_parser
class VMTILocalSet(IEEE754ElementParser):
    # LDS tag 74: embedded Video Moving Target Indicator local set.
    # NOTE(review): an IEEE754 (float) parser for a nested local set looks
    # suspect — presumably a raw/set parser was intended; confirm.
    key = hexstr_to_bytes("06 0E 2B 34 02 0B 01 01 0E 01 03 03 06 00 00 00")
    TAG = 74
    UDSKey = "06 0E 2B 34 02 0B 01 01 0E 01 03 03 06 00 00 00"
    LDSName = "VMTI Local Set"
    ESDName = ""
    UDSName = "Video Moving Target Indicator Local Set"
@UAVBasicUniversalMetadataSet.add_parser
class CornerLatitudePoint1Full(IEEE754ElementParser):
    # LDS tag 82: corner-point 1 latitude as a full int32 (+/-90 degrees),
    # rather than the int16 offset form of tag 26.
    key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 07 01 00")
    TAG = 82
    # Fix: UDSKey had been replaced by the placeholder "<KEY>"; restored
    # from the universal label in `key`, as in every sibling parser.
    UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 07 01 00"
    LDSName = "Corner Latitude Point 1 (Full)"
    ESDName = "SAR Latitude 4"
    UDSName = "Corner Latitude Point 1 (Decimal Degrees)"
    _domain = (-(2**31 - 1), 2**31 - 1)
    _range = (-90, 90)
    units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class CornerLongitudePoint1Full(IEEE754ElementParser):
    # LDS tag 83: corner-point 1 longitude as a full int32 (+/-180 degrees).
    # Fix: the key had been mangled to a "<KEY>..." placeholder; restored to
    # match the intact UDSKey below (and OffsetCornerLongitudePoint1's key).
    key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 0B 01 00")
    TAG = 83
    UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 0B 01 00"
    LDSName = "Corner Longitude Point 1 (Full)"
    ESDName = "SAR Longitude 4"
    UDSName = "Corner Longitude Point 1 (Decimal Degrees)"
    _domain = (-(2**31 - 1), 2**31 - 1)
    _range = (-180, 180)
    units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class CornerLatitudePoint2Full(IEEE754ElementParser):
    # LDS tag 84: corner-point 2 latitude as a full int32 (+/-90 degrees).
    # Fix: both key fields had been mangled to "<KEY>..." placeholders;
    # restored to OffsetCornerLatitudePoint2's universal label, matching the
    # pattern of the other Full corner-point parsers (the surviving tail
    # "...03 08 01 00" agrees).
    key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 08 01 00")
    TAG = 84
    UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 08 01 00"
    LDSName = "Corner Latitude Point 2 (Full)"
    ESDName = "SAR Latitude 1"
    UDSName = "Corner Latitude Point 2 (Decimal Degrees)"
    _domain = (-(2**31 - 1), 2**31 - 1)
    _range = (-90, 90)
    units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class CornerLongitudePoint2Full(IEEE754ElementParser):
    # LDS tag 85: corner-point 2 longitude as a full int32 (+/-180 degrees).
    # Fix: both key fields had been mangled to "<KEY>..." placeholders;
    # restored to the Point-2 longitude label (suffix 03 0C 01 00), matching
    # the pattern of the sibling corner-point parsers — the UDSKey's
    # surviving tail "...3 0C 01 00" agrees.
    key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 0C 01 00")
    TAG = 85
    UDSKey = "06 0E 2B 34 01 01 01 03 07 01 02 01 03 0C 01 00"
    LDSName = "Corner Longitude Point 2 (Full)"
    ESDName = "SAR Longitude 1"
    UDSName = "Corner Longitude Point 2 (Decimal Degrees)"
    _domain = (-(2**31 - 1), 2**31 - 1)
    _range = (-180, 180)
    units = 'degrees'
@UAVBasicUniversalMetadataSet.add_parser
class CornerLatitudePoint3Full(IEEE754ElementParser):
key = hexstr_to_bytes("06 0E 2B 34 01 01 01 03 07 01 02 01 03 09 01 00")
TAG = 86
UDSKey = "<KEY>"
LDSName = "Corner Latitude Point 3 (Full)"
ESDName = "SAR Latitude 2"
UDSName = "Corner Latitude Point 3 (Decimal Degrees)"
_domain = (-(2**31 - 1), 2**31 - 1)
_range = (-90, 90)
units = | |
txt_file is not None:
pass
logging.info('lblfile {} imgfile {}'.format(txt_file,img_file))
img_arr = cv2.imread(img_file)
if img_arr is None:
logging.warning('problem reading {}, returning'.format(img_file))
return None
image_h, image_w = img_arr.shape[0:2]
result_dict = {}
result_dict['filename']=img_file
result_dict['dimensions_h_w_c']=img_arr.shape
result_dict['annotations']=[]
if not os.path.exists(txt_file):
logging.warning('yolo2tgdict could not find {}, trying replacing "images" with "labels" '.format(txt_file))
#try alternate path replacing 'images' with 'labels'
if 'images' in img_file:
img_dir = os.path.dirname(img_file)
img_base = os.path.basename(img_file)
labels_dir = img_dir.replace('images','labels')
lbl_name = os.path.basename(img_file).replace('.jpg','.txt').replace('.png','.txt')
txt_file = os.path.join(labels_dir,lbl_name)
if not os.path.exists(txt_file):
logging.warning('yolo2tgdict could not find {}, returning '.format(txt_file))
return
else:
return
with open(txt_file,'r') as fp:
lines = fp.readlines()
logging.debug('{} bbs found'.format(len(lines)))
if lines == []:
logging.warning('no lines in {}'.format(txt_file))
for line in lines:
if line.strip()[0]=='#':
logging.debug('got comment line')
continue
class_index,x,y,w,h = line.split()
x_p=float(x)
y_p=float(y)
w_p=float(w)
h_p=float(h)
class_index = int(class_index)
class_label = classlabels[class_index]
x_center = int(x_p*image_w)
y_center = int(y_p*image_h)
w = int(w_p*image_w)
h = int(h_p*image_h)
x1 = x_center-w/2
x2 = x_center+w/2
y1 = y_center-h/2
y2 = y_center+h/2
logging.info('class {} x_c {} y_c {} w {} h {} x x1 {} y1 {} x2 {} y2 {}'.format(class_index,x_center,y_center,w,h,x1,y1,x2,y2))
if visual_output:
cv2.rectangle(img_arr,(x1,y1),(x2,y2),color=[100,255,100],thickness=2)
object_dict={}
object_dict['bbox_xywh'] = [x1,y1,w,h]
object_dict['object']=class_label
result_dict['annotations'].append(object_dict)
if visual_output:
cv2.imshow('yolo2tgdict',img_arr)
cv2.waitKey(0)
return result_dict
def tgdict_to_yolo(tg_dict,label_dir=None,classes=constants.hls_yolo_categories,yolo_trainfile='yolo_train.txt'):
    '''
    Write one YOLO label file (plus a trainfile line) for a dict in 'tg format':
    {'filename':'image423.jpg','dimensions_h_w_c':(h,w,c),
     'annotations':[{'object':'person','bbox_xywh':[x,y,w,h],'sId':104}, ...]}

    Darknet/YOLO (https://pjreddie.com/darknet/yolo/) wants, per image, a .txt
    file with one line per ground-truth object:
        <object-class> <x> <y> <width> <height>
    where the coordinates are fractions of the image size.  YOLO assumes label
    files sit in a 'labels' dir parallel to the images dir with the same
    basenames (.txt instead of .jpg); here the label file is written next to
    the image unless label_dir is given.  The image path is also appended to
    yolo_trainfile.  Called by json_to_yolo.

    :param tg_dict: annotation dict in tg format (see above)
    :param label_dir: where to write the .txt; defaults to the image's own dir
    :param classes: ordered class list; a line's class number is the index here
    :param yolo_trainfile: file to which the image path is appended
    :return: None
    '''
    img_filename = tg_dict['filename']
    annotations = tg_dict['annotations']
    # Bugfix: the key is spelled 'sId' (see docstring); the original tested
    # `'sid' in tg_dict` and then read tg_dict['sId'], which either raised
    # KeyError or never matched.  Currently informational only.
    sid = tg_dict.get('sId')
    dims = tg_dict['dimensions_h_w_c']
    im_h,im_w=(dims[0],dims[1])
    logging.debug('writing yolo for file {}\nannotations {}'.format(img_filename,annotations))
    if label_dir is None:
        label_dir = os.path.dirname(img_filename)
    label_name = os.path.basename(img_filename).replace('.png','.txt').replace('.jpg','.txt').replace('.jpeg','.txt')
    if label_name[-4:]!='.txt':
        logging.warning('did not replace suffix of {} with .txt'.format(img_filename))
    label_path = os.path.join(label_dir,label_name)
    print('writing yolo to '+str(label_path))
    with open(label_path,'w') as fp:
        for annotation in annotations:
            bb_xywh = annotation['bbox_xywh']
            # convert pixel xywh to fractional yolo (x_center y_center w h)
            bb_yolo = imutils.xywh_to_yolo(bb_xywh,(im_h,im_w))
            logging.info('dims {} bbxywh {} bbyolo {}'.format((im_w,im_h),bb_xywh,bb_yolo))
            object = annotation['object']
            class_number = classes.index(object)
            line = str(class_number)+' '+str(bb_yolo[0])+' '+str(bb_yolo[1])+' '+str(bb_yolo[2])+' '+str(bb_yolo[3])+'\n'
            fp.write(line)
    # The `with` blocks close the files; the old explicit close() calls
    # inside them were redundant.
    Utils.ensure_file(yolo_trainfile)
    with open(yolo_trainfile,'a') as fp2:
        fp2.write(img_filename+'\n')
def json_vietnam_to_yolo(jsonfile,split_to_test_and_train=True,label_dir=None,classes=constants.hls_yolo_categories,yolo_trainfile=None,check_dims=True,visual_output=True):
    '''Convert one json annotation file in 'vietnam format' to a darknet/yolo label file.

    Input json looks like:
    {"objects":[{"label":"Private Car","x_y_w_h":[1160,223,65,59]},...],
     "image_path":"2017-07-06_09-24-24-995.jpeg","image_w_h":[1600,900]}

    Output (https://pjreddie.com/darknet/yolo/): one .txt per image, one line
    per object:  <object-class> <x> <y> <width> <height>
    where x,y,width,height are fractions of the image size. The label file is
    written next to the image (yolo accepts labels in the same dir as images)
    and the image path is appended to yolo_trainfile.

    Fixes vs. original: the image path is now resolved unconditionally (it was
    undefined -> NameError at the trainfile write when check_dims=False and
    visual_output=False), and the dims-mismatch log now prints both expected
    dims (the old format string had 3 placeholders for 4 arguments).

    :param jsonfile: path to the vietnam-format json annotation
    :param split_to_test_and_train: unused here, kept for interface compatibility
    :param label_dir: where to write the .txt label; default = image's own dir
    :param classes: target class names; list index = yolo class number
    :param yolo_trainfile: file to append this image's path to (skipped if None)
    :param check_dims: verify json dims against the actual image on disk
    :param visual_output: draw each bbox and show the image (blocks on keypress)
    :return: None
    '''
    print('converting json annotations in '+jsonfile+' to yolo')
    with open(jsonfile,'r') as fp:
        vietnam_dict = json.load(fp)
    img_filename = vietnam_dict['image_path']
    annotations = vietnam_dict['objects']
    dims = vietnam_dict['image_w_h']
    # json stores (w, h); we work in (h, w)
    im_h,im_w = (dims[1],dims[0])
    logging.debug('writing yolo for image {} hxw {}x{}\nannotations {} '.format(img_filename,im_h,im_w,annotations))
    # Resolve the image path unconditionally: it is also needed for the
    # trainfile entry at the end of the function.
    if not os.path.isabs(img_filename):
        file_path = os.path.join(os.path.dirname(jsonfile),img_filename)
    else:
        file_path = img_filename
    img_arr = None
    if check_dims or visual_output:
        if not os.path.exists(file_path):
            logging.warning('{} does not exist'.format(file_path))
        img_arr = cv2.imread(file_path)
        if img_arr is None:
            logging.warning('could not find {}'.format(file_path))
            return
        actual_h,actual_w = img_arr.shape[0:2]
        if actual_h != im_h or actual_w != im_w:
            logging.warning('image dims hw {} {} dont match json {} {}'.format(actual_h,actual_w,im_h,im_w))
            return
    if label_dir is None:
        img_parent = Utils.parent_dir(os.path.dirname(img_filename))
        img_diralone = os.path.dirname(img_filename).split('/')[-1]
        label_diralone = img_diralone+'labels'
        # keep labels and imgs in same dir, yolo is apparently ok with that
        label_dir = os.path.dirname(img_filename)
        print('using label dir {}'.format(label_dir))
        Utils.ensure_dir(label_dir)
        logging.debug('yolo img parent {} labeldir {} imgalone {} lblalone {} '.format(img_parent,label_dir,img_diralone,label_diralone))
    label_name = os.path.basename(img_filename).replace('.png','.txt').replace('.jpg','.txt').replace('.jpeg','.txt')
    if label_name[-4:] != '.txt':
        logging.warning('did not replace image suffix of {} with .txt'.format(img_filename))
        return
    label_path = os.path.join(label_dir,label_name)
    print('writing label to '+str(label_path))
    with open(label_path,'w') as fp:
        for annotation in annotations:
            bb_xywh = annotation['x_y_w_h']
            bb_yolo = imutils.xywh_to_yolo(bb_xywh,(im_h,im_w))
            object = annotation['label']
            # Source labels are mapped to target (hls) categories; unknown
            # labels pause for operator attention, then are skipped.
            if not object in constants.vietnam_to_hls_map:
                logging.warning('{} not found in constants.vietname to hls map'.format(object))
                raw_input('ret to cont')
                continue
            tg_object = constants.vietnam_to_hls_map[object]
            class_number = classes.index(tg_object)
            logging.debug('wxh {} bbxywh {} bbyolo {}\norigobj {} tgobj {} ind {}'.format((im_w,im_h),bb_xywh,bb_yolo,object,tg_object,class_number))
            line = str(class_number)+' '+str(bb_yolo[0])+' '+str(bb_yolo[1])+' '+str(bb_yolo[2])+' '+str(bb_yolo[3])+'\n'
            fp.write(line)
            if visual_output:
                img_arr = imutils.bb_with_text(img_arr,bb_xywh,tg_object)
    if visual_output:
        cv2.imshow('image',img_arr)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    if yolo_trainfile is None:
        return
    with open(yolo_trainfile,'a') as fp2:
        fp2.write(file_path+'\n')
def vietnam_dir_to_yolo(dir,visual_output=False):
    '''Convert every .json annotation in dir to yolo label files, collect the
    image paths into a filelist, and split it into train/test lists.

    Fix vs. original: the trainfile path was built by naive concatenation
    (dir+'filelist.txt'), which produces e.g. '/data/xfilelist.txt' when dir
    lacks a trailing slash; os.path.join is used instead.

    :param dir: directory containing vietnam-format .json annotation files
    :param visual_output: forwarded to json_vietnam_to_yolo (show each bbox)
    :return: path of the generated filelist
    '''
    json_files = [os.path.join(dir,f) for f in os.listdir(dir) if '.json' in f]
    yolo_trainfile = os.path.join(dir,'filelist.txt')
    Utils.ensure_file(yolo_trainfile)
    print('{} .json files in {}'.format(len(json_files),dir))
    # labels are written next to the images
    label_dir = dir
    for json_file in json_files:
        json_vietnam_to_yolo(json_file,yolo_trainfile=yolo_trainfile,label_dir=label_dir,visual_output=visual_output)
    create_nn_imagelsts.split_to_trainfile_and_testfile(yolo_trainfile)
    return yolo_trainfile
def read_many_yolo_bbs(imagedir='/data/jeremy/image_dbs/hls/data.vision.ee.ethz.ch/left/',labeldir=None,img_filter='.png'):
    '''Walk an image directory and display/process the yolo bbs of each image.

    For every image matching img_filter, the corresponding .txt label file is
    looked up in labeldir (default: sibling 'labels' dir of imagedir) and
    passed with the image path to read_yolo_bbs. Images without a label file
    are reported and skipped.
    '''
    if labeldir is None:
        labeldir = os.path.join(Utils.parent_dir(imagedir),'labels')
    candidates = sorted(name for name in os.listdir(imagedir) if img_filter in name)
    print('found {} files in {}, label dir {}'.format(len(candidates),imagedir,labeldir))
    for name in candidates:
        # label file: same basename, .txt instead of the image suffix
        label_path = os.path.join(labeldir,name).replace(img_filter,'.txt')
        if not os.path.isfile(label_path):
            print('{} not found '.format(label_path))
            continue
        read_yolo_bbs(label_path,os.path.join(imagedir,name))
def read_pascal_xml_write_yolo(dir='/media/jeremy/9FBD-1B00/hls_potential/voc2007/VOCdevkit/VOC2007',annotation_folder='Annotations',img_folder='JPEGImages',
                               annotation_filter='.xml'):
    '''Convert Pascal VOC xml annotations under dir to yolo label files and
    write a filelist of the images that contained relevant classes.

    nondestructive - if there are already label files these get added to not overwritten

    Fix vs. original: the filelist handle was opened and never closed; it is
    now managed with a context manager so the list is flushed on exit.

    :param dir: VOC root directory
    :param annotation_folder: subfolder holding the .xml annotations
    :param img_folder: subfolder holding the .jpg images
    :param annotation_filter: substring selecting annotation files
    :return: None
    '''
    # target categories; annotation classes outside this list are ignored
    classes = constants.hls_yolo_categories
    annotation_dir = os.path.join(dir,annotation_folder)
    img_dir = os.path.join(dir,img_folder)
    annotation_files = [os.path.join(annotation_dir,f) for f in os.listdir(annotation_dir) if annotation_filter in f]
    listfilename = os.path.join(dir,'filelist.txt')
    with open(listfilename, 'w') as list_file:
        for annotation_file in annotation_files:
            success = convert_pascal_xml_annotation(annotation_file,classes)
            if success:
                print('found relevant class(es)')
                # VOC image names are the annotation basename + '.jpg'
                filenumber = os.path.basename(annotation_file).replace('.xml','')
                jpgpath = os.path.join(img_dir,str(filenumber)+'.jpg')
                list_file.write(jpgpath+'\n')
def convert_pascal_xml_annotation(in_file,classes,labeldir=None):
    '''Convert one Pascal VOC xml annotation to yolo format, appending to
    <labeldir>/<basename>.txt (nondestructive).

    Objects whose class is not in classes, or marked difficult, are skipped.

    Fixes vs. original: the output file was re-opened once per object inside
    the loop and never closed (handle leak); it is now written once via a
    context manager, still only created when at least one relevant object
    exists. Also `== None` -> `is None`.

    :param in_file: path to the .xml annotation
    :param classes: target class names; list index = yolo class number
    :param labeldir: output dir; default = sibling 'labels' dir of the annotations
    :return: True if at least one relevant object was written
    '''
    filenumber = os.path.basename(in_file).replace('.xml','')
    if labeldir is None:
        parent_dir = Utils.parent_dir(os.path.dirname(in_file))
        labeldir = os.path.join(parent_dir,'labels')
        Utils.ensure_dir(labeldir)
    out_filename = os.path.join(labeldir, filenumber+'.txt')
    print('in {} out {}'.format(in_file,out_filename))
    tree = ET.parse(in_file)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)
    # Collect all relevant yolo lines first so the file is only touched when
    # something relevant was found (preserves original behavior).
    yolo_lines = []
    for obj in root.iter('object'):
        difficult = obj.find('difficult').text
        cls = obj.find('name').text
        if cls not in classes or int(difficult) == 1:
            continue
        cls_id = classes.index(cls)
        xmlbox = obj.find('bndbox')
        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
        bb = convert_x1x2y1y2_to_yolo((w,h), b)
        yolo_lines.append(str(cls_id) + " " + " ".join([str(round(a,4)) for a in bb]) + '\n')
    if not yolo_lines:
        return False
    # append (nondestructive) and make the file group/world readable+writable
    with open(out_filename, 'a+') as out_file:
        out_file.writelines(yolo_lines)
    os.chmod(out_filename, 0o666)
    return True
def read_pascal_txt_write_yolo(dir='/media/jeremy/9FBD-1B00/hls_potential/voc2005_1/',
                               annotation_folder='all_relevant_annotations',img_folder='all_relevant_images',
                               annotation_filter='.txt',image_filter='.png',yolo_annotation_dir='labels'):
    '''Convert Pascal-style .txt annotations under dir to yolo label files and
    write a filelist of the images that contained relevant classes.

    nondestructive - if there are already label files these get added to not overwritten

    Fixes vs. original: the filelist handle was never closed (now a context
    manager) and img_dir was redundantly recomputed inside the loop.

    :param dir: dataset root directory
    :param annotation_folder: subfolder holding the .txt annotations
    :param img_folder: subfolder holding the images
    :param annotation_filter: substring selecting annotation files
    :param image_filter: image file suffix (e.g. '.png')
    :param yolo_annotation_dir: subfolder to write yolo labels into
    :return: None
    '''
    # target categories; annotation classes outside this list are ignored
    classes = constants.hls_yolo_categories
    annotation_dir = os.path.join(dir,annotation_folder)
    img_dir = os.path.join(dir,img_folder)
    annotation_files = [os.path.join(annotation_dir,f) for f in os.listdir(annotation_dir) if annotation_filter in f]
    listfilename = os.path.join(dir,'filelist.txt')
    yolo_annotation_path = os.path.join(dir,yolo_annotation_dir)
    Utils.ensure_dir(yolo_annotation_path)
    with open(listfilename, 'w') as list_file:
        for annotation_file in annotation_files:
            out_filename = os.path.join(yolo_annotation_path,os.path.basename(annotation_file))
            print('outfile'+out_filename)
            success = convert_pascal_txt_annotation(annotation_file,classes,out_filename)
            if success:
                print('found relevant class(es)')
                # image name = annotation basename with the image suffix
                filename = os.path.basename(annotation_file).replace(annotation_filter,'')
                imgpath = os.path.join(img_dir,str(filename)+image_filter)
                list_file.write(imgpath+'\n')
def convert_pascal_txt_annotation(in_file,classes,out_filename):
    '''Convert one Pascal-style .txt annotation to yolo format, appending to
    out_filename.

    The annotation is a line-oriented text file: 'Image filename'/'Image size'
    header lines followed by '# Details' stanzas whose bbox coordinates
    (xmin ymin xmax ymax) live two lines below the Details line.

    Fixes vs. original:
    - `success` was only assigned inside the '# Details' branch, so a file
      with no Details line raised NameError at the return; it also reflected
      only the LAST object, so a relevant object followed by an irrelevant
      one reported False even though lines were written. The return value now
      means "at least one relevant object was written".
    - the output handle was opened per object and never closed.

    :param in_file: path to the Pascal .txt annotation
    :param classes: target class names; list index = yolo class number
    :param out_filename: yolo label file to create/append
    :return: True if at least one relevant object was written
    '''
    print('in {} out {}'.format(in_file,out_filename))
    with open(in_file,'r') as fp:
        lines = fp.readlines()
    wrote_any = False
    w = h = None  # set by the 'Image size' header before any Details stanza
    for i in range(len(lines)):
        if 'Image filename' in lines[i]:
            imfile = lines[i].split()[3]
            print('imfile:'+imfile)
        if 'Image size' in lines[i]:
            nums = re.findall('\d+', lines[i])
            print(lines[i])
            print('nums'+str(nums))
            w = int(nums[0])
            h = int(nums[1])
            print('h {} w {}'.format(h,w))
        if '# Details' in lines[i]:
            object = lines[i].split()[5].replace('(','').replace(')','').replace('"','')
            # bbox numbers are two lines below the Details line
            nums = re.findall('\d+', lines[i+2])
            print('obj {} nums {}'.format(object,nums))
            cls_id = tg_class_from_pascal_class(object,classes)
            if cls_id is None:
                print('NO RELEVANT CLASS FOUND')
                continue
            print('class index '+str(cls_id)+' '+classes[cls_id])
            b = (int(nums[1]), int(nums[3]), int(nums[2]), int(nums[4])) #file has xmin ymin xmax ymax
            print('bb_x1x2y1y2:'+str(b))
            bb = convert_x1x2y1y2_to_yolo((w,h), b)
            print('bb_yolo'+str(bb))
            mode = 'a' if os.path.exists(out_filename) else 'w'
            with open(out_filename, mode) as out_file:
                out_file.write(str(cls_id) + " " + " ".join([str(round(a,4)) for a in bb]) + '\n')
            wrote_any = True
    return wrote_any
def tgdict_to_api_dict(tgdict):
'''
| |
<gh_stars>0
import numpy as np
import pandas as pd
import cv2 as cv
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from IPython.display import Image
from moviepy.editor import VideoFileClip
from IPython.display import HTML
import pickle
import io
import os
import glob
def abs_sobel_thresh(img, orient='x', thresh=(0,255)):
    """Binary mask of the absolute Sobel gradient of an RGB image.

    The gradient is taken along 'x' or 'y', rescaled to 0-255, and pixels
    whose scaled value lies within thresh (inclusive both ends) are set to 1.
    """
    grayscale = cv.cvtColor(img, cv.COLOR_RGB2GRAY)
    # Derivative axis: (1,0) for x, (0,1) for y.
    if orient == 'x':
        gradient = np.absolute(cv.Sobel(grayscale, cv.CV_64F, 1, 0))
    if orient == 'y':
        gradient = np.absolute(cv.Sobel(grayscale, cv.CV_64F, 0, 1))
    # Rescale back to 8 bit
    scaled = np.uint8(255*gradient/np.max(gradient))
    mask = np.zeros_like(scaled)
    # Inclusive (>=, <=) thresholds
    mask[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1
    return mask
def mag_threshold(img, sobel_kernel=3, thresh=(0, 255)):
    """Binary mask of gradient magnitude.

    Sobel x/y gradients are combined into a magnitude, rescaled to 0-255
    uint8, and pixels strictly inside thresh are set to 1 (exclusive bounds,
    unlike abs_sobel_thresh).
    """
    grayscale = cv.cvtColor(img, cv.COLOR_RGB2GRAY)
    grad_x = cv.Sobel(grayscale, cv.CV_64F, 1, 0, ksize=sobel_kernel)
    grad_y = cv.Sobel(grayscale, cv.CV_64F, 0, 1, ksize=sobel_kernel)
    magnitude = np.sqrt(grad_x**2 + grad_y**2)
    # Scale to 8-bit (0 - 255) uint8
    scaled = (magnitude/(np.max(magnitude)/255)).astype(np.uint8)
    mask = np.zeros_like(scaled)
    mask[(scaled > thresh[0]) & (scaled < thresh[1])] = 1
    return mask
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
    """Binary mask of gradient direction.

    Direction is arctan2(|sobel_y|, |sobel_x|) per pixel, so values lie in
    [0, pi/2]; pixels strictly inside thresh are set to 1.
    """
    grayscale = cv.cvtColor(img, cv.COLOR_RGB2GRAY)
    abs_grad_x = np.absolute(cv.Sobel(grayscale, cv.CV_64F, 1, 0, ksize=sobel_kernel))
    abs_grad_y = np.absolute(cv.Sobel(grayscale, cv.CV_64F, 0, 1, ksize=sobel_kernel))
    direction = np.arctan2(abs_grad_y, abs_grad_x)
    mask = np.zeros_like(direction)
    mask[(direction > thresh[0]) & (direction < thresh[1])] = 1
    return mask
def hls_select(img, sthresh=(0, 255), lthresh=(-1, 255)):
    """Binary mask from saturation (S) and lightness (L) thresholds in HLS space.

    A pixel is 1 when S is within sthresh (inclusive both ends) AND L is
    within lthresh (lower bound exclusive, upper inclusive — as in the
    original code).

    Fix vs. original: the default lthresh=() raised IndexError whenever the
    caller omitted lthresh. The new default (-1, 255) passes every uint8 L
    value, i.e. a pure saturation threshold; all calls that pass lthresh
    explicitly behave exactly as before.
    """
    hls_img = cv.cvtColor(img, cv.COLOR_RGB2HLS)
    L = hls_img[:,:,1]
    S = hls_img[:,:,2]
    binary_output = np.zeros_like(S)
    binary_output[(S >= sthresh[0]) & (S <= sthresh[1])
                  & (L > lthresh[0]) & (L <= lthresh[1])] = 1
    return binary_output
def binary_pipeline(img):
    # Combine a color (saturation/lightness) threshold with gradient
    # thresholds into a single binary lane-pixel mask.
    # Input: RGB image; output: single-channel 0/1 image.
    img_copy = cv.GaussianBlur(img, (3, 3), 0)  # light blur to suppress noise
    #img_copy = np.copy(img)
    # color channels
    s_binary = hls_select(img_copy, sthresh=(140, 255), lthresh=(120, 255))
    #red_binary = red_select(img_copy, thresh=(200,255))
    # Sobel x
    x_binary = abs_sobel_thresh(img_copy,thresh=(25, 200))
    y_binary = abs_sobel_thresh(img_copy,thresh=(25, 200), orient='y')
    xy = cv.bitwise_and(x_binary, y_binary)  # NOTE(review): computed but never used below
    #magnitude & direction
    mag_binary = mag_threshold(img_copy, sobel_kernel=3, thresh=(30,100))
    dir_binary = dir_threshold(img_copy, sobel_kernel=3, thresh=(0.8, 1.2))
    # Stack each channel
    gradient = np.zeros_like(s_binary)
    # keep pixels passing both x AND y gradient tests, or both magnitude AND direction tests
    gradient[((x_binary == 1) & (y_binary == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1
    # union of the color mask and the gradient mask is the final result
    final_binary = cv.bitwise_or(s_binary, gradient)
    return final_binary
#This is here because of local variable conflicts
# Module-level holder for the inverse perspective matrix; warp_image()
# overwrites it (via `global`) so later code can unwarp the lane overlay.
inverse_perspective_transform = None
def warp_image(img):
    """Perspective-warp a road image to a top-down ("bird's eye") view.

    Also recomputes the module-level inverse_perspective_transform so the
    warp can be undone later.

    Returns (warped_img, inverse_perspective_transform).
    """
    image_size = (img.shape[1], img.shape[0])
    x = img.shape[1]
    y = img.shape[0]
    #the "order" of points in the polygon you are defining does not matter
    #but they need to match the corresponding points in destination_points!
    # Hand-tuned trapezoid around the lane in the source image.
    # NOTE(review): (2/3) is 0 under Python 2 integer division — confirm this
    # module runs under Python 3.
    source_points = np.float32([
    [0.117 * x, y],
    [(0.5 * x) - (x*0.078), (2/3)*y],
    [(0.5 * x) + (x*0.078), (2/3)*y],
    [x - (0.117 * x), y]
    ])
    # Rectangle the trapezoid maps onto in the warped image.
    destination_points = np.float32([
    [0.25 * x, y],
    [0.25 * x, 0],
    [x - (0.25 * x), 0],
    [x - (0.25 * x), y]
    ])
    perspective_transform = cv.getPerspectiveTransform(source_points, destination_points)
    global inverse_perspective_transform
    inverse_perspective_transform = cv.getPerspectiveTransform( destination_points, source_points)
    warped_img = cv.warpPerspective(img, perspective_transform, image_size, flags=cv.INTER_LINEAR)
    return warped_img, inverse_perspective_transform
def track_lanes_initialize(binary_warped):
    """Locate left/right lane lines in a binary warped image from scratch.

    A column histogram of the bottom half picks the two lane bases; a
    sliding-window search collects lane pixels bottom-to-top; each line is
    fit with a 2nd-order polynomial, then refined by re-selecting pixels
    within +/- margin of the first fit and refitting.

    Fix vs. original: np.int (deprecated numpy 1.20, removed in 1.24) is
    replaced by the builtin int() — identical truncation behavior. Dead
    ploty/left_fitx/right_fitx computations (results discarded) removed.

    :param binary_warped: single-channel 0/1 top-down lane image
    :return: (left_fit, right_fit) — np.polyfit degree-2 coefficients
    """
    global window_search  # declared by the original; not assigned here
    # Histogram of the bottom half: the two peaks are the lane-line bases.
    histogram = np.sum(binary_warped[int(binary_warped.shape[0]/2):,:], axis=0)
    # Create an output image to draw on and visualize the result
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    # Take the max of each half separately so we get one base per lane.
    midpoint = int(histogram.shape[0]/2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # Number of sliding windows (height is truncated, not required to divide evenly)
    nwindows = 9
    window_height = int(binary_warped.shape[0]/nwindows)
    # x and y positions of all nonzero pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current window centers, updated per window
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Width of the windows +/- margin
    margin = 100
    # Minimum number of pixels found to recenter window
    minpix = 50
    # Per-window index arrays of left/right lane pixels
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows one by one, bottom to top
    for window in range(nwindows):
        # Window boundaries in x and y (and right and left)
        win_y_low = int(binary_warped.shape[0] - (window+1)*window_height)
        win_y_high = int(binary_warped.shape[0] - window*window_height)
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image (debug only; out_img is
        # local and not returned)
        cv.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 3)
        cv.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 3)
        # Nonzero pixels inside each window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If > minpix pixels found, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Flatten the per-window index arrays
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second order polynomial to each line
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # Refinement pass: re-select pixels within +/- margin of the fitted
    # curves and refit (same search as track_lanes_update).
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    margin = 100
    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] + margin)))
    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] + margin)))
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    return left_fit,right_fit
def track_lanes_update(binary_warped, left_fit, right_fit):
    """Refit the lane polynomials by searching around the previous fits.

    Instead of a full sliding-window search, lane pixels are selected within
    +/- margin of last frame's fitted curves and refit. Every 10th frame the
    module-level window_search flag is set so the caller re-runs the full
    initialization (relies on module globals frame_count and window_search).

    Fix vs. original: removed the dead ploty/left_fitx/right_fitx
    computation whose results were discarded before returning.

    :param binary_warped: single-channel 0/1 top-down lane image
    :param left_fit: previous frame's left-lane polyfit coefficients
    :param right_fit: previous frame's right-lane polyfit coefficients
    :return: (left_fit, right_fit, leftx, lefty, rightx, righty)
    """
    global window_search
    global frame_count
    # repeat window search every 10 frames to maintain stability
    if frame_count % 10 == 0:
        window_search = True
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    margin = 100
    # Pixels within +/- margin of each previously fitted curve.
    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] + margin)))
    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] + margin)))
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second order polynomial to each line
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    return left_fit,right_fit,leftx,lefty,rightx,righty
def get_val(y, poly_coeff):
    """Evaluate the degree-2 polynomial poly_coeff at y.

    poly_coeff is (a, b, c) as returned by np.polyfit(..., 2):
    a*y**2 + b*y + c.
    """
    quadratic, linear, constant = poly_coeff[0], poly_coeff[1], poly_coeff[2]
    return quadratic * y**2 + linear * y + constant
def lane_fill_poly(binary_warped,undist,left_fit,right_fit):
# Generate x and y values
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
left_fitx = get_val(ploty,left_fit)
right_fitx = get_val(ploty,right_fit)
# Create an image to draw the lines on
warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast x and y for cv2.fillPoly()
| |
# -*- coding: utf-8 -*-
# Copyright (C) 2014 AT&T Labs All Rights Reserved.
# Copyright (C) 2014 University of Pennsylvania All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Inception utilities"""
import logging
import os
import time
import struct
import md5
from collections import defaultdict
from collections import deque
from SimpleXMLRPCServer import SimpleXMLRPCServer
from xmlrpclib import ServerProxy
import socket
from kazoo import client
from oslo.config import cfg
import bidict
from ryu import log
from ryu.lib.dpid import str_to_dpid
from ryu.app import inception_dhcp as i_dhcp
from ryu.app import inception_priority as i_priority
from ryu.lib import hub
from ryu.lib.packet.packet import Packet
from ryu.lib.packet.dhcp import dhcp
from ryu.lib.packet.ethernet import ethernet
from ryu.lib.packet.udp import udp
from ryu.ofproto import ether
from ryu.ofproto import inet
LOGGER = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('ip_prefix', 'ryu.app.inception_conf')
CONF.import_opt('dhcp_ip', 'ryu.app.inception_conf')
CONF.import_opt('dhcp_port', 'ryu.app.inception_conf')
CONF.import_opt('arp_timeout', 'ryu.app.inception_conf')
CONF.import_opt('zookeeper_storage', 'ryu.app.inception_conf')
CONF.import_opt('interdcenter_port_prefix', 'ryu.app.inception_conf')
CONF.import_opt('intradcenter_port_prefix', 'ryu.app.inception_conf')
class Topology(object):
    """
    Build switch level topology of Inception network.
    Gateway is assumed to have no local VMs connected.
    Topology is maintained as two dictionaries:
    dpid_ip_to_port ({local dpid -> {remote ip -> local port}}) and
    ip_to_dpid (bidirectional {local ip <-> local dpid}).
    """
    def __init__(self, gateway_ips=()):
        # {dpid_gw -> {dcenter_id -> port_no}}
        self.gateway_to_dcenters = defaultdict(dict)
        # IPs which mark a connecting switch as a gateway
        self.gateway_ips = gateway_ips
        # dpids of gateway switches seen so far
        self.gateways = []
        # dpid of the switch the DHCP server is attached to (None until seen)
        self.dhcp_switch = None
        # port number on dhcp_switch facing the DHCP server
        self.dhcp_port = None
        # {local dpid -> {remote ip -> local port}}
        self.dpid_ip_to_port = defaultdict(dict)
        # {local ip -> local dpid}
        self.ip_to_dpid = bidict.bidict()
    @classmethod
    def topology_from_gateways(cls, gateway_ips_str):
        # Alternate constructor from a string listing of gateway IPs.
        # NOTE(review): str_to_tuple is presumably a module-level helper
        # defined elsewhere in this file — confirm.
        gateway_ips = str_to_tuple(gateway_ips_str)
        topology = cls(gateway_ips)
        return topology
    def update_switch(self, dpid_new, ip_new, ports):
        """Update switch topology"""
        # Register the switch's ip<->dpid mapping, parse its ports, then
        # detect the special DHCP-server and gateway roles by IP.
        self.ip_to_dpid[ip_new] = dpid_new
        LOGGER.info("Add: (switch=%s) -> (ip=%s)", dpid_new, ip_new)
        self.parse_switch_ports(dpid_new, ip_new, ports)
        if ip_new == CONF.dhcp_ip:
            self.dhcp_switch = dpid_new
            LOGGER.info("DHCP server switch: (ip=%s), (dpid=%s)", ip_new,
                        dpid_new)
        if ip_new in self.gateway_ips:
            self.gateways.append(dpid_new)
            LOGGER.info("Gateway switch: (ip=%s), (dpid=%s)", ip_new, dpid_new)
    def parse_switch_ports(self, dpid, ip, switch_ports):
        """Parse port name to extract connection information"""
        local_port_prefix = CONF.intradcenter_port_prefix
        remote_port_prefix = CONF.interdcenter_port_prefix
        for port in switch_ports:
            port_no = str(port.port_no)
            # Port_name: e.g., "obr_<ip_prefix>" — intra-datacenter peer link
            if port.name.startswith(local_port_prefix) and '_' in port.name:
                peer_ip = self.extract_ip_addr(CONF.ip_prefix, port.name)
                LOGGER.info("Add: (switch=%s, peer_ip=%s) -> (port=%s)",
                            dpid, peer_ip, port_no)
                self.dpid_ip_to_port[dpid][peer_ip] = port_no
            # Port_name: e.g., "gateway_<dcenter_id>" — inter-datacenter link
            elif port.name.startswith(remote_port_prefix):
                peer_dcenter = self.extract_dcenter(port.name)
                self.gateway_to_dcenters[dpid][peer_dcenter] = port_no
                LOGGER.info("New inter-datacenter connection:"
                            "(gateway=%s) -> (datacenter=%s)",
                            dpid, peer_dcenter)
            # Port name matches DHCP port
            if port.name == CONF.dhcp_port:
                self.dhcp_port = port_no
    def extract_ip_addr(self, ip_prefix, port_name):
        """Extract IP address from port name"""
        # "<prefix-tag>_<suffix1>_<suffix2>" -> "<ip_prefix>.<suffix1>.<suffix2>"
        _, ip_suffix1, ip_suffix2 = port_name.split('_')
        peer_ip = '.'.join((ip_prefix, ip_suffix1, ip_suffix2))
        return peer_ip
    def extract_dcenter(self, port_name):
        """Extract datacenter id from port name"""
        _, dcenter_id = port_name.split('_')
        return dcenter_id
    def gateway_connected(self):
        """Check if any gateway is connected or not"""
        # Truthy list doubles as the boolean answer.
        return self.gateways
    def is_gateway(self, dpid):
        """Check if dpid is gateway"""
        return (dpid in self.gateways)
    def get_gateways(self):
        # All gateway dpids seen so far.
        return self.gateways
    def is_dhcp(self, dpid):
        """Check if dpid is dhcp server"""
        return dpid == self.dhcp_switch
    def get_fwd_port(self, dpid1, dpid2):
        # Port on dpid1 that leads toward dpid2 (dpid2 -> its ip via the
        # bidict reverse lookup, then ip -> local port on dpid1).
        ip_2 = self.ip_to_dpid[:dpid2] # bidict reverse query
        port = self.dpid_ip_to_port[dpid1][ip_2]
        return port
    def get_dcenter_port(self, dpid_gw, dcenter):
        # Port on gateway dpid_gw that leads toward the given datacenter.
        return self.gateway_to_dcenters[dpid_gw][dcenter]
    def get_neighbors(self, dpid):
        """Get neighbors in the form of {dpid_1: port_1, dpid_2, port_2, ...}.
        Skip neighbor switches not connected yet (i.e., not in self.ip_to_dpid)
        """
        ip_to_port = self.dpid_ip_to_port[dpid]
        dpid_to_port = {}
        # NOTE(review): the loop variable shadows the dpid parameter; harmless
        # here since the parameter is not read after this point.
        for ip, port in ip_to_port.items():
            dpid = self.ip_to_dpid.get(ip)
            if dpid is not None:
                dpid_to_port[dpid] = port
        return dpid_to_port
class SwitchManager(object):
    """Manage openflow-switches"""
    # Size of the per-switch VM-id pool and upper bound for switch ids.
    SWITCH_MAXID = 65535
    def __init__(self, self_dcenter='0'):
        # Zookeeper data
        # Record all switches id assignment, to detect switch id conflict
        # {dcenter => {dpid => id}}
        self.dcenter_to_swcids = defaultdict(dict)
        # Local cache
        self.self_dcenter = self_dcenter
        # Record available ids of each switch which can be assigned to VMs
        # {dpid => deque(available ids)}
        self.dpid_to_vmids = defaultdict(deque)
    def init_swc_vmids(self, dpid):
        # Seed the full VM-id pool [0, SWITCH_MAXID) for a switch.
        # NOTE(review): xrange — this module targets Python 2.
        self.dpid_to_vmids[dpid] = deque(xrange(self.SWITCH_MAXID))
    def create_vm_id(self, dpid):
        # Pop an id from the switch's pool; returns it as a string,
        # or None when the pool is exhausted.
        try:
            vm_id = self.dpid_to_vmids[dpid].pop()
            return str(vm_id)
        except IndexError:
            LOGGER.info("ERROR: Index Error")
            return None
    def recollect_vm_id(self, vm_id, dpid):
        # Return a released id to the pool (opposite end from pop()).
        self.dpid_to_vmids[dpid].appendleft(int(vm_id))
    def generate_swc_id(self, dpid):
        """Create switch id"""
        # Hash the dpid into [1, SWITCH_MAXID]; a conflict is logged but not
        # resolved, and the conflicting id is still returned (see TODO).
        swc_id = str((hash(dpid) % self.SWITCH_MAXID) + 1)
        local_ids = self.dcenter_to_swcids[self.self_dcenter]
        if swc_id in local_ids.values():
            # TODO(chen): Hash conflict
            LOGGER.info("ERROR: switch id conflict: %s", swc_id)
        else:
            local_ids[dpid] = swc_id
        return swc_id
    def update_swc_id(self, dcenter, dpid, swc_id):
        # Record a (possibly remote) switch id learned externally.
        self.dcenter_to_swcids[dcenter][dpid] = swc_id
    def get_swc_id(self, dcenter, dpid):
        # Recorded switch id, or None if unknown.
        return self.dcenter_to_swcids[dcenter].get(dpid)
    def invalidate_vm_id(self, dpid, vm_id):
        # Remove vm_id from dpid's free pool so it cannot be handed out.
        # Returns True only when the id was present and removed.
        if dpid not in self.dpid_to_vmids:
            # NOTE(review): first sighting seeds the pool and returns False
            # WITHOUT removing vm_id from the fresh pool — confirm intended.
            self.dpid_to_vmids[dpid] = deque(xrange(self.SWITCH_MAXID))
            return False
        try:
            self.dpid_to_vmids[dpid].remove(int(vm_id))
            return True
        except ValueError:
            return False
class VmManager(object):
    """Manage virtual machines in the network"""
    VM_MAXID = 65535
    def __init__(self):
        # Local cache
        # {mac => (dpid, vm_id)} -- id bookkeeping for vmac generation
        self.mac_to_id = {}
        # {mac => (dcenter, dpid, port)} -- to detect live migration
        self.mac_to_position = {}
        # {mac => dpid} -- which switch already has this VM's local flow
        self.mac_to_dpid = {}
    def update_vm(self, dcenter, dpid, port, mac, vm_id):
        """Record a VM's current position and its (switch, id) pair."""
        self.mac_to_position[mac] = (dcenter, dpid, port)
        LOGGER.info("Update: (mac=%s) => (dcenter=%s, switch=%s, port=%s)",
                    mac, dcenter, dpid, port)
        self.mac_to_id[mac] = (dpid, vm_id)
    def get_position(self, mac):
        """Return (dcenter, dpid, port) for mac, or None if unknown."""
        return self.mac_to_position.get(mac)
    def get_vm_id(self, mac):
        """Return the VM id for mac, or None if unknown."""
        record = self.mac_to_id.get(mac)
        return None if record is None else record[1]
    def mac_exists(self, mac):
        """True if a position is recorded for mac."""
        return mac in self.mac_to_position
    def flow_setup(self, mac, dpid):
        """Remember that mac's local flow is installed on dpid."""
        self.mac_to_dpid[mac] = dpid
    def flow_exists(self, mac, dpid):
        """True if mac's recorded flow switch is dpid (KeyError if none recorded)."""
        return self.mac_to_dpid[mac] == dpid
    def position_shifts(self, mac, dcenter, dpid, port):
        """True if mac is known and its recorded position differs from the given one."""
        if mac not in self.mac_to_position:
            return False
        return self.mac_to_position[mac] != (dcenter, dpid, port)
class VmacManager(object):
"""
Create vmacs of VMs, switches and datacenters
"""
DCENTER_MASK = "ff:ff:00:00:00:00"
SWITCH_MASK = "ff:ff:ff:ff:00:00"
TENANT_MASK = "00:00:00:00:00:ff"
def __init__(self, self_dcenter='0'):
# zookeeper data
# Record guests which queried vmac,
# to inform of VMs during live migration
# {vmac => {mac => time}}
self.vmac_to_queries = defaultdict(dict)
# Local cache
# All Switches' virtual MAC, to facilitate vmac generation
# {dpid => vmac}
self.dpid_to_vmac = {}
# All VMs' virtual MAC, to facilitate ARP resolution
# {mac => vmac}
self.mac_to_vmac = {}
def get_query_macs(self, vmac):
if vmac not in self.vmac_to_queries:
return []
query_list = []
for mac_query in self.vmac_to_queries[vmac].keys():
time_now = time.time()
query_time = self.vmac_to_queries[vmac][mac_query]
if (time_now - float(query_time)) > CONF.arp_timeout:
del self.vmac_to_queries[vmac][mac_query]
else:
query_list.append(mac_query)
return query_list
def del_vmac_query(self, vmac):
self.vmac_to_queries.pop(vmac, None)
def update_query(self, vmac, mac, query_time):
self.vmac_to_queries[vmac][mac] = query_time
def create_dc_vmac(self, dcenter_str):
"""Generate MAC address for datacenter based on datacenter id.
Address form: xx:xx:00:00:00:00
xx:xx is converted from data center id
"""
dcenter = int(dcenter_str)
if dcenter > 65535:
return
dcenter_high = (dcenter >> 8) & 0xff
dcenter_low = dcenter & 0xff
dcenter_vmac = "%02x:%02x:00:00:00:00" % (dcenter_high, dcenter_low)
return dcenter_vmac
def create_swc_vmac(self, dcenter, dpid, swc_id_str):
"""Generate MAC address prefix for switch based on
datacenter id and switch id.
Address form: xx:xx:yy:yy:00:00
xx:xx is converted from data center id
yy:yy is converted from switch id
"""
dcenter_vmac = self.create_dc_vmac(dcenter)
dcenter_prefix = self.get_dc_prefix(dcenter_vmac)
swc_id = int(swc_id_str)
switch_high = (swc_id >> 8) & 0xff
switch_low = swc_id & 0xff
switch_suffix = ("%02x:%02x:00:00" % (switch_high, switch_low))
switch_vmac = ':'.join((dcenter_prefix, switch_suffix))
self.dpid_to_vmac[dpid] = switch_vmac
return switch_vmac
def create_vm_vmac(self, mac, tenant_manager, vm_manager):
"""Generate virtual MAC address of a VM"""
_, dpid, _ = vm_manager.get_position(mac)
switch_vmac = self.dpid_to_vmac[dpid]
switch_prefix = self.get_swc_prefix(switch_vmac)
vm_id = int(vm_manager.get_vm_id(mac))
vm_id_hex = vm_id & 0xff
vm_id_suffix = "%02x" % vm_id_hex
tenant_id = int(tenant_manager.get_tenant_id(mac))
tenant_id_hex = tenant_id & 0xff
tenant_id_suffix = "%02x" % tenant_id_hex
vmac = ':'.join((switch_prefix, vm_id_suffix, tenant_id_suffix))
self.mac_to_vmac[mac] = vmac
LOGGER.info("Create: (mac=%s) => (vmac=%s)", mac, vmac)
return vmac
def construct_vmac(self, dcenter, dpid, vm_id_str, tenant_id_str):
swc_vmac = self.dpid_to_vmac[dpid]
switch_prefix = self.get_swc_prefix(swc_vmac)
vm_id = int(vm_id_str)
vm_id_hex = vm_id & 0xff
vm_id_suffix = "%02x" % vm_id_hex
tenant_id = int(tenant_id_str)
tenant_id_hex = tenant_id & 0xff
tenant_id_suffix = "%02x" % tenant_id_hex
vmac = ':'.join((switch_prefix, vm_id_suffix, tenant_id_suffix))
return vmac
def get_swc_prefix(self, vmac):
    """Extract the switch prefix ("xx:xx:yy:yy") from a virtual MAC."""
    # Four octets plus three separators -> first 11 characters.
    return vmac[:11]
def get_dc_prefix(self, vmac):
    """Extract the datacenter prefix ("xx:xx") from a virtual MAC."""
    # Two octets plus one separator -> first 5 characters.
    return vmac[:5]
def get_vm_vmac(self, mac):
    """Return the cached vmac for a VM's real MAC, or None if unknown."""
    return self.mac_to_vmac.get(mac)
def get_swc_vmac(self, dpid):
    """Return the cached vmac prefix for a switch dpid, or None if unknown."""
    return self.dpid_to_vmac.get(dpid)
class FlowManager(object):
"""Handle flow installation/uninstallation"""
# Table id
PRIMARY_TABLE = 0
SECONDARY_TABLE = 1
def __init__(self, dpset=None, multi_tenancy=False):
self.dpset = dpset
# Switches on which interdatacenter flows are to be installed
self.interdcenter_waitinglist = []
| |
"\n")
# Initial checks
for el in self.model.grid.get_cell_list_contents(self.pos):
# Apply effects of fire
if el.__class__.__name__ == "Fire":
if not self.unconscious:
self.model.activeAgents.remove(self)
self.model.activeAgentsCount -= 1
else:
self.model.removedAgents.remove((self, "unconscious"))
self.dead = True
self.state = "DEAD"
self.model.schedule.remove(self)
self.model.grid.remove_agent(self)
self.model.removedAgents.append((self, "dead"))
log(str(self.unique_id) + " died in the fire")
return
# Check if the agent is not already unconscious and skip if so
if self.unconscious:
log("Agent unconscious.")
return
# Apply effects of smoke
if el.__class__.__name__ == "Smoke":
self.intoxication += 1
log("Agent inhaled smoke. Intoxication level: " + str(self.intoxication))
if self.intoxication >= 60 and self.unconscious == False:
self.unconscious = True
self.state = "UNCONSCIOUS"
self.model.activeAgents.remove(self)
self.model.removedAgents.append((self, "unconscious"))
self.model.activeAgentsCount -= 1
log(str(self.unique_id) + " lost consciousness")
continue
# If leading children, reduce the agent's speed
currFreq = self.freq
if self.leading:
self.freq = max(2, self.maxFreq)
else:
self.freq = self.maxFreq
if self.freq != currFreq:
if self.freq > 1:
self.offset = round(random.random() * self.freq)
else:
self.offset = 0
# Check if it's the agent's turn to move
if (self.model.schedule.steps + self.offset) % self.freq != 0:
log("Skipping step...")
return
# If the agent has moved to accommodate other agent, reset the flag and skip step
if self.moved == True:
self.moved = False
log("Skipping step...")
return
# If the agent has children, check if none of them are unconscious or dead and remove these
for child in self.children:
if self.model.getAgent(child).unconscious \
or self.model.getAgent(child).dead or self.model.getAgent(child).evacuated:
self.children.remove(child)
if child in self.foundChildren:
self.foundChildren.remove(child)
if child in self.ledChildren:
self.ledChildren.remove(child)
# Check if all of them are visible
if len(self.children) > 0:
self.locateChildren()
# Check distances to all children and the children the agent is leading
distancesToChildren = []
distancesToledChildren = []
for child in self.children:
dist = manDist(self.pos, self.model.getAgent(child).pos)
distancesToChildren.append((dist, child))
if child in self.ledChildren and child in self.foundChildren:
distancesToledChildren.append((dist, child))
# Check if all children were accounted for, if not, switch state to 'FINDING_CHILDREN'.
if len(self.foundChildren) < len(self.children):
if self.state != "FINDING_CHILDREN":
self.previousState = self.state
self.state = "FINDING_CHILDREN"
self.path = []
else:
if self.state == "FINDING_CHILDREN":
self.state = self.previousState
self.previousState = "FINDING_CHILDREN"
# Update the information about all visible children
for child in self.visibleChildren:
if child not in self.foundChildren and manDist(self.pos, self.model.getAgent(child).pos) < 4:
log("Found child:" + child)
self.waitingTimeForChildren = 0
self.foundChildren.append(child)
self.searchedChild = None
for child in self.foundChildren:
if self.model.getAgent(child).followedGuardian == None:
log("Waiting for the child to acknowledge the guardian.")
return
elif self.model.getAgent(child).followedGuardian.unique_id == self.unique_id\
and child not in self.ledChildren:
log("Leading child:" + child)
self.ledChildren.append(child)
self.leading = True
log("\nFound children: " + str(self.foundChildren))
log("Leading children: " + str(self.ledChildren))
if len(self.ledChildren) == 0:
self.leading = False
# If children are falling behind, wait for them to catch up
if self.leading:
if len(distancesToledChildren) > 0 and\
(min(distancesToledChildren)[0] > 2 or max(distancesToledChildren)[0] > len(self.ledChildren) + 2):
log("Waiting for the children to catch up")
self.waitingTimeForChildren += 1
if self.waitingTimeForChildren > 20:
log("Children considered lost, agent will move to collect them again.")
self.foundChildren = []
else:
return
r = random.random()
if r < 0.2 and self.waitingTimeForChildren > 20:
neighbors = self.model.grid.getObject(self.pos, "Cell").neighbors
for neighbor in neighbors:
free = True
for obj in self.model.grid.get_cell_list_contents(neighbors[neighbor]):
if obj.__class__.__name__ in ["Adult", "Child", "Obstacle", "Fire", "Heat", "Exit"]:
free = False
if free and self.target != None:
log("Moving to a free space to avoid potential blockage.")
log("MOVING " + str(neighbor) + ", TO " + str(neighbors[neighbor]))
self.model.grid.move_agent(self, neighbors[neighbor])
self.path = []
return
else:
self.waitingTimeForChildren = 0
# Check if the children are still active, and if not, remove them from the list
# If no children (or none left), set leading to false
else:
self.leading = False
# Take action depending on current state
if self.state == "FINDING_CHILDREN":
# pick the first (nearest) child to pick up.
children = self.children.copy()
if len(self.foundChildren) < len(self.children):
if self.searchedChild == None:
self.path = []
for i in range(len(children)):
child = children.pop(children.index(min(children)))
if child not in self.foundChildren:
self.searchedChild = child
log("Beginning the process of picking up a child: " + child)
break
else:
log("Picking up a child: " + self.searchedChild)
else:
log("All children found. Switching back to the previous state: " + self.previousState)
self.state = self.previousState
self.previousState = "FINDING_CHILDREN"
self.target, self.path = self.pickExit(self.knownExits)
return
# Compute a path and proceed toward the searched child
if self.path == []:
self.path = computePath(self.model.grid, self.pos, (self.model.getAgent(self.searchedChild).pos, self.searchedChild),
self.knownFires, self.knownHeat, self.knownObstacles, childTarget=True)
next = self.path[0]
log("Moving towards " + self.searchedChild + "'s location: " + str(self.path[-1]))
# If there is no possible path, abandon child and save yourself/other children
if next == "blocked":
log("Path to " + self.searchedChild + " is now considered blocked.")
self.children.remove(self.searchedChild)
if self.searchedChild in self.foundChildren:
self.foundChildren.remove(self.searchedChild)
if self.searchedChild in self.ledChildren:
self.ledChildren.remove(self.searchedChild)
self.path = []
self.searchedChild = None
return
# If currently waiting for other agents to move, attempt to recompute path
if self.waiting:
if (self.model.schedule.steps + self.offset) % (5 * self.freq) != 0:
log("Previously path was blocked. Waiting.")
else:
log("Path was blocked for 5 moves since the last time it was computed. "
+ "Attempting to recompute the path.")
self.path = computePath(self.model.grid, self.pos, (self.model.getAgent(self.searchedChild).pos, self.searchedChild),
self.knownFires, self.knownHeat, self.knownObstacles, childTarget=True)
next = self.path[0]
if next == "blocked":
return
# Check whether the next cell is blocked
blocked = False
for obj in self.model.grid.get_cell_list_contents(next):
if obj.__class__.__name__ in ["Adult", "Child", "Obstacle", "Fire", "Heat", "Smoke"]:
log("Path blocked by " + obj.__class__.__name__)
# If next cell blocked by another agent, wait, and move randomly to avoid blockages
if obj.__class__.__name__ in ["Adult", "Child"]:
log("Waiting (" + str(self.waitingTime) + ")")
self.waiting = True
self.waitingTime += self.freq
if self.waitingTime > self.patience * 2:
log("Child " + self.searchedChild + " is considered permanently lost and will be abandoned.")
self.children.remove(self.searchedChild)
if self.searchedChild in self.foundChildren:
self.foundChildren.remove(self.searchedChild)
if self.searchedChild in self.ledChildren:
self.ledChildren.remove(self.searchedChild)
self.searchedChild = None
# Every 10 steps try to get the blocking agent to move out of the way
if self.waitingTime % (10 * self.freq) == 0 and obj.moved == False:
adjacentToNeighbor = self.model.grid.getObject(obj.pos, "Cell").neighbors
for cell in adjacentToNeighbor:
if self.model.grid.cellAvailable(adjacentToNeighbor[cell],
["Adult", "Child", "Exit", "Fire", "Heat",
"Obstacle"]):
log("Asking agent " + obj.unique_id + " to move out of the way, to cell " + str(
adjacentToNeighbor[cell]) + ".")
if obj.path != None:
obj.path = [obj.pos] + obj.path
self.model.grid.move_agent(obj, adjacentToNeighbor[cell])
obj.moved = True
break
blocked = True
r = random.random()
if r < 0.2 and self.waitingTime > 20:
neighbors = self.model.grid.getObject(self.pos, "Cell").neighbors
for neighbor in neighbors:
free = True
for obj in self.model.grid.get_cell_list_contents(neighbors[neighbor]):
if obj.__class__.__name__ in ["Adult", "Child", "Obstacle", "Fire", "Heat", "Exit"]:
free = False
if free and self.target != None and self.searchedChild != None:
log("Moving to a free space to avoid potential blockage.")
log("MOVING " + str(neighbor) + ", TO " + str(neighbors[neighbor]))
self.model.grid.move_agent(self, neighbors[neighbor])
self.path = computePath(self.model.grid, self.pos, (self.model.getAgent(self.searchedChild).pos, self.searchedChild),
self.knownFires, self.knownHeat, self.knownObstacles, childTarget=True)
break
break
# If next cell blocked by a non-agent, attempt to update path
else:
self.path = computePath(self.model.grid, self.pos, (self.model.getAgent(self.searchedChild).pos, self.searchedChild),
self.knownFires, self.knownHeat, self.knownObstacles, childTarget=True)
next = self.path[0]
# If no path possible anymore, end step
if next == "blocked":
log("No path possible.")
blocked = True
# If the new path is blocked by an agent, end step
elif next != "reached" and next != None:
for obj in self.model.grid.get_cell_list_contents(next):
if obj.__class__.__name__ in ["Adult", "Child"]:
log("Path blocked by " + obj.__class__.__name__ + ".")
blocked = True
break
# If possible, move to the next cell
if not blocked:
log("MOVING TO " + str(next))
del (self.path[0])
self.model.grid.move_agent(self, next)
# Decrease waiting time counter:
self.waiting = False
if self.waitingTime > 0:
self.waitingTime -= 2* self.freq
elif self.state == "EVACUATING":
# If no target, determine if any eligible exit exists
if self.target == None:
self.target, self.path = self.pickExit(self.knownExits)
if self.target != None:
log("Updated target to " + str(self.target[1]) + " " + str(self.target[0]))
else:
log("No known available exits. Switching state to 'EXPLORING'.")
self.previousState = self.state
self.state = "EXPLORING"
self.target = None
self.path = []
return
# If state entered with no specified path, compute path
if self.path == []:
self.path = computePath(self.model.grid, self.pos, self.target,
self.knownFires, self.knownHeat, self.knownObstacles)
| |
thread = api.auth_token_create_with_http_info(data, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param Authenticate data: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(Tokens, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'data'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method auth_token_create" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'data' is set
if self.api_client.client_side_validation and ('data' not in local_var_params or # noqa: E501
local_var_params['data'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `data` when calling `auth_token_create`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in local_var_params:
body_params = local_var_params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Bearer'] # noqa: E501
return self.api_client.call_api(
'/auth/token/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Tokens', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def auth_users_list(self, **kwargs):  # noqa: E501
    """List user details (read-only endpoint).  # noqa: E501

    Superusers and staff may view all users; users with a manage role
    may view all users of the organisations they manage; regular users
    may view only their own details.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` for an
    asynchronous request, in which case the request thread is
    returned.

    >>> thread = api.auth_users_list(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str username: exact-match filter; the suffixed variants
        ``__contains``, ``__icontains``, ``__in`` (comma-separated),
        ``__startswith``, ``__istartswith``, ``__endswith`` and
        ``__regex`` are also accepted.
    :param str email: same filter suffixes as ``username``.
    :param int limit: Number of results to return per page.
    :param int offset: The initial index from which to return the results.
    :param _preload_content: if False, the urllib3.HTTPResponse object
        is returned without reading/decoding response data.
        Default is True.
    :param _request_timeout: total timeout in seconds, or a
        (connection, read) tuple.
    :return: InlineResponse200.  If the method is called
        asynchronously, returns the request thread.
    """
    # The plain variant always strips status code and headers from the
    # response before returning it.
    kwargs['_return_http_data_only'] = True
    return self.auth_users_list_with_http_info(**kwargs)  # noqa: E501
def auth_users_list_with_http_info(self, **kwargs):  # noqa: E501
    """List user details, returning full response info.  # noqa: E501

    Same endpoint, permissions, and filters as
    ``auth_users_list``, but the result is a
    ``tuple(InlineResponse200, status_code(int),
    headers(HTTPHeaderDict))`` — or the request thread when
    ``async_req=True``.  # noqa: E501

    >>> thread = api.auth_users_list_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str username: exact-match filter; ``__contains``,
        ``__icontains``, ``__in`` (comma-separated), ``__startswith``,
        ``__istartswith``, ``__endswith`` and ``__regex`` variants are
        also accepted.
    :param str email: same filter suffixes as ``username``.
    :param int limit: Number of results to return per page.
    :param int offset: The initial index from which to return the results.
    :param _return_http_data_only: response data without head status
        code and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object
        is returned without reading/decoding response data.
        Default is True.
    :param _request_timeout: total timeout in seconds, or a
        (connection, read) tuple.
    :return: tuple(InlineResponse200, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously, returns the request
        thread.
    """
    # Query-string filters, in the order they are emitted on the URL.
    query_param_names = [
        'username',
        'username__contains',
        'username__icontains',
        'username__in',
        'username__startswith',
        'username__istartswith',
        'username__endswith',
        'username__regex',
        'email',
        'email__contains',
        'email__icontains',
        'email__in',
        'email__startswith',
        'email__istartswith',
        'email__endswith',
        'email__regex',
        'limit',
        'offset',
    ]
    control_params = [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]
    # Reject anything that is neither a filter nor a control keyword.
    allowed = set(query_param_names) | set(control_params)
    for key in kwargs:
        if key not in allowed:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method auth_users_list" % key
            )
    params = dict(kwargs)

    collection_formats = {}

    path_params = {}

    # Only filters explicitly supplied with a non-None value are sent.
    query_params = [
        (name, params[name])
        for name in query_param_names
        if params.get(name) is not None
    ]

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    form_params = []
    local_var_files = {}
    body_params = None

    # Authentication setting
    auth_settings = ['Bearer']  # noqa: E501

    return self.api_client.call_api(
        '/auth/users/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='InlineResponse200',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def auth_users_read(self, id, **kwargs): # noqa: E501
"""auth_users_read # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.auth_users_read(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int id: A unique integer value identifying this user. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: User
If the method is called asynchronously,
returns the request | |
By
disregarding such factors, we can assume the alignment of the gcrs and
itrs axes and simplify the conversion to a rotation about the z-axis.
We can convert between the gcrs and itrs frames using the following
equations:
.. math:: \vec{V}_{gcrs} = E_3^\gamma\;\vec{V}_{itrs}
.. math:: \vec{V}_{itrs} = E_3^{-\gamma}\;\vec{V}_{gcrs}
where :math:`\gamma` is the Earth rotation angle, :math:`E_3^\theta` is
the rotation matrix that rotates a vector around the z-axis by an angle
:math:`\theta`, and :math:`\vec{V}` is a position vector in either the
ECI or ECEF frames. [Kok17b]_
References
----------
.. [Kok17b] <NAME>. Changing Coordinates in the Context of Orbital
Mechanics. Cyber and Electronic Warfare Division, Defence Science,
and Technology Group, Jan.2017, p. 21.
"""
theta = -self.era() if frame == "gcrs" else self.era()
theta = np.radians(theta)
# Construct rotational matrix.
A11 = np.cos(theta)
A12 = -np.sin(theta)
A21 = np.sin(theta)
A22 = np.cos(theta)
# Rotate position data around z-axis by ERA.
output = np.zeros((self.length, 3))
output[:, 0] = A11 * position[:, 0] + A12 * position[:, 1]
output[:, 1] = A21 * position[:, 0] + A22 * position[:, 1]
output[:, 2] = position[:, 2]
return output
def gcrs(self) -> np.ndarray:
    """Return cartesian gcrs position data.

    Returns
    -------
    np.ndarray
        Array of shape (n,3) with columns of XYZ gcrs position data.

    See Also
    --------
    itrs : Return cartesian itrs position data.
    geo : Return geographical position data.

    Examples
    --------
    >>> time = Time(julian=np.array([2454545]))
    >>> position = np.array([[6343.82, -2640.87, -11.26]])
    >>> coor = Coordinate(position=position, type="itrs", time=time)
    >>> coor.gcrs()
    np.array([[6212.21719598, -2937.10811161, -11.26]])
    """
    # Serve the cached result when available.
    if self._GCRS is not None:
        return self._GCRS
    # Derive itrs data from geographical data if neither frame is
    # cached yet, then rotate itrs into gcrs and cache the result.
    if self._ITRS is None:
        self._ITRS = self._geo_to_itrs(self._GEO)
    self._GCRS = self._gcrs_and_itrs(self._ITRS, frame="itrs")
    return self._GCRS
def itrs(self) -> np.ndarray:
    """Return cartesian itrs position data.

    Returns
    -------
    np.ndarray
        Array of shape (n,3) with columns of XYZ itrs position data.

    See Also
    --------
    gcrs : Return cartesian gcrs position data.
    geo : Return geographical position data.

    Examples
    --------
    >>> time = Time(julian=np.array([2454545]))
    >>> position = np.array([[6343.82, -2640.87, -11.26]])
    >>> coor = Coordinate(position=position, type="gcrs", time=time)
    >>> coor.itrs()
    np.array([[6461.30569276, -2338.75507354, -11.26]])
    """
    # Serve the cached result when available.
    if self._ITRS is not None:
        return self._ITRS
    # Prefer rotating cached gcrs data; otherwise convert from the
    # geographical representation.  Cache whichever path is taken.
    if self._GCRS is not None:
        self._ITRS = self._gcrs_and_itrs(position=self._GCRS, frame="gcrs")
    else:
        self._ITRS = self._geo_to_itrs(position=self._GEO)
    return self._ITRS
def _get_ang(self, u: np.ndarray, v: np.ndarray) -> np.ndarray:
"""Calculate degree angle bewteen two vectors.
Parameters
----------
u, v : np.ndarray
Arrays of shape (n,3) with rows of XYZ cartesian data.
Returns
-------
np.ndarray
Array of shape (n,) containing the degree angle between the two
arrays.
"""
num = np.sum(u * v, axis=1)
denom = np.linalg.norm(u, axis=1) * np.linalg.norm(v, axis=1)
ang = np.degrees(np.arccos(num / denom))
return ang
def horizontal(self, location: Any) -> Tuple:
    """Return horizontal position data in degrees and decimals.

    In this method, the azimuth is calculated as increasing degrees
    clockwise from North and ranges from 0 to 360 degrees. The altitude
    is calculated as increasing degrees above the observer's local
    horizon and ranges from 0 to 90 degrees.

    Parameters
    ----------
    location : GroundPosition
        Ground location defining the center of the horizontal system.

    Returns
    -------
    tuple
        Tuple of length two containing the altitude and azimuth data as
        decimals and degrees in NumPy arrays of shape (n,).

    Examples
    --------
    >>> time = Time(julian=np.array([2454545]))
    >>> position = np.array([[6343.82, -2640.87, -11.26]])
    >>> location = GroundPosition(52.1579, -106.6702)
    >>> coor = Coordinate(position=position, frame="ecef", time=time)
    >>> coor.horizontal(location=location)
    (array([-40.8786098]), array([94.73615482]))

    .. note:: NOTE(review): other examples in this class construct
       ``Coordinate`` with ``type=...`` rather than ``frame=...`` —
       confirm which keyword the constructor actually takes.
    """
    # Lazily populate the cartesian itrs data used below.
    if self._ITRS is None:
        self.itrs()
    # Convert observer position into cartesian coordinates, repeated
    # once per satellite sample.
    lat, lon, radius = location.lat, location.lon, location.radius
    GEO_data = self._geo_to_itrs(np.array([[lat, lon, 0]]))
    obs = np.repeat(GEO_data, self.length, 0)
    # Determine line of sight vector then altitude: 90 degrees minus
    # the angle between the line of sight and the observer's geocentric
    # position vector.
    LOS = self._ITRS - obs
    Alt = 90 - self._get_ang(LOS, obs)
    # Find surface tangent vector passing through z-axis.
    # NOTE(review): np.sin(beta) is zero for an equatorial observer
    # (lat == 0), which divides by zero below — confirm callers never
    # pass lat == 0.
    k_hat = np.repeat(np.array([[0, 0, 1]]), self.length, axis=0)
    beta = np.radians(lat)
    tangent = (k_hat.T * radius / np.sin(beta)).T - obs
    # Find LOS projection on tangent plane by removing the component
    # normal to the surface (along obs).
    norm_proj = (obs.T * np.sum(LOS * obs, axis=1) / radius ** 2).T
    proj_LOS = LOS - norm_proj
    # Determine azimuth: angle from the tangent direction, flipped into
    # the 180-360 range when the projection lies on the opposite side
    # of the reference plane spanned by tangent and obs.
    reference = np.cross(tangent, obs)
    neg_ind = np.where(np.sum(proj_LOS * reference, axis=1) < 0)[0]
    Az = self._get_ang(tangent, proj_LOS)
    Az[neg_ind] = 360 - self._get_ang(tangent[neg_ind], proj_LOS[neg_ind])
    return Alt, Az
def off_nadir(self, location: Any) -> np.ndarray:
    """Return the off-nadir angle to a ground location.

    The off-nadir angle is the acute angle, in degrees and decimals,
    measured in increasing degrees from the satellite's nadir to the
    line joining the satellite and the ground location of interest,
    evaluated at each satellite position.

    Parameters
    ----------
    location : GroundPosition
        Calculate off-nadir angles for the specified ground location.

    Returns
    -------
    np.ndarray
        Array of shape (n,) containing off-nadir angle data in degrees
        and decimals.
    """
    # Ensure the cartesian itrs data is populated.
    if self._ITRS is None:
        self.itrs()
    # Ground site in cartesian itrs, repeated once per sample.
    site = self._geo_to_itrs(np.array([[location.lat, location.lon, 0]]))
    observer = np.repeat(site, self.length, 0)
    # Angle between the line of sight and the satellite's geocentric
    # position vector.
    line_of_sight = self._ITRS - observer
    return self._get_ang(line_of_sight, self._ITRS)
def _WGS84_radius(self, lattitude: np.ndarray) -> np.ndarray:
"""Calculate the Earth's geocentric radius using WGS84.
Parameters
----------
latitude : np.ndarray
Array of shape (n,) representing the geocentric latitude of a
ground location in degrees and decimals.
Returns
-------
np.ndarray
Array of shape (n,) containing the Earth's geocentric radius in
kilometres.
Notes
-----
By using an Earth ellipsoid with the WGS84 parameters of
:math:`a=6378.137` and :math:`b=6356.7523142`, the geocentric radius
can be calculated using the following formulation:
.. math:: r = \sqrt{\frac{(a^2\cos(\beta))^2 + (b^2\sin(\beta))^2}{(a\cos(\beta))^2 + (b\sin(\beta))^2}}
where :math:`\beta` is the observer's latitude. [Tim18]_
References
----------
.. [Tim18] Timur. Earth Radius by Latitude (WGS 84). 2018. url:
https://planetcalc.com/7721/.
"""
# Get lattidue parameter.
phi = np.radians(lattitude)
# Define WGS84 Parameters.
a = 6378.137
b = 6356.752314245
c_phi, s_phi = np.cos(phi), np.sin(phi)
num = (a ** 2 * c_phi) ** 2 + (b ** 2 * s_phi) ** 2
denom = (a * c_phi) ** 2 + (b * s_phi) ** 2
radius = np.sqrt(num / denom)
return radius
def altitude(self) -> np.ndarray:
    """Return the geodetic altitude above Earth's surface in kilometres.

    This method uses the WGS84 reference ellipsoid to calculate the
    geodetic altitude above the Earth's surface.

    Returns
    -------
    np.ndarray
        Array of shape (n,) containing satellite altitudes in
        kilometres.

    Notes
    -----
    This method uses an ellipsoid based model of the Earth to calculate
    the ellipsoid height in an iterative manner described in
    "Coordinate Systems in Geodesy" by <NAME> and <NAME>. [KW98b]_

    References
    ----------
    .. [KW98b] <NAME> and <NAME>. Coordinate Systems in Geodesy.
        Jan. 1998, pp. 31–33.

    Examples
    --------
    >>> time = Time(julian=np.array([2454545]))
    >>> position = np.array([[6343.82, -2640.87, -11.26]])
    >>> coor = Coordinate(position=position, type="itrs", time=time)
    >>> coor.altitude()
    np.array([504.1269764])
    """
    # Lazily populate the cartesian itrs data.
    if self._ITRS is None:
        self.itrs()
    itrs_data = self._ITRS
    x = itrs_data[:, 0]
    y = itrs_data[:, 1]
    z = itrs_data[:, 2]
    # WGS84 semi-major and semi-minor axes in kilometres.
    a = 6378.137
    b = 6356.752314245
    # Convergence tolerance (note: 10e-10 == 1e-9).
    epsilon = 10e-10
    # First eccentricity of the ellipsoid.
    e = np.sqrt(1 - b ** 2 / a ** 2)
    # Distance from the rotation axis.
    p = np.sqrt(x ** 2 + y ** 2)
    # Initial guesses for the prime-vertical radius N, the height h,
    # and the geodetic latitude phi.
    N = a
    h = np.sqrt(x ** 2 + y ** 2 + z ** 2) - np.sqrt(a * b)
    phi = np.arctan((z / p) * (1 - (e ** 2 * N) / (N + h)) ** -1)
    def new_vals(a, b, e, p, N, h, phi, z):
        # One refinement iteration of (N, h, phi).
        N = a / np.sqrt(np.cos(phi) ** 2 + b ** 2 / a ** 2 * np.sin(phi) ** 2)
        h = p / np.cos(phi) - N
        phi = np.arctan((z / p) * (1 - (e ** 2 * N) / (N + h)) ** -1)
        return N, h, phi
    Np, hp, phip = new_vals(a, b, e, p, N, h, phi, z)
    # NOTE(review): the stopping rule compares signed mean updates, not
    # absolute differences — a negative mean update exits the loop on
    # the first pass.  Confirm this is intended rather than
    # np.mean(np.abs(...)).
    while (np.mean(hp - h) > a * epsilon) and (np.mean(phip - phi) > epsilon):
        N, h, phi = Np, hp, phip
        Np, hp, phip = new_vals(a, b, e, p, N, h, phi, z)
    return h
def distance(self, location: Any) -> np.ndarray:
"""Return the distance to a ground location.
Parameters
----------
location : GroundPosition
Calculate distances to the specified ground location.
Returns
-------
np.ndarray
Array of shape (n,) containing distances in kilometres between | |
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param retention_days: The backup retention period in days. This is how many days Point-in-Time
Restore will be supported.
:type retention_days: int
:param diff_backup_interval_in_hours: The differential backup interval in hours. This is how
many interval hours between each differential backup will be supported. This is only applicable
to live databases but not dropped databases. Possible values include: 12, 24.
:type diff_backup_interval_in_hours: str or ~azure.mgmt.sql.models.DiffBackupIntervalInHours
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'retention_days': {'key': 'properties.retentionDays', 'type': 'int'},
'diff_backup_interval_in_hours': {'key': 'properties.diffBackupIntervalInHours', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(BackupShortTermRetentionPolicy, self).__init__(**kwargs)
self.retention_days = kwargs.get('retention_days', None)
self.diff_backup_interval_in_hours = kwargs.get('diff_backup_interval_in_hours', None)
class BackupShortTermRetentionPolicyListResult(msrest.serialization.Model):
    """A list of short term retention policies.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: Array of results.
    :vartype value: list[~azure.mgmt.sql.models.BackupShortTermRetentionPolicy]
    :ivar next_link: Link to retrieve next page of results.
    :vartype next_link: str
    """

    # Both fields are server-populated, hence read-only.
    _validation = {
        'value': {'readonly': True},
        'next_link': {'readonly': True},
    }

    # Maps Python attribute names to wire-format JSON keys/types.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[BackupShortTermRetentionPolicy]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(BackupShortTermRetentionPolicyListResult, self).__init__(**kwargs)
        # Populated by the service on deserialization; never sent by clients.
        self.value = None
        self.next_link = None
class CheckNameAvailabilityRequest(msrest.serialization.Model):
    """A request to check whether the specified name for a resource is available.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param name: Required.
    :type name: str
    :ivar type: Has constant value: "Microsoft.Sql/servers".
    :vartype type: str
    """

    # 'name' must be supplied by the caller; 'type' is a fixed constant.
    _validation = {
        'name': {'required': True},
        'type': {'required': True, 'constant': True},
    }

    # Maps Python attribute names to wire-format JSON keys/types.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    # Constant resource type expected by the name-availability endpoint.
    type = "Microsoft.Sql/servers"

    def __init__(
        self,
        **kwargs
    ):
        super(CheckNameAvailabilityRequest, self).__init__(**kwargs)
        # Required: a KeyError here means the caller omitted 'name'.
        self.name = kwargs['name']
class CheckNameAvailabilityResponse(msrest.serialization.Model):
    """The result of a name availability check.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar name: The name whose availability was checked.
    :vartype name: str
    :ivar available: True if the name is available, otherwise false.
    :vartype available: bool
    :ivar reason: The reason code explaining why the name is unavailable. Will be undefined if the
     name is available. Possible values include: "Invalid", "AlreadyExists".
    :vartype reason: str or ~azure.mgmt.sql.models.CheckNameAvailabilityReason
    :ivar message: A message explaining why the name is unavailable. Will be undefined if the name
     is available.
    :vartype message: str
    """

    # Every field is server-populated, hence read-only.
    _validation = {
        'name': {'readonly': True},
        'available': {'readonly': True},
        'reason': {'readonly': True},
        'message': {'readonly': True},
    }

    # Maps Python attribute names to wire-format JSON keys/types.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'available': {'key': 'available', 'type': 'bool'},
        'reason': {'key': 'reason', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(CheckNameAvailabilityResponse, self).__init__(**kwargs)
        # Populated by the service on deserialization; never sent by clients.
        self.name = None
        self.available = None
        self.reason = None
        self.message = None
class CompleteDatabaseRestoreDefinition(msrest.serialization.Model):
    """Contains the information necessary to perform a complete database restore operation.

    All required parameters must be populated in order to send to Azure.

    :param last_backup_name: Required. The last backup name to apply.
    :type last_backup_name: str
    """

    _validation = {
        'last_backup_name': {'required': True},
    }

    # Maps Python attribute names to wire-format JSON keys/types.
    _attribute_map = {
        'last_backup_name': {'key': 'lastBackupName', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(CompleteDatabaseRestoreDefinition, self).__init__(**kwargs)
        # Required: a KeyError here means the caller omitted 'last_backup_name'.
        self.last_backup_name = kwargs['last_backup_name']
class CopyLongTermRetentionBackupParameters(msrest.serialization.Model):
    """Contains the information necessary to perform long term retention backup copy operation.

    :param target_subscription_id: The subscription that owns the target server.
    :type target_subscription_id: str
    :param target_resource_group: The resource group that owns the target server.
    :type target_resource_group: str
    :param target_server_resource_id: The resource Id of the target server that owns the database.
    :type target_server_resource_id: str
    :param target_server_fully_qualified_domain_name: The fully qualified domain name of the target
     server.
    :type target_server_fully_qualified_domain_name: str
    :param target_database_name: The name of the database owns the copied backup.
    :type target_database_name: str
    :param target_backup_storage_redundancy: The storage redundancy type of the copied backup.
     Possible values include: "Geo", "Local", "Zone".
    :type target_backup_storage_redundancy: str or
     ~azure.mgmt.sql.models.TargetBackupStorageRedundancy
    """

    # All parameters are optional; on the wire they live under the
    # 'properties' envelope (see the 'properties.*' keys below).
    _attribute_map = {
        'target_subscription_id': {'key': 'properties.targetSubscriptionId', 'type': 'str'},
        'target_resource_group': {'key': 'properties.targetResourceGroup', 'type': 'str'},
        'target_server_resource_id': {'key': 'properties.targetServerResourceId', 'type': 'str'},
        'target_server_fully_qualified_domain_name': {'key': 'properties.targetServerFullyQualifiedDomainName', 'type': 'str'},
        'target_database_name': {'key': 'properties.targetDatabaseName', 'type': 'str'},
        'target_backup_storage_redundancy': {'key': 'properties.targetBackupStorageRedundancy', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(CopyLongTermRetentionBackupParameters, self).__init__(**kwargs)
        self.target_subscription_id = kwargs.get('target_subscription_id', None)
        self.target_resource_group = kwargs.get('target_resource_group', None)
        self.target_server_resource_id = kwargs.get('target_server_resource_id', None)
        self.target_server_fully_qualified_domain_name = kwargs.get('target_server_fully_qualified_domain_name', None)
        self.target_database_name = kwargs.get('target_database_name', None)
        self.target_backup_storage_redundancy = kwargs.get('target_backup_storage_redundancy', None)
class CreateDatabaseRestorePointDefinition(msrest.serialization.Model):
    """Contains the information necessary to perform a create database restore point operation.

    All required parameters must be populated in order to send to Azure.

    :param restore_point_label: Required. The restore point label to apply.
    :type restore_point_label: str
    """

    _validation = {
        'restore_point_label': {'required': True},
    }

    # Maps Python attribute names to wire-format JSON keys/types.
    _attribute_map = {
        'restore_point_label': {'key': 'restorePointLabel', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(CreateDatabaseRestorePointDefinition, self).__init__(**kwargs)
        # Required: a KeyError here means the caller omitted 'restore_point_label'.
        self.restore_point_label = kwargs['restore_point_label']
class TrackedResource(Resource):
    """ARM tracked top level resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    """

    # id/name/type are server-populated (inherited from Resource);
    # location must be supplied by the caller.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    # Maps Python attribute names to wire-format JSON keys/types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(TrackedResource, self).__init__(**kwargs)
        # Required: a KeyError here means the caller omitted 'location'.
        self.location = kwargs['location']
        self.tags = kwargs.get('tags', None)
class Database(TrackedResource):
"""A database resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param sku: The database SKU.
The list of SKUs may vary by region and support offer. To determine the SKUs (including the
SKU name, tier/edition, family, and capacity) that are available to your subscription in an
Azure region, use the ``Capabilities_ListByLocation`` REST API or one of the following
commands:
.. code-block:: azurecli
az sql db list-editions -l <location> -o table
`
.. code-block:: powershell
Get-AzSqlServerServiceObjective -Location <location>
`.
:type sku: ~azure.mgmt.sql.models.Sku
:ivar kind: Kind of database. This is metadata used for the Azure portal experience.
:vartype kind: str
:ivar managed_by: Resource that manages the database.
:vartype managed_by: str
:param create_mode: Specifies the mode of database creation.
Default: regular database creation.
Copy: creates a database as a copy of an existing database. sourceDatabaseId must be specified
as the resource ID of the source database.
Secondary: creates a database as a secondary replica of an existing database. sourceDatabaseId
must be specified as the resource ID of the existing primary database.
PointInTimeRestore: Creates a database by restoring a point in time backup of an existing
database. sourceDatabaseId must be specified as the resource ID of the existing database, and
restorePointInTime must be specified.
Recovery: Creates a database by restoring a geo-replicated backup. sourceDatabaseId must be
specified as the recoverable database resource ID to restore.
Restore: Creates a database by restoring a backup of a deleted database. sourceDatabaseId must
be specified. If sourceDatabaseId is the database's original resource ID, then
sourceDatabaseDeletionDate must be specified. Otherwise sourceDatabaseId must be the restorable
dropped database resource ID and sourceDatabaseDeletionDate is ignored. restorePointInTime may
also be specified to restore from an earlier point in time.
RestoreLongTermRetentionBackup: Creates a database by restoring from a long term retention
vault. recoveryServicesRecoveryPointResourceId must be specified as the recovery point resource
ID.
Copy, Secondary, and RestoreLongTermRetentionBackup are not supported for DataWarehouse
edition. Possible | |
extract_offset = BitVecVal(0, 32)
if fail == 'PacketTooShort':
# XXX: Merge size calculation
header_size = BitVecVal(0, 32)
for name, field in extract_header.fields.items():
# XXX: deal with valid flags
if field.name != '$valid$':
if field.var_length:
header_size += sym_size
else:
header_size += BitVecVal(field.size, 32)
self.sym_packet.set_max_length(
simplify(new_pos + header_size - 8))
return new_pos
elif fail == 'HeaderTooShort':
header_size = BitVecVal(0, 32)
for name, field in extract_header.fields.items():
if field.var_length:
field_size_c = BitVecVal(field.size, sym_size.size())
# The variable length field should be larger than
# the maximum field length but still fit in the
# maximum packet size
c_packet_size = new_pos + header_size
constraints.append(
And(
UGT(sym_size, field_size_c),
ULT(sym_size,
BitVecVal(sym_packet.max_packet_size, 32) -
c_packet_size)))
sym_packet.update_packet_size(c_packet_size + sym_size)
return new_pos
if field.name != '$valid$':
header_size += BitVecVal(field.size, 32)
assert False
for name, field in extract_header.fields.items():
# XXX: deal with valid flags
if field.name != '$valid$':
if field.var_length:
# This messes up the packet size somewhat
field_val = sym_packet.extract(
new_pos + extract_offset, field.size)
ones = BitVecVal(-1, field.size)
assert ones.size() >= sym_size.size()
field_size_c = BitVecVal(field.size, sym_size.size())
ones, shift_bits = self.equalize_bv_size(
[ones, field_size_c - sym_size])
context.insert(field,
field_val & (LShR(ones, shift_bits)))
constraints.append(ULE(sym_size, field_size_c))
extract_offset += sym_size
else:
context.insert(field,
sym_packet.extract(
new_pos + extract_offset,
field.size))
extract_offset += BitVecVal(field.size, 32)
else:
# Even though the P4_16 isValid() method
# returns a boolean value, it appears that
# when p4c-bm2-ss compiles expressions like
# "if (ipv4.isValid())" into a JSON file, it
# compares the "ipv4.$valid$" field to a bit
# vector value of 1 with the == operator, thus
# effectively treating the "ipv4.$valid$" as
# if it is a bit<1> type.
context.insert(field, BitVecVal(1, 1))
return new_pos + extract_offset
elif op == p4_parser_ops_enum.verify:
expected_result = BoolVal(False) if fail != '' else BoolVal(True)
sym_cond = self.type_value_to_smt(context, parser_op.value[0],
sym_packet, pos)
constraints.append(sym_cond == expected_result)
return new_pos
elif op == p4_parser_ops_enum.primitive:
primitive = parser_op.value[0]
# XXX: merge with action_to_smt
if primitive.op == 'add_header':
header_name = primitive.parameters[0].header_name
context.set_field_value(header_name, '$valid$', BitVecVal(
1, 1))
return new_pos
else:
raise Exception(
'Primitive not supported: {}'.format(primitive.op))
logging.warning('Primitive not supported')
else:
raise Exception('Parser op not supported: {}'.format(op))
def action_to_smt(self, context, table_name, action):
    """Mirror one P4 action body onto the symbolic execution `context`.

    Registers symbolic values for the action's runtime parameters, then
    walks the action's primitive operations in order, applying each one's
    effect (field assignments, header validity changes, header-stack
    copies/pops/pushes) to `context`.  Raises on unsupported primitives
    unless the configuration allows treating them as no-ops.
    """
    # XXX: This will not work if an action is used multiple times
    # XXX: Need a way to access the model for those parameters
    # Create symbolic values for the runtime data (parameters for actions)
    for i, runtime_param in enumerate(action.runtime_data):
        context.register_runtime_data(table_name, action.name,
                                      runtime_param.name,
                                      runtime_param.bitwidth)
    for primitive in action.primitives:
        context.set_source_info(primitive.source_info)
        # In Apr 2017, p4c and behavioral-model added primitives
        # "assign", "assign_VL" (for assigning variable length
        # 'varbit' fields), and "assign_header" primitives. I believe
        # that "assign" is either identical to "modify_field", or very
        # very close. See
        # https://github.com/p4lang/behavioral-model/pull/330
        if primitive.op in ['modify_field', 'assign']:
            value = self.type_value_to_smt(context,
                                           primitive.parameters[1])
            field = primitive.parameters[0]
            fld_info = self.hlir.headers[field.header_name].fields[
                field.header_field]
            dest_size = fld_info.size
            # Width-adjust the RHS to the destination field's size:
            # zero-extend when too narrow, truncate when too wide.
            if dest_size != value.size():
                if Config().get_debug():
                    logging.debug(
                        "primitive op '%s' lhs/rhs width mismatch"
                        " (%d != %d bits) lhs %s source_info %s"
                        "" % (primitive.op, dest_size, value.size(), field,
                              primitive.source_info))
                    logging.debug(" value %s" % (value))
                if dest_size > value.size():
                    value = ZeroExt(dest_size - value.size(), value)
                else:
                    value = Extract(dest_size - 1, 0, value)
            context.set_field_value(field.header_name, field.header_field,
                                    value)
        elif primitive.op == 'drop' or primitive.op == 'mark_to_drop':
            # Dropping the packet does not modify the context. However we
            # should eventually adapt the expected path.
            # 511 is the v1model drop port; mcast_grp is cleared too.
            context.set_field_value('standard_metadata', 'egress_spec',
                                    BitVecVal(511, 9))
            context.set_field_value('standard_metadata', 'mcast_grp',
                                    BitVecVal(0, 16))
            pass
        elif primitive.op == 'add_header':
            header_name = primitive.parameters[0].header_name
            context.set_field_value(header_name, '$valid$', BitVecVal(
                1, 1))
        elif primitive.op == 'remove_header':
            header_name = primitive.parameters[0].header_name
            context.set_field_value(header_name, '$valid$', BitVecVal(
                0, 1))
            context.remove_header_fields(header_name)
        elif primitive.op == 'assign_header_stack':
            header_stack_src = self.hlir.get_header_stack(
                primitive.parameters[1].header_stack_name)
            header_stack_dst = self.hlir.get_header_stack(
                primitive.parameters[0].header_stack_name)
            header_stack_t = self.hlir.get_header_type(
                header_stack_src.header_type_name)
            # Element-wise copy: validity is always propagated; field
            # values only for elements that are valid in the source.
            for i in range(header_stack_src.size):
                src_valid = simplify(
                    context.get_header_field('{}[{}]'.format(
                        header_stack_src.name, i), '$valid$'))
                context.set_field_value('{}[{}]'.format(
                    header_stack_dst.name, i), '$valid$', src_valid)
                if src_valid == BitVecVal(1, 1):
                    for field_name, field in header_stack_t.fields.items():
                        val = context.get_header_field(
                            '{}[{}]'.format(header_stack_src.name, i),
                            field.name)
                        context.set_field_value('{}[{}]'.format(
                            header_stack_dst.name, i), field.name, val)
                else:
                    dst_name = '{}[{}]'.format(header_stack_dst.name, i)
                    context.set_field_value(dst_name, '$valid$', BitVecVal(0, 1))
                    context.remove_header_fields(dst_name)
        elif primitive.op == 'pop':
            assert isinstance(primitive.parameters[0], TypeValueHeaderStack)
            assert isinstance(primitive.parameters[1], TypeValueHexstr)
            header_stack_name = primitive.parameters[0].header_stack_name
            header_stack = self.hlir.get_header_stack(header_stack_name)
            header_stack_t = self.hlir.get_header_type(header_stack.header_type_name)
            pop_n = primitive.parameters[1].value
            # Shift elements down by pop_n positions.
            for i in range(pop_n, header_stack.size):
                j = i - pop_n
                src_name = '{}[{}]'.format(header_stack_name, i)
                dst_name = '{}[{}]'.format(header_stack_name, j)
                src_valid = simplify(context.get_header_field(src_name, '$valid$'))
                if src_valid == BitVecVal(1, 1):
                    for field_name, field in header_stack_t.fields.items():
                        val = context.get_header_field(
                            src_name,
                            field.name)
                        context.set_field_value(dst_name, field.name, val)
                else:
                    context.set_field_value(dst_name, '$valid$', BitVecVal(0, 1))
                    context.remove_header_fields(dst_name)
            # Invalidate the vacated tail elements.
            for i in range(header_stack.size - pop_n, header_stack.size):
                dst_name = '{}[{}]'.format(header_stack_name, i)
                context.set_field_value(dst_name, '$valid$', BitVecVal(0, 1))
                context.remove_header_fields(dst_name)
        elif primitive.op == 'push':
            assert isinstance(primitive.parameters[0], TypeValueHeaderStack)
            assert isinstance(primitive.parameters[1], TypeValueHexstr)
            header_stack_name = primitive.parameters[0].header_stack_name
            header_stack = self.hlir.get_header_stack(header_stack_name)
            header_stack_t = self.hlir.get_header_type(header_stack.header_type_name)
            push_n = primitive.parameters[1].value
            # Shift elements up by push_n positions, iterating from the
            # top so sources are read before being overwritten.
            for i in range(header_stack.size - 1, push_n - 1, -1):
                j = i - push_n
                src_name = '{}[{}]'.format(header_stack_name, j)
                dst_name = '{}[{}]'.format(header_stack_name, i)
                src_valid = simplify(context.get_header_field(src_name, '$valid$'))
                if src_valid == BitVecVal(1, 1):
                    for field_name, field in header_stack_t.fields.items():
                        val = context.get_header_field(
                            src_name,
                            field.name)
                        context.set_field_value(dst_name, field.name, val)
                else:
                    context.set_field_value(dst_name, '$valid$', BitVecVal(0, 1))
                    context.remove_header_fields(dst_name)
            # Invalidate the newly exposed bottom elements.
            for i in range(0, push_n):
                dst_name = '{}[{}]'.format(header_stack_name, i)
                context.set_field_value(dst_name, '$valid$', BitVecVal(0, 1))
                context.remove_header_fields(dst_name)
        elif (primitive.op in [
                'modify_field_rng_uniform',
                'modify_field_with_hash_based_offset',
                'clone_ingress_pkt_to_egress',
                'clone_egress_pkt_to_egress', 'count', 'execute_meter',
                'generate_digest'
        ] and Config().get_allow_unimplemented_primitives()):
            logging.warning('Primitive op {} allowed but treated as no-op'.
                            format(primitive.op))
        else:
            raise Exception(
                'Primitive op {} not supported'.format(primitive.op))
        context.unset_source_info()
    context.remove_runtime_data()
def table_set_default_cmd_string(self, table, action, params):
    """Build a simple_switch_CLI 'table_set_default' argument string."""
    param_str = ' '.join(str(p) for p in params)
    return '{} {} {}'.format(table, action, param_str)
def table_add_cmd_string(self, table, action, values, params, priority):
    """Build a simple_switch_CLI 'table_add' argument string.

    A falsy priority (None/0/'') contributes nothing; otherwise it is
    appended as a trailing ' <int>'.
    """
    suffix = " %d" % (priority) if priority else ""
    value_str = ' '.join(values)
    param_str = ' '.join(str(p) for p in params)
    return '{} {} {} => {}{}'.format(table, action, value_str, param_str,
                                     suffix)
def parser_transition_key_constraint(self, sym_transition_keys, value,
                                     mask):
    """Return an SMT constraint matching the packed transition-key value.

    value should be int or long; mask should be int, long, or None.

    In the JSON file, if there are multiple fields in the
    transition_key, then the values are packed in a particular
    manner -- each transition_key is separately rounded up to a
    multiple of 8 bits wide, and its value is packed into the value
    as that width, with most significant 0 bits for padding, if
    needed.

    See https://github.com/p4lang/behavioral-model/issues/441 for a
    reference to the relevant part of the behavioral-model JSON
    spec.
    """
    assert isinstance(value, int) or isinstance(value, long)
    assert isinstance(mask, int) or isinstance(mask, long) or mask is None
    assert len(sym_transition_keys) >= 1
    bitvecs = []
    sz_total = 0
    for k in sym_transition_keys:
        sz_bits = k.size()
        # Round each key up to whole bytes.  Floor division ('//') is
        # required: under Python 3, '/' on ints yields a float, which
        # would break the integer widths fed to ZeroExt/BitVecVal below.
        # (Under Python 2, '/' and '//' agree for non-negative ints.)
        sz_bytes = (sz_bits + 7) // 8
        sz_total += 8 * sz_bytes
        bitvecs.append(ZeroExt(8 * sz_bytes - sz_bits, k))
    bv_value = BitVecVal(value, sz_total)
    # A None mask means "match everything" -> all-ones mask.
    bv_mask = BitVecVal(mask if mask is not None else -1, sz_total)
    logging.debug(
        "bitvecs {} value {} mask {}".format(bitvecs, bv_value, bv_mask))
    # z3's Concat requires at least two operands.
    if len(sym_transition_keys) > 1:
        constraint = (Concat(bitvecs) & bv_mask) == (bv_value & bv_mask)
    else:
        constraint = (bitvecs[0] & bv_mask) == (bv_value & bv_mask)
    return constraint
def init_context(self):
    """Create and register the initial symbolic Context for path analysis.

    All header valid bits start at 0, every other header field is
    registered as a free symbolic variable, and selected
    standard_metadata fields are seeded with dedicated symbolic values
    (ingress_port, packet_length, instance_type) or a concrete default
    (egress_spec = 0).  The result replaces slot 0 of the histories.
    """
    # The histories must contain exactly their seed entry at this point.
    assert len(self.context_history) == 1
    assert len(self.result_history) == 1
    context = Context()
    # Register the fields of all headers in the context
    for header_name, header in self.hlir.headers.items():
        for field_name, field in header.fields.items():
            if field_name == '$valid$':
                # All valid bits in headers are 0 in the beginning
                context.insert(field, BitVecVal(0, 1))
            else:
                context.register_field(field)
    # Header-stack elements likewise start out invalid.
    for stack_name, stack in self.hlir.header_stacks.items():
        for i in range(stack.size):
            context.set_field_value('{}[{}]'.format(stack_name, i),
                                    '$valid$', BitVecVal(0, 1))
    # XXX: refactor
    context.set_field_value('standard_metadata', 'ingress_port',
                            BitVec('$ingress_port$', 9))
    context.set_field_value('standard_metadata', 'packet_length',
                            self.sym_packet.get_sym_packet_size())
    context.set_field_value('standard_metadata', 'instance_type',
                            BitVec('$instance_type$', 32))
    context.set_field_value('standard_metadata', 'egress_spec',
                            BitVecVal(0, 9))
    self.context_history[0] = context
    self.result_history[0] = []
def generate_parser_constraints(self, parser_path):
parser_constraints_gen_timer = Timer('parser_constraints_gen')
parser_constraints_gen_timer.start()
if Config().get_incremental():
self.solver.pop()
self.solver.push()
self.sym_packet = Packet()
self.init_context()
constraints = []
# XXX: make this work for multiple parsers
parser = self.hlir.parsers['parser']
pos = BitVecVal(0, 32)
logging.info('path = {}'.format(' -> '.join(
[str(n) for n in list(parser_path)])))
for path_transition in parser_path:
assert isinstance(path_transition, ParserTransition) or isinstance(
path_transition, ParserOpTransition)
node = path_transition.src
next_node = path_transition.dst
logging.debug('{} -> {}\tpos = {}'.format(node, next_node, pos))
new_pos = pos
parse_state = parser.parse_states[node]
skip_select = False
for op_idx, parser_op in enumerate(parse_state.parser_ops):
fail = ''
if isinstance(
path_transition, ParserOpTransition
) and op_idx == path_transition.op_idx and path_transition.next_state == 'sink':
fail = path_transition.error_str
skip_select = True
new_pos = self.parser_op_to_smt(
self.current_context(), self.sym_packet, parser_op, fail,
pos, new_pos, constraints)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provides managers specific to SSI / Trust Triangle roles.
AgentConnectionManager (ACM) is a based on PySyft's DuetCredentialExchanger Class. The class helps to manage aries
agents, send messages, and establish aries and duet connections. Specifically, active aries connections are
used to establish duet connections. The subclasses (RelyingParty, CredentialHolder, IssuingAuthority) have
functionalities that are specific to their roles in the trust triangle (e.g., only IssuingAuthority can issue
verifiable credentials).
Note: there are two types of connections
(1) Aries Connections (via ACA-PY agents) to send messages and exchange verifiable credentials
(2) Duet Connections (via PySyft's Duet) to exchange data and host an encrypted database
The Aries Connections are established by manually exchanging an invitation (e.g., QR-code or json posted online or
sent via E-Mail). Then, messages are sent via the Aries Connection to establish Duet Connections.
"""
# Standard libraries and 3rd party packages
import ast
import asyncio
import json
import time
from typing import Dict as TypeDict
from typing import Optional
from typing import Tuple
from typing import Union
import nest_asyncio
import requests
# from libs.aries_basic_controller import AriesAgentController
from aries_cloudcontroller import AriesAgentController
from pprintpp import pprint
from syft.grid.duet.exchange_ids import DuetCredentialExchanger
# local sources
from .connection import Connection
from .helpers import *
from .message import Message
nest_asyncio.apply()
class AgentConnectionManager(DuetCredentialExchanger): # dce
def __init__(self, agent_controller: AriesAgentController) -> None:
    """
    Initialize the AgentConnectionManager (ACM). The class builds on top of the DuetCredentialExchanger
    (see https://github.com/OpenMined/PySyft/blob/7049ca017cf26074518c02d4891283c6e1101df5/packages/syft/src/syft/grid/duet/exchange_ids.py),
    which is defined by the PySyft package. A DuetCredentialExchanger allows to exchange Duet Tokens
    to initiate a Duet connection.

    Args:
        agent_controller: ACA-Py agent controller wrapped by this manager
    """
    super().__init__()  # Initiate DuetCredentialExchanger
    self.agent_controller = agent_controller  # For aries agent
    # Webhook listeners: connection-state events and incoming basic messages.
    self.agent_listeners = [
        {"topic": "connections", "handler": self._connections_handler},
        {"topic": "basicmessages", "handler": self._messages_handler},
    ]
    self.connections: TypeDict = {}  # Dict of connections established with agent_controller {connection_id : Connection}
    self.messages: list = []  # List of Message objects agent_controller received
    self.role: Optional[str] = None  # Role of agent controller (e.g., RelyingParty)
    self.duet_connection_id: Optional[str] = None  # ID of connection through which to establish a Duet connection
def run(self, credential: str = "") -> Optional[str]:
    """
    Default function required for any subclass of DuetCredentialExchanger. Defines what credential_exchanger (i.e.,
    agent_controller) should do when they initiate or join a Duet connection. Uses the connection_id previously
    set as self.duet_connection_id

    Args:
        credential: duet token obtained from the Duet network
            (see https://github.com/OpenMined/PySyft/blob/f4717d2944593460df9b431e9143c1d1208dc45d/packages/syft/src/syft/grid/duet/__init__.py)

    Returns: responder_id (duet token of duet partner who initiated the duet connection)
        OR client_id (duet token of duet partner who is joining the duet connection)
    """
    # Get duet_connection and set duet_token to duet token (self.duet_connection_id is set in agents' notebooks beforehand)
    self._update_connection(connection_id=self.duet_connection_id, token=credential)
    # Process if agent is joining the duet connection:
    # (self.join and self.responder_id come from the DuetCredentialExchanger base class)
    if self.join:
        self._duet_invitee_exchange(credential=credential)
        return self.responder_id
    # Process if agent is initiating the duet connection:
    else:
        client_id = self._duet_inviter_exchange(credential=credential)
        return client_id
def get_duet_connection(self) -> Connection:
    """Return the Aries connection currently used to establish a Duet connection."""
    return self.get_connection(self.duet_connection_id)
def get_duet_connections(self) -> [Connection]:
    """Return every Aries connection through which a Duet connection is established."""
    duet_conns = []
    for conn in self.connections.values():
        if conn.is_duet_connection is True:
            duet_conns.append(conn)
    return duet_conns
def _duet_inviter_exchange(self, credential: str) -> str:
    """
    Run the inviter side of the Duet token exchange: send our own duet
    token to the joining partner (step 1), then wait for the partner's
    token in return (step 2).

    Args:
        credential: duet token of the inviting agent herself

    Returns: the duet token received from the joining duet partner
    """
    conn = self.get_duet_connection()
    self._send_duet_token(credential, 1, conn)
    partner_token = self._await_partner_duet_token(2, conn)
    return partner_token
def _duet_invitee_exchange(self, credential: str) -> None:
    """
    Proceed to join a Duet connection as an invitee: (1) Await duet token of inviting partner, (2) reset responder
    ID (because otherwise it is only set as ""), and send duet token to the inviting party.

    Args:
        credential: duet token of invitee

    Returns: -
    """
    # Get duet connection
    duet_conn = self.get_duet_connection()
    token_partner = duet_conn.duet_token_partner
    # Await the partner's token when it has not arrived yet.
    # BUG FIX: the original check `token_partner is asyncio.Future()`
    # compared identity against a freshly created Future and was therefore
    # always False; a still-pending Future placeholder was never awaited.
    # Use an isinstance check instead so both "no token yet" (None) and
    # "awaiting token" (Future) fall into the waiting branch.
    if token_partner is None or isinstance(token_partner, asyncio.Future):
        token_partner = self._await_partner_duet_token(1, duet_conn)
    # Else print that a duet token was already received
    else:
        print("\n♫♫♫ >", colored("STEP 1:", attrs=["bold"]), "Obtained Duet Token {c}".format(c=token_partner))
        print("♫♫♫ > from Duet Partner {n}".format(n=duet_conn.connection_with))
        print("♫♫♫ > via Connection ID {cid}".format(cid=duet_conn.connection_id))
    # Reset responder_id (of DuetCredentialExchanger) to the duet token obtained by the partner -> relevant for
    # the proper functionality of the DuetCredentialExchanger
    self.set_responder_id(token_partner)
    # Send duet token to initiating duet partner
    self._send_duet_token(credential, 2, duet_conn)
    print("\n♫♫♫ > ...waiting for partner to connect...")
def _send_duet_token(self, credential: str, step: int, duet_conn: Connection) -> None:
    """
    Send duet token to partner and print information

    Args:
        credential: duet token that should be sent
        step: step number (so internal function can be used in different situations)
        duet_conn: Aries connection over which a Duet Connection is established

    Returns: -
    """
    # Send duet token to duet partner; the token travels as a basic message
    # over the Aries connection stored in self.duet_connection_id.
    print("\n♫♫♫ >", colored("STEP {n}:".format(n=str(step)), attrs=["bold"]),
          "Sending Duet Token {c}".format(c=credential))
    print("♫♫♫ > to Duet Partner {n}".format(n=duet_conn.connection_with))
    print("♫♫♫ > via Connection ID {cid}".format(cid=self.duet_connection_id))
    self.send_message(self.duet_connection_id, "Duet Token : {c}".format(c=credential), duet_print=True)
def _await_partner_duet_token(self, step: int, duet_conn: Connection) -> str:
    """
    Await duet token from partner and print information

    Args:
        step: step number to print function call as correct step
        duet_conn: Aries connection over which a Duet Connection is established

    Returns: the partner's duet token as a string
    """
    # Set Duet Token to asyncio.Future() (i.e. we are awaiting a result) and wait until it is set
    print("\n♫♫♫ >", colored("STEP {n}:".format(n=str(step)), attrs=["bold"]),
          "Awaiting Duet Token from Duet Partner...")
    if duet_conn.duet_token_partner is None:
        self._update_connection(connection_id=duet_conn.connection_id, token_partner=asyncio.Future())
    # Wait until duet_token_partner is set a Future() with status "Finished"
    # (presumably the basicmessages handler resolves this Future — confirm).
    loop = asyncio.get_event_loop()
    duet_token_partner = loop.run_until_complete(duet_conn.duet_token_partner)
    # Print duet_token_partner info and return
    print("\n♫♫♫ >", colored("DONE!", COLOR_SUCCESS, attrs=["bold"]), "Partner's Duet Token:",
          str(duet_token_partner))
    return str(duet_token_partner)
def get_connection(self, connection_id: str) -> Optional[Connection]:
"""
Get connection by connection_id
Returns: Connection (if it exists) or None
"""
for _id, connection in self.connections.items():
if _id == connection_id:
return connection
return None
def get_connections(self) -> list[Optional[Connection]]:
"""
Returns: Get all connections of the agent
"""
return list(self.connections.values())
def get_active_connections(self) -> list[Optional[Connection]]:
"""
Get all connections where Connection.is_active = True
Returns: list of active connections
"""
return [c for _id, c in self.connections.items() if c.is_active is True]
def get_connection_id(self, agent_name: str) -> list[Optional[Connection]]:
"""
Returns list of connection IDs with a particular agent
Args:
agent_name: name of agent with whom the connection is shared
Returns: list of connection ids shared with agent_name
"""
connection_ids = [_id for _id, c in self.connections.items() if c.connection_with == agent_name]
return connection_ids
def _update_connection(self,
connection_id: str,
auto_accept: Optional[bool] = None,
auto_ping: Optional[bool] = None,
alias: Optional[str] = None,
connection_with: Optional[str] = None,
is_active: Optional[bool] = None,
is_duet_connection: Optional[bool] = None,
token_partner: Optional[str] = None,
token: Optional[str] = None,
reset_duet: bool = False
) -> Connection:
"""
Verify if connection_id exists already. If yes, update and return it.
Else, add it to self.connections, configure it, and return it.
Args:
connection_id: connection_id
auto_accept: whether connection is auto_accepted or not
auto_ping: whether connection should be auto_pinged or not
alias: whether connection has an alias or not
Returns: Connection (either new or updated)
"""
# Get conn. If conn does not yet exist, this will return None
conn = self.get_connection(connection_id)
# Else create a new conn
if conn is None:
conn = Connection(connection_id)
# Update variables of conn
if auto_accept is not None:
conn.auto_accept = auto_accept
if auto_ping is not None:
conn.auto_ping = auto_ping
if alias is not None:
conn.alias = alias
if is_active is not None:
conn.is_active = is_active
if connection_with is not None:
conn.connection_with = connection_with
if is_duet_connection is not None:
conn.is_duet_connection = is_duet_connection
self.duet_connection_id = connection_id
update_future = False
# Reset all duet configurations if reset
if reset_duet is True:
self.duet_connection_id = None if is_duet_connection is None else connection_id
conn.is_duet_connection = False if is_duet_connection is None else is_duet_connection
| |
1.2.3.4
iso:
description: Name of ISO the instance was deployed with.
returned: if available
type: str
sample: Debian-8-64bit
template:
description: Name of template the instance was deployed with.
returned: success
type: str
sample: Linux Debian 9 64-bit
template_display_text:
description: Display text of template the instance was deployed with.
returned: success
type: str
sample: Linux Debian 9 64-bit 200G Disk (2017-10-08-622866)
version_added: '2.6'
service_offering:
description: Name of the service offering the instance has.
returned: success
type: str
sample: 2cpu_2gb
zone:
description: Name of zone the instance is in.
returned: success
type: str
sample: ch-gva-2
state:
description: State of the instance.
returned: success
type: str
sample: Running
security_groups:
description: Security groups the instance is in.
returned: success
type: list
sample: '[ "default" ]'
affinity_groups:
description: Affinity groups the instance is in.
returned: success
type: list
sample: '[ "webservers" ]'
tags:
description: List of resource tags associated with the instance.
returned: success
type: list
sample: '[ { "key": "foo", "value": "bar" } ]'
hypervisor:
description: Hypervisor related to this instance.
returned: success
type: str
sample: KVM
host:
description: Hostname of hypervisor an instance is running on.
returned: success and instance is running
type: str
sample: host-01.example.com
version_added: '2.6'
instance_name:
description: Internal name of the instance (ROOT admin only).
returned: success
type: str
sample: i-44-3992-VM
user-data:
description: Optional data sent to the instance.
returned: success
type: str
sample: VXNlciBkYXRhIGV4YW1wbGUK
'''
import base64
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackInstance(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackInstance, self).__init__(module)
self.returns = {
'group': 'group',
'hypervisor': 'hypervisor',
'instancename': 'instance_name',
'publicip': 'public_ip',
'passwordenabled': '<PASSWORD>',
'password': 'password',
'serviceofferingname': 'service_offering',
'isoname': 'iso',
'templatename': 'template',
'templatedisplaytext': 'template_display_text',
'keypair': 'ssh_key',
'hostname': 'host',
}
self.instance = None
self.template = None
self.iso = None
def get_service_offering_id(self):
service_offering = self.module.params.get('service_offering')
service_offerings = self.query_api('listServiceOfferings')
if service_offerings:
if not service_offering:
return service_offerings['serviceoffering'][0]['id']
for s in service_offerings['serviceoffering']:
if service_offering in [s['name'], s['id']]:
return s['id']
self.fail_json(msg="Service offering '%s' not found" % service_offering)
def get_host_id(self):
host_name = self.module.params.get('host')
if not host_name:
return None
args = {
'type': 'routing',
'zoneid': self.get_zone(key='id'),
}
hosts = self.query_api('listHosts', **args)
if hosts:
for h in hosts['host']:
if h['name'] == host_name:
return h['id']
self.fail_json(msg="Host '%s' not found" % host_name)
def get_template_or_iso(self, key=None):
template = self.module.params.get('template')
iso = self.module.params.get('iso')
if not template and not iso:
return None
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
'isrecursive': True,
'fetch_list': True,
}
if template:
if self.template:
return self._get_by_key(key, self.template)
rootdisksize = self.module.params.get('root_disk_size')
args['templatefilter'] = self.module.params.get('template_filter')
args['fetch_list'] = True
templates = self.query_api('listTemplates', **args)
if templates:
for t in templates:
if template in [t['displaytext'], t['name'], t['id']]:
if rootdisksize and t['size'] > rootdisksize * 1024 ** 3:
continue
self.template = t
return self._get_by_key(key, self.template)
if rootdisksize:
more_info = " (with size <= %s)" % rootdisksize
else:
more_info = ""
self.module.fail_json(msg="Template '%s' not found%s" % (template, more_info))
elif iso:
if self.iso:
return self._get_by_key(key, self.iso)
args['isofilter'] = self.module.params.get('template_filter')
args['fetch_list'] = True
isos = self.query_api('listIsos', **args)
if isos:
for i in isos:
if iso in [i['displaytext'], i['name'], i['id']]:
self.iso = i
return self._get_by_key(key, self.iso)
self.module.fail_json(msg="ISO '%s' not found" % iso)
def get_instance(self):
instance = self.instance
if not instance:
instance_name = self.get_or_fallback('name', 'display_name')
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'fetch_list': True,
}
# Do not pass zoneid, as the instance name must be unique across zones.
instances = self.query_api('listVirtualMachines', **args)
if instances:
for v in instances:
if instance_name.lower() in [v['name'].lower(), v['displayname'].lower(), v['id']]:
self.instance = v
break
return self.instance
def _get_instance_user_data(self, instance):
# Query the user data if we need to
if 'userdata' in instance:
return instance['userdata']
user_data = ""
if self.get_user_data() is not None and instance.get('id'):
res = self.query_api('getVirtualMachineUserData', virtualmachineid=instance['id'])
user_data = res['virtualmachineuserdata'].get('userdata', "")
return user_data
def get_iptonetwork_mappings(self):
network_mappings = self.module.params.get('ip_to_networks')
if network_mappings is None:
return
if network_mappings and self.module.params.get('networks'):
self.module.fail_json(msg="networks and ip_to_networks are mutually exclusive.")
network_names = [n['network'] for n in network_mappings]
ids = self.get_network_ids(network_names)
res = []
for i, data in enumerate(network_mappings):
res.append({'networkid': ids[i], 'ip': data['ip']})
return res
def get_ssh_keypair(self, key=None, name=None, fail_on_missing=True):
ssh_key_name = name or self.module.params.get('ssh_key')
if ssh_key_name is None:
return
args = {
'domainid': self.get_domain('id'),
'account': self.get_account('name'),
'projectid': self.get_project('id'),
'name': ssh_key_name,
}
ssh_key_pairs = self.query_api('listSSHKeyPairs', **args)
if 'sshkeypair' in ssh_key_pairs:
return self._get_by_key(key=key, my_dict=ssh_key_pairs['sshkeypair'][0])
elif fail_on_missing:
self.module.fail_json(msg="SSH key not found: %s" % ssh_key_name)
def ssh_key_has_changed(self):
ssh_key_name = self.module.params.get('ssh_key')
if ssh_key_name is None:
return False
# Fails if keypair for param is inexistent
param_ssh_key_fp = self.get_ssh_keypair(key='fingerprint')
# CloudStack 4.5 does return keypair on instance for a non existent key.
instance_ssh_key_name = self.instance.get('keypair')
if instance_ssh_key_name is None:
return True
# Get fingerprint for keypair of instance but do not fail if inexistent.
instance_ssh_key_fp = self.get_ssh_keypair(key='fingerprint', name=instance_ssh_key_name, fail_on_missing=False)
if not instance_ssh_key_fp:
return True
# Compare fingerprints to ensure the keypair changed
if instance_ssh_key_fp != param_ssh_key_fp:
return True
return False
def security_groups_has_changed(self):
security_groups = self.module.params.get('security_groups')
if security_groups is None:
return False
security_groups = [s.lower() for s in security_groups]
instance_security_groups = self.instance.get('securitygroup') or []
instance_security_group_names = []
for instance_security_group in instance_security_groups:
if instance_security_group['name'].lower() not in security_groups:
return True
else:
instance_security_group_names.append(instance_security_group['name'].lower())
for security_group in security_groups:
if security_group not in instance_security_group_names:
return True
return False
def get_network_ids(self, network_names=None):
if network_names is None:
network_names = self.module.params.get('networks')
if not network_names:
return None
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
'fetch_list': True,
}
networks = self.query_api('listNetworks', **args)
if not networks:
self.module.fail_json(msg="No networks available")
network_ids = []
network_displaytexts = []
for network_name in network_names:
for n in networks:
if network_name in [n['displaytext'], n['name'], n['id']]:
network_ids.append(n['id'])
network_displaytexts.append(n['name'])
break
if len(network_ids) != len(network_names):
self.module.fail_json(msg="Could not find all networks, networks list found: %s" % network_displaytexts)
return network_ids
    def present_instance(self, start_vm=True):
        """Ensure the instance exists: deploy a new VM, or recover and update an existing one.

        :param start_vm: whether a newly deployed VM should be started.
        :return: the instance dict (refreshed on self.instance), possibly None in check mode.
        """
        instance = self.get_instance()

        if not instance:
            instance = self.deploy_instance(start_vm=start_vm)
        else:
            # Recover a destroyed-but-recoverable VM first, then reconcile settings.
            instance = self.recover_instance(instance=instance)
            instance = self.update_instance(instance=instance, start_vm=start_vm)

        # In check mode, we do not necessarily have an instance
        if instance:
            instance = self.ensure_tags(resource=instance, resource_type='UserVm')
            # refresh instance data
            self.instance = instance

        return instance
def get_user_data(self):
user_data = self.module.params.get('user_data')
if user_data is not None:
user_data = to_text(base64.b64encode(to_bytes(user_data)))
return user_data
def get_details(self):
details = self.module.params.get('details')
cpu = self.module.params.get('cpu')
cpu_speed = self.module.params.get('cpu_speed')
memory = self.module.params.get('memory')
if all([cpu, cpu_speed, memory]):
details.extends({
'cpuNumber': cpu,
'cpuSpeed': cpu_speed,
'memory': memory,
})
return details
    def deploy_instance(self, start_vm=True):
        """Deploy a new VM from the configured template/ISO and return the API result.

        Marks the result as changed; honors check mode (no API call) and the
        poll_async param (waits for the async job when set).

        :param start_vm: whether the VM should be started after deployment.
        """
        self.result['changed'] = True
        networkids = self.get_network_ids()
        if networkids is not None:
            # The API expects a comma-separated string of network IDs.
            networkids = ','.join(networkids)

        args = {}
        args['templateid'] = self.get_template_or_iso(key='id')
        if not args['templateid']:
            self.module.fail_json(msg="Template or ISO is required.")

        args['zoneid'] = self.get_zone(key='id')
        args['serviceofferingid'] = self.get_service_offering_id()
        args['account'] = self.get_account(key='name')
        args['domainid'] = self.get_domain(key='id')
        args['projectid'] = self.get_project(key='id')
        args['diskofferingid'] = self.get_disk_offering(key='id')
        args['networkids'] = networkids
        args['iptonetworklist'] = self.get_iptonetwork_mappings()
        args['userdata'] = self.get_user_data()
        args['keyboard'] = self.module.params.get('keyboard')
        args['ipaddress'] = self.module.params.get('ip_address')
        args['ip6address'] = self.module.params.get('ip6_address')
        args['name'] = self.module.params.get('name')
        args['displayname'] = self.get_or_fallback('display_name', 'name')
        args['group'] = self.module.params.get('group')
        args['keypair'] = self.get_ssh_keypair(key='name')
        args['size'] = self.module.params.get('disk_size')
        args['startvm'] = start_vm
        args['rootdisksize'] = self.module.params.get('root_disk_size')
        args['affinitygroupnames'] = self.module.params.get('affinity_groups')
        args['details'] = self.get_details()
        args['securitygroupnames'] = self.module.params.get('security_groups')
        args['hostid'] = self.get_host_id()

        # Only pass a hypervisor when the template/ISO does not pin one.
        template_iso = self.get_template_or_iso()
        if 'hypervisor' not in template_iso:
            args['hypervisor'] = self.get_hypervisor()

        instance = None
        if not self.module.check_mode:
            instance = self.query_api('deployVirtualMachine', **args)

            poll_async = self.module.params.get('poll_async')
            if poll_async:
                instance = self.poll_job(instance, 'virtualmachine')
        return instance
def update_instance(self, instance, start_vm=True):
# Service offering data
args_service_offering = {
'id': instance['id'],
}
if self.module.params.get('service_offering'):
args_service_offering['serviceofferingid'] = self.get_service_offering_id()
service_offering_changed = self.has_changed(args_service_offering, instance)
# Instance data
args_instance_update = {
'id': instance['id'],
'userdata': self.get_user_data(),
}
instance['userdata'] = self._get_instance_user_data(instance)
args_instance_update['ostypeid'] = self.get_os_type(key='id')
if self.module.params.get('group'):
args_instance_update['group'] = self.module.params.get('group')
if self.module.params.get('display_name'):
args_instance_update['displayname'] = self.module.params.get('display_name')
instance_changed = self.has_changed(args_instance_update, instance)
ssh_key_changed = self.ssh_key_has_changed()
security_groups_changed = self.security_groups_has_changed()
# Volume data
args_volume_update = {}
root_disk_size = self.module.params.get('root_disk_size')
root_disk_size_changed = False
if root_disk_size is not None:
res = self.query_api('listVolumes', type='ROOT', virtualmachineid=instance['id'])
[volume] = res['volume']
size = volume['size'] >> 30
args_volume_update['id'] = volume['id']
args_volume_update['size'] = root_disk_size
shrinkok = self.module.params.get('allow_root_disk_shrink')
if shrinkok:
args_volume_update['shrinkok'] = shrinkok
root_disk_size_changed = root_disk_size != size
changed = [
service_offering_changed,
instance_changed,
security_groups_changed,
ssh_key_changed,
root_disk_size_changed,
]
if any(changed):
force = self.module.params.get('force')
instance_state = instance['state'].lower()
if instance_state == 'stopped' or force:
self.result['changed'] = True
if not self.module.check_mode:
# Ensure VM has stopped
instance = self.stop_instance()
instance = self.poll_job(instance, 'virtualmachine')
self.instance = instance
# Change service offering
if service_offering_changed:
res = self.query_api('changeServiceForVirtualMachine', **args_service_offering)
instance = res['virtualmachine']
self.instance = | |
# src/data_manager.py
import random
import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset
class PytorchDataset(Dataset):
    """Dataset wrapper that applies a transform at import time and, optionally,
    another transform on retrieval."""

    def __init__(self, df, init_transform, getitem_transform=None):
        """
        :param df: Dataframe containing data, must have "concepts" and "tokens" columns; every entry of such a
        column is a list of strings, and entries for the same sample must have the same length, given that they
        represent the same sentence, either as tokens or concepts.
        :param init_transform: Transform function applied to each row once, at import time.
        :param getitem_transform: Transform function applied when an item is retrieved with __getitem__.
        """
        self.init_transform = init_transform
        self.getitem_transform = getitem_transform
        # Eagerly transform each dataframe row and index the results by position.
        self.data = {row_idx: self.init_transform(df.iloc[row_idx, :]) for row_idx in range(len(df))}

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        item = self.data[idx]
        if self.getitem_transform is None:
            return item
        return self.getitem_transform(item)
def w2v_matrix_vocab_generator(w2v_pickle):
    """
    Build a word->index vocabulary and an embedding matrix from a pickled dataframe.

    Row i of the returned matrix is the embedding of the word mapped to index i.
    Two extra rows are appended for the special "<UNK>" and "<padding>" tokens
    (left as zero vectors).

    :param w2v_pickle: Path to a pickled dataframe with "token" (str) and "vector"
        (fixed-length embedding) columns.
    :return: (vocabulary dict, numpy matrix of shape (num words + 2, embedding dim)).
    """
    frame = pd.read_pickle(w2v_pickle)
    dim = len(frame.iloc[0, 1])
    vocab = dict()
    # +2 rows reserved for the unknown and padding tokens.
    weights = np.zeros(shape=(frame.shape[0] + 2, dim))
    for _, row in frame.iterrows():
        idx = len(vocab)
        vocab[row["token"]] = idx
        weights[idx, :] = np.array(row["vector"])
    vocab["<UNK>"] = len(weights) - 2
    vocab["<padding>"] = len(weights) - 1
    return vocab, weights
class Data(object):
    """
    Container for a token/concept corpus. Once initialized it exposes:
    - counter properties for words, concepts, clean concepts (IOB prefix stripped)
      and word+concept pairs
    - lexicon properties listing the distinct words / concepts
    - lookup methods returning the count of a specific word, concept or pair
    note: this class is something i copy pasted from an old project of mine, not really that useful in here, mostly used
    for the wfst script.
    """

    def __init__(self, file):
        """
        Inits the structure, importing the file and elaborating all the counts.

        :param file: Path to a corpus file. Each non-empty line holds one data
            point of whitespace-separated fields — this implementation unpacks
            exactly two (token and concept); an empty line terminates a phrase.
            Fix: a trailing phrase is now kept even when the file does not end
            with an empty line (previously it was silently dropped).
        """
        self._data = []  # list of phrases; each phrase is a list of [word, concept] points
        with open(file, 'r') as source:
            phrase = []
            for line in source:
                fields = line.split()
                if len(fields) > 0:
                    # keep building current phrase
                    phrase.append(fields)
                else:
                    # end of phrase, append it to data
                    self._data.append(phrase)
                    phrase = []
            # Keep the last phrase when no terminating blank line is present.
            if phrase:
                self._data.append(phrase)

        # singletons
        self.__words_counter = dict()
        self.__concepts_counter = dict()
        self.__concepts_clean_counter = dict()  # concepts without IOB notation
        # pairs of stuff
        self.__word_concept_counter = dict()

        for phrase in self._data:
            for data_point in phrase:
                word, concept = data_point
                # singletons
                self.__words_counter[word] = 1 + self.__words_counter.get(word, 0)
                self.__concepts_counter[concept] = 1 + self.__concepts_counter.get(concept, 0)
                # strip the IOB prefix ("B-xxx"/"I-xxx" -> "xxx"); "O" has none
                clean_c = concept if concept == "O" else concept[2:]
                self.__concepts_clean_counter[clean_c] = 1 + self.__concepts_clean_counter.get(clean_c, 0)
                # pairs of stuff
                self.__word_concept_counter[word + " " + concept] = 1 + self.__word_concept_counter.get(
                    word + " " + concept, 0)

    @property
    def size(self):
        """
        :return: Number of phrases stored.
        """
        return len(self._data)

    @property
    def counter_words(self):
        """
        :return: Dictionary that maps a word to its counter.
        """
        return self.__words_counter

    @property
    def counter_concepts(self):
        """
        :return: Dictionary that maps a concept to its counter.
        """
        return self.__concepts_counter

    @property
    def counter_clean_concepts(self):
        """
        :return: Dictionary that maps a concept (no IOB) to its counter.
        """
        return self.__concepts_clean_counter

    @property
    def counter_word_concept(self):
        """
        :return: Dictionary that maps a "word concept" pair (space-separated) to its counter.
        """
        return self.__word_concept_counter

    @property
    def lexicon_words(self):
        """
        :return: List of words in the corpus. <epsilon> and <unk> not included.
        """
        return list(self.counter_words.keys())

    @property
    def lexicon_concepts(self):
        """
        :return: List of concepts in the corpus. <epsilon> and <unk> not included.
        """
        return list(self.counter_concepts.keys())

    @property
    def lexicon_clean_concepts(self):
        """
        :return: List of clean concepts (no IOB notation) in the corpus. <epsilon> and <unk> not included.
        """
        return list(self.__concepts_clean_counter.keys())

    def word(self, word):
        """
        :param word: Word for which to return the count for.
        :return: Count of the word, >= 0.
        """
        return self.__words_counter.get(word, 0)

    def concept(self, concept):
        """
        :param concept: Concept for which to return the count for.
        :return: Count of the concept, >= 0.
        """
        return self.__concepts_counter.get(concept, 0)

    def word_concept(self, word, concept):
        """
        :param word: Word of the word - concept pair.
        :param concept: Concept of the word - concept pair.
        :return: Count of the word - concept pair, >= 0.
        """
        return self.__word_concept_counter.get(word + " " + concept, 0)

    def to_dataframe(self):
        """
        Transform the data to a df containing the tokens and concepts columns.
        Each sentence corresponds to a row; each column (for each row) contains a list of strings.
        :return: Dataframe transposition of this data object.
        """
        df = pd.DataFrame(columns=["tokens", "concepts"])
        for i, phrase in enumerate(self._data):
            words = []
            concepts = []
            for data_point in phrase:
                word, concept = data_point
                words.append(word)
                concepts.append(concept)
            df.loc[i] = [words, concepts]
        return df
def batch_sequence(batch, device, preserve_indexes=False, preserve_elmo=False):
    """
    Collate a list of samples into batched tensors (batch is the first dimension).

    :param batch: List of sample dicts; each must contain "tokens", "concepts",
        "pos_enc" and "ner_enc" tensors, and may contain "chars" and
        "tokens_indexes".
    :param device: Device to move data, labels and char data to
        (pos/ner tensors are returned on their original device, as before).
    :param preserve_indexes: Also return the batched "tokens_indexes" tensor.
    :param preserve_elmo: When True, "tokens" tensors already carry a batch
        dimension and are concatenated without unsqueezing.
    """
    def _stack(key, unsqueeze=True):
        # Concatenate per-sample tensors along a new (or existing) batch dim.
        tensors = [s[key].unsqueeze(0) if unsqueeze else s[key] for s in batch]
        return torch.cat(tensors, dim=0)

    data = _stack("tokens", unsqueeze=not preserve_elmo)
    labels = _stack("concepts")
    pos = _stack("pos_enc")
    ner = _stack("ner_enc")

    char_data = None
    if "chars" in batch[0]:
        char_data = _stack("chars", unsqueeze=False).to(device)

    if preserve_indexes:
        data_index = _stack("tokens_indexes")
        return data.to(device), labels.to(device), char_data, pos, ner, data_index
    return data.to(device), labels.to(device), char_data, pos, ner
class DropTransform(object):
    """Run-time transform for the pytorch dataset class: randomly replaces token
    indexes with the unknown-token index to 'simulate' unseen words."""

    def __init__(self, drop_chance, unk_idx, preserve_idx, keep_tokens=False, preserve_indexes=False):
        """
        :param drop_chance: Chance of dropping a word.
        :param unk_idx: Index substituted for a dropped word.
        :param preserve_idx: Index that is never dropped (i.e. the padding index).
        :param keep_tokens: Pass "tokens" through untouched instead of dropping.
        :param preserve_indexes: Also apply dropping to "tokens_indexes".
        """
        self.drop_chance = drop_chance
        self.unk_idx = unk_idx
        self.preserve_idx = preserve_idx
        self.keep_tokens = keep_tokens
        self.preserve_indexes = preserve_indexes

    def _might_drop(self, idx):
        """Return unk_idx with probability drop_chance (never for preserve_idx), else idx."""
        should_drop = random.uniform(0, 1) < self.drop_chance and idx != self.preserve_idx
        return self.unk_idx if should_drop else idx

    def __call__(self, sample):
        """Return a copy of the sample with tokens (and optionally token indexes)
        randomly replaced; concepts, pos/ner encodings and chars are preserved."""
        out = dict()
        if self.keep_tokens:
            out["tokens"] = sample["tokens"]
        else:
            dropped = sample["tokens"].clone()
            for pos in range(len(dropped)):
                dropped[pos] = self._might_drop(sample["tokens"][pos].item())
            out["tokens"] = dropped

        if self.preserve_indexes:
            idxs = sample["tokens_indexes"].clone()
            for pos in range(len(idxs)):
                idxs[pos] = self._might_drop(sample["tokens_indexes"][pos].item())
            out["tokens_indexes"] = idxs

        for key in ("pos_enc", "ner_enc", "concepts", "sequence_extra"):
            out[key] = sample[key]
        if "chars" in sample:
            out["chars"] = sample["chars"]
        return out
class InitTransform(object):
""" Transformer class to be passed to the PytorchDataset | |
# gh_stars: 0
#!/usr/bin/env python
# Class autogenerated from /home/sam/Downloads/aldebaran_sw/nao/naoqi-sdk-2.1.4.13-linux64/include/alproxies/almotionproxy.h
# by an automated proxy-class generator (author contact details redacted)
# You need an ALBroker running
from naoqi import ALProxy
class ALMotion(object):
    def __init__(self, session):
        """Wrap the ALMotion service of the given qi session; the proxy is created lazily."""
        self.proxy = None
        self.session = session
    def force_connect(self):
        """Eagerly (re)acquire the ALMotion service proxy from the session."""
        self.proxy = self.session.service("ALMotion")
def angleInterpolation(self, names, angleLists, timeLists, isAbsolute):
"""Interpolates one or multiple joints to a target angle or along timed trajectories. This is a blocking call.
:param AL::ALValue names: Name or names of joints, chains, "Body", "JointActuators", "Joints" or "Actuators".
:param AL::ALValue angleLists: An angle, list of angles or list of list of angles in radians
:param AL::ALValue timeLists: A time, list of times or list of list of times in seconds
:param bool isAbsolute: If true, the movement is described in absolute angles, else the angles are relative to the current angle.
"""
if not self.proxy:
self.proxy = self.session.service("ALMotion")
return self.proxy.angleInterpolation(names, angleLists, timeLists, isAbsolute)
def angleInterpolationBezier(self, jointNames, times, controlPoints):
"""Interpolates a sequence of timed angles for several motors using bezier control points. This is a blocking call.
:param std::vector<std::string> jointNames: A vector of joint names
:param AL::ALValue times: An ragged ALValue matrix of floats. Each line corresponding to a motor, and column element to a control point.
:param AL::ALValue controlPoints: An ALValue array of arrays each containing [float angle, Handle1, Handle2], where Handle is [int InterpolationType, float dAngle, float dTime] descibing the handle offsets relative to the angle and time of the point. The first bezier param describes the handle that controls the curve preceeding the point, the second describes the curve following the point.
"""
if not self.proxy:
self.proxy = self.session.service("ALMotion")
return self.proxy.angleInterpolationBezier(jointNames, times, controlPoints)
def angleInterpolationWithSpeed(self, names, targetAngles, maxSpeedFraction):
"""Interpolates one or multiple joints to a target angle, using a fraction of max speed. Only one target angle is allowed for each joint. This is a blocking call.
:param AL::ALValue names: Name or names of joints, chains, "Body", "JointActuators", "Joints" or "Actuators".
:param AL::ALValue targetAngles: An angle, or list of angles in radians
:param float maxSpeedFraction: A fraction.
"""
if not self.proxy:
self.proxy = self.session.service("ALMotion")
return self.proxy.angleInterpolationWithSpeed(names, targetAngles, maxSpeedFraction)
def areNotificationsEnabled(self):
"""Return true if notifications are active.
:returns bool: Return True if notifications are active.
"""
if not self.proxy:
self.proxy = self.session.service("ALMotion")
return self.proxy.areNotificationsEnabled()
def areResourcesAvailable(self, resourceNames):
"""Returns true if all the desired resources are available. Only motion API's' blocking call takes resources.
:param std::vector<std::string> resourceNames: A vector of resource names such as joints. Use getBodyNames("Body") to have the list of the available joint for your robot.
:returns bool: True if the resources are available
"""
if not self.proxy:
self.proxy = self.session.service("ALMotion")
return self.proxy.areResourcesAvailable(resourceNames)
def changeAngles(self, names, changes, fractionMaxSpeed):
"""Changes Angles. This is a non-blocking call.
:param AL::ALValue names: The name or names of joints, chains, "Body", "JointActuators", "Joints" or "Actuators".
:param AL::ALValue changes: One or more changes in radians
:param float fractionMaxSpeed: The fraction of maximum speed to use
"""
if not self.proxy:
self.proxy = self.session.service("ALMotion")
return self.proxy.changeAngles(names, changes, fractionMaxSpeed)
def changePosition(self, effectorName, space, positionChange, fractionMaxSpeed, axisMask):
"""DEPRECATED. Use setPositions function instead.
:param str effectorName: Name of the effector.
:param int space: Task frame {FRAME_TORSO = 0, FRAME_WORLD = 1, FRAME_ROBOT = 2}.
:param std::vector<float> positionChange: 6D position change array (xd, yd, zd, wxd, wyd, wzd) in meters and radians
:param float fractionMaxSpeed: The fraction of maximum speed to use
:param int axisMask: Axis mask. True for axes that you wish to control. e.g. 7 for position only, 56 for rotation only and 63 for both
"""
if not self.proxy:
self.proxy = self.session.service("ALMotion")
return self.proxy.changePosition(effectorName, space, positionChange, fractionMaxSpeed, axisMask)
def changeTransform(self, chainName, space, transform, fractionMaxSpeed, axisMask):
"""DEPRECATED. Use setTransforms function instead.
:param str chainName: Name of the chain. Could be: "Head", "LArm","RArm", "LLeg", "RLeg", "Torso"
:param int space: Task frame {FRAME_TORSO = 0, FRAME_WORLD = 1, FRAME_ROBOT = 2}.
:param std::vector<float> transform: Transform arrays
:param float fractionMaxSpeed: The fraction of maximum speed to use
:param int axisMask: Axis mask. True for axes that you wish to control. e.g. 7 for position only, 56 for rotation only and 63 for both
"""
if not self.proxy:
self.proxy = self.session.service("ALMotion")
return self.proxy.changeTransform(chainName, space, transform, fractionMaxSpeed, axisMask)
def closeHand(self, handName):
"""NAO stiffens the motors of desired hand. Then, he closes the hand, then cuts motor current to conserve energy. This is a blocking call.
:param str handName: The name of the hand. Could be: "RHand" or "LHand"
"""
if not self.proxy:
self.proxy = self.session.service("ALMotion")
return self.proxy.closeHand(handName)
def getAngles(self, names, useSensors):
    """Gets the angles of the joints.

    :param AL::ALValue names: Names the joints, chains, "Body", "JointActuators", "Joints" or "Actuators".
    :param bool useSensors: If true, sensor angles will be returned
    :returns std::vector<float>: Joint angles in radians.
    """
    proxy = self.proxy
    if not proxy:
        # Resolve and cache the ALMotion service on first use.
        proxy = self.session.service("ALMotion")
        self.proxy = proxy
    return proxy.getAngles(names, useSensors)
def getBodyNames(self, name):
    """Gets the names of all the joints and actuators in the collection.

    :param str name: Name of a chain, "Arms", "Legs", "Body", "Chains", "JointActuators", "Joints" or "Actuators".
    :returns std::vector<std::string>: Vector of strings, one for each joint and actuator in the collection
    """
    proxy = self.proxy
    if not proxy:
        # Resolve and cache the ALMotion service on first use.
        proxy = self.session.service("ALMotion")
        self.proxy = proxy
    return proxy.getBodyNames(name)
def getBreathConfig(self):
    """Gets the current breathing configuration. "Bpm" is the breathing
    frequency in beats per minute; "Amplitude" is the normalized amplitude
    of the breathing animation, between 0 and 1.

    :returns AL::ALValue: An ALValue of the form [["Bpm", bpm], ["Amplitude", amplitude]].
    """
    proxy = self.proxy
    if not proxy:
        # Resolve and cache the ALMotion service on first use.
        proxy = self.session.service("ALMotion")
        self.proxy = proxy
    return proxy.getBreathConfig()
def getBreathEnabled(self, pChain):
    """Gets the status of the breathing animation on a chain. Chain name
    can be "Body", "Arms", "LArm", "RArm", "Legs" or "Head".

    :param str pChain: Chain name.
    :returns bool: True if breathing animation is enabled on the chain.
    """
    proxy = self.proxy
    if not proxy:
        # Resolve and cache the ALMotion service on first use.
        proxy = self.session.service("ALMotion")
        self.proxy = proxy
    return proxy.getBreathEnabled(pChain)
def getCOM(self, pName, pSpace, pUseSensorValues):
    """Gets the COM of a joint, chain, "Body" or "Joints".

    :param str pName: Name of the body whose mass we want. For a chain name, this function gives the COM of the chain.
    :param int pSpace: Task frame {FRAME_TORSO = 0, FRAME_WORLD = 1, FRAME_ROBOT = 2}.
    :param bool pUseSensorValues: If true, the sensor values will be used to determine the position.
    :returns std::vector<float>: The COM position (meter).
    """
    proxy = self.proxy
    if not proxy:
        # Resolve and cache the ALMotion service on first use.
        proxy = self.session.service("ALMotion")
        self.proxy = proxy
    return proxy.getCOM(pName, pSpace, pUseSensorValues)
def getChainClosestObstaclePosition(self, pName, space):
    """Gets the chain's closest obstacle position.

    :param str pName: The chain name {"LArm" or "RArm"}.
    :param int space: Task frame {FRAME_TORSO = 0, FRAME_WORLD = 1, FRAME_ROBOT = 2}.
    :returns std::vector<float>: Vector containing the Position3D in meters (x, y, z)
    """
    proxy = self.proxy
    if not proxy:
        # Resolve and cache the ALMotion service on first use.
        proxy = self.session.service("ALMotion")
        self.proxy = proxy
    return proxy.getChainClosestObstaclePosition(pName, space)
def getCollisionProtectionEnabled(self, pChainName):
    """Tells whether collision protection is activated on the given chain.

    :param str pChainName: The chain name {"LArm" or "RArm"}.
    :returns bool: True if the collision protection of the given arm is activated.
    """
    proxy = self.proxy
    if not proxy:
        # Resolve and cache the ALMotion service on first use.
        proxy = self.session.service("ALMotion")
        self.proxy = proxy
    return proxy.getCollisionProtectionEnabled(pChainName)
def getDiagnosisEffectEnabled(self):
    """Gives the state of the diagnosis effect.

    :returns bool: True if the diagnosis reflex is activated.
    """
    proxy = self.proxy
    if not proxy:
        # Resolve and cache the ALMotion service on first use.
        proxy = self.session.service("ALMotion")
        self.proxy = proxy
    return proxy.getDiagnosisEffectEnabled()
def getExternalCollisionProtectionEnabled(self, pName):
    """Tells whether external collision protection is activated for the
    given name.

    :param str pName: The name {"All", "Move", "Arms", "LArm" or "RArm"}.
    :returns bool: True if the external collision protection of the given name is activated.
    """
    proxy = self.proxy
    if not proxy:
        # Resolve and cache the ALMotion service on first use.
        proxy = self.session.service("ALMotion")
        self.proxy = proxy
    return proxy.getExternalCollisionProtectionEnabled(pName)
def getFallManagerEnabled(self):
    """Gives the state of the fall manager.

    :returns bool: True if the fall manager is activated.
    """
    proxy = self.proxy
    if not proxy:
        # Resolve and cache the ALMotion service on first use.
        proxy = self.session.service("ALMotion")
        self.proxy = proxy
    return proxy.getFallManagerEnabled()
def getFootGaitConfig(self, config):
    """DEPRECATED. Use getMoveConfig function instead.

    Gets the foot gait config ("MaxStepX", "MaxStepY", "MaxStepTheta",
    "MaxStepFrequency", "StepHeight", "TorsoWx", "TorsoWy").

    :param str config: a string; should be "Max", "Min" or "Default"
    :returns AL::ALValue: An ALValue of the form [["MaxStepX", value],
        ["MaxStepY", value], ["MaxStepTheta", value],
        ["MaxStepFrequency", value], ["StepHeight", value],
        ["TorsoWx", value], ["TorsoWy", value]]
    """
    proxy = self.proxy
    if not proxy:
        # Resolve and cache the ALMotion service on first use.
        proxy = self.session.service("ALMotion")
        self.proxy = proxy
    return proxy.getFootGaitConfig(config)
def getFootSteps(self):
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""The Python datastore API used by app developers.
Defines Entity, Query, and Iterator classes, as well as methods for all of the
datastore's calls. Also defines conversions between the Python classes and
their PB counterparts.
The datastore errors are defined in the datastore_errors module. That module is
only required to avoid circular imports. datastore imports datastore_types,
which needs BadValueError, so it can't be defined in datastore.
"""
import heapq
import itertools
import logging
import os
import re
import string
import sys
import traceback
from xml.sax import saxutils
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_rpc
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import capabilities
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.datastore import datastore_index
from google.appengine.datastore import datastore_pb
from google.appengine.runtime import apiproxy_errors
from google.appengine.datastore import entity_pb
try:
__import__('google.appengine.api.labs.taskqueue.taskqueue_service_pb')
taskqueue_service_pb = sys.modules.get(
'google.appengine.api.labs.taskqueue.taskqueue_service_pb')
except ImportError:
from google.appengine.api.taskqueue import taskqueue_service_pb
# Maximum number of sub-queries allowed in a single (IN/!=) composite query.
MAX_ALLOWABLE_QUERIES = 30
# Hard cap on the number of results a single query may return.
MAXIMUM_RESULTS = 1000
# Number of times RunInTransaction retries on concurrent modification.
DEFAULT_TRANSACTION_RETRIES = 3
# Capability sets used to probe datastore availability (read vs. write).
READ_CAPABILITY = capabilities.CapabilitySet('datastore_v3')
WRITE_CAPABILITY = capabilities.CapabilitySet(
    'datastore_v3',
    capabilities=['write'])
_MAX_INDEXED_PROPERTIES = 5000
_MAX_ID_BATCH_SIZE = 1000 * 1000 * 1000
# Re-exported convenience aliases from datastore_types.
Key = datastore_types.Key
typename = datastore_types.typename
# Per-thread transaction bookkeeping (keyed elsewhere in this module).
_txes = {}
# The only keyword argument the public API functions accept.
_ALLOWED_API_KWARGS = frozenset(['rpc'])
# Calls on which an eventually-consistent read policy is permitted.
_ALLOWED_FAILOVER_READ_METHODS = set(
    ('Get', 'RunQuery', 'RunCompiledQuery', 'Count', 'Next'))
# failover_ms value meaning "any replica, arbitrary staleness".
ARBITRARY_FAILOVER_MS = -1
# Read-policy flags accepted by CreateRPC/DatastoreRPC.
STRONG_CONSISTENCY = 0
EVENTUAL_CONSISTENCY = 1
_MAX_INT_32 = 2**31-1
def NormalizeAndTypeCheck(arg, types):
    """Normalizes and type checks the given argument.

    Args:
      arg: an instance or iterable of the given type(s)
      types: allowed type or tuple of types

    Returns:
      A (list, bool) tuple. The list is a normalized, shallow copy of the
      argument. The boolean is True if the argument was a sequence, False
      if it was a single object.

    Raises:
      AssertionError: types includes list or tuple.
      BadArgumentError: arg is not an instance or sequence of one of the given
        types.
    """
    if not isinstance(types, (list, tuple)):
        types = (types,)
    # list/tuple are reserved to mean "sequence of", never an element type.
    assert list not in types and tuple not in types

    # A single matching instance is wrapped in a one-element list.
    if isinstance(arg, types):
        return [arg], False

    # Strings are iterable but are never treated as sequences of values.
    if isinstance(arg, basestring):
        raise datastore_errors.BadArgumentError(
            'Expected an instance or iterable of %s; received %s (a %s).' %
            (types, arg, typename(arg)))
    try:
        arg_list = list(arg)
    except TypeError:
        raise datastore_errors.BadArgumentError(
            'Expected an instance or iterable of %s; received %s (a %s).' %
            (types, arg, typename(arg)))
    for val in arg_list:
        if not isinstance(val, types):
            raise datastore_errors.BadArgumentError(
                'Expected one of %s; received %s (a %s).' %
                (types, val, typename(val)))
    return arg_list, True
def NormalizeAndTypeCheckKeys(keys):
    """Normalizes and type checks that the given argument is a valid key or keys.

    A wrapper around NormalizeAndTypeCheck() that accepts strings, Keys, and
    Entities, and normalizes to Keys.

    Args:
      keys: a Key or sequence of Keys

    Returns:
      A (list of Keys, bool) tuple. See NormalizeAndTypeCheck.

    Raises:
      BadArgumentError: arg is not an instance or sequence of one of the given
        types.
    """
    normalized, multiple = NormalizeAndTypeCheck(
        keys, (basestring, Entity, Key))
    # Every accepted form is coerced to a complete Key (or rejected).
    complete_keys = [_GetCompleteKeyOrError(key) for key in normalized]
    return (complete_keys, multiple)
def GetRpcFromKwargs(kwargs):
    """Extract the optional 'rpc' keyword argument from an API call.

    Args:
      kwargs: the keyword-argument dict passed to a public API function.

    Returns:
      The 'rpc' value if given, otherwise None.

    Raises:
      TypeError: if kwargs contains any key other than those in
        _ALLOWED_API_KWARGS.
    """
    if not kwargs:
        return None
    unknown = set(kwargs) - _ALLOWED_API_KWARGS
    if unknown:
        raise TypeError('Invalid arguments: %s' % ', '.join(unknown))
    return kwargs.get('rpc')
def _MakeSyncCall(service, call, request, response, rpc=None):
    """The APIProxy entry point for a synchronous API call.

    Args:
      service: string representing which service to call
      call: string representing which function to call
      request: protocol buffer for the request
      response: protocol buffer for the response
      rpc: datastore.DatastoreRPC to use for this request.

    Returns:
      Response protocol buffer. Caller should always use the returned value,
      which may or may not be the same object as the passed-in 'response'.

    Raises:
      apiproxy_errors.Error or a subclass.
    """
    rpc = rpc or CreateRPC(service)
    rpc.make_call(call, request, response)
    # Block until the RPC completes, then surface any error.
    rpc.wait()
    rpc.check_success()
    return response
def CreateRPC(service='datastore_v3', deadline=None, callback=None,
              read_policy=STRONG_CONSISTENCY):
    """Create an rpc for use in configuring datastore calls.

    Args:
      service: string, which service to call (defaults to 'datastore_v3').
      deadline: float, deadline for calls in seconds.
      callback: callable, a callback triggered when this rpc completes,
        accepts one argument: the returned rpc.
      read_policy: flag, set to EVENTUAL_CONSISTENCY to enable eventually
        consistent reads

    Returns:
      A datastore.DatastoreRPC instance.
    """
    # Thin factory kept for API stability; all logic lives in DatastoreRPC.
    return DatastoreRPC(service, deadline, callback, read_policy)
class DatastoreRPC(apiproxy_stub_map.UserRPC):
    """Specialized RPC for the datastore.

    Wraps the default RPC class and sets appropriate values for use by the
    datastore.

    This class or a subclass of it is intended to be instantiated by
    developers interested in setting specific request parameters, such as
    deadline, on API calls. It will be used to make the actual call.
    """

    def __init__(self, service='datastore_v3', deadline=None, callback=None,
                 read_policy=STRONG_CONSISTENCY):
        super(DatastoreRPC, self).__init__(service, deadline, callback)
        # STRONG_CONSISTENCY or EVENTUAL_CONSISTENCY; enforced in make_call.
        self.read_policy = read_policy

    def make_call(self, call, request, response):
        # An eventually-consistent read policy is only legal on the read
        # methods listed in _ALLOWED_FAILOVER_READ_METHODS.
        if self.read_policy == EVENTUAL_CONSISTENCY:
            if call not in _ALLOWED_FAILOVER_READ_METHODS:
                raise datastore_errors.BadRequestError(
                    'read_policy is only supported on read operations.')
            # 'Next' requests carry no failover_ms field; presumably the
            # policy was applied on the originating query -- TODO confirm.
            if call != 'Next':
                request.set_failover_ms(ARBITRARY_FAILOVER_MS)
        super(DatastoreRPC, self).make_call(call, request, response)

    def clone(self):
        """Make a shallow copy of this instance.

        This is usually used when an RPC has been specified with some configuration
        options and is being used as a template for multiple RPCs outside of a
        developer's easy control.
        """
        # Only an idle RPC may serve as a template; an in-flight one would
        # share mutable call state.
        assert self.state == apiproxy_rpc.RPC.IDLE
        return self.__class__(
            self.service, self.deadline, self.callback, self.read_policy)
def Put(entities, **kwargs):
    """Store one or more entities in the datastore.

    The entities may be new or previously existing. For new entities, Put() will
    fill in the app id and key assigned by the datastore.

    If the argument is a single Entity, a single Key will be returned. If the
    argument is a list of Entity, a list of Keys will be returned.

    Args:
      entities: Entity or list of Entities
      rpc: datastore.RPC to use for this request.

    Returns:
      Key or list of Keys

    Raises:
      TransactionFailedError, if the Put could not be committed.
    """
    rpc = GetRpcFromKwargs(kwargs)
    entities, multiple = NormalizeAndTypeCheck(entities, Entity)

    # Empty list in, empty list out -- no RPC is made.
    if multiple and not entities:
        return []

    for entity in entities:
        if not entity.kind() or not entity.app():
            raise datastore_errors.BadRequestError(
                'App and kind must not be empty, in entity: %s' % entity)

    req = datastore_pb.PutRequest()
    req.entity_list().extend([e._ToPb() for e in entities])

    keys = [e.key() for e in entities]
    # Associate this write with the current transaction, if one is open.
    tx = _MaybeSetupTransaction(req, keys)
    try:
        resp = _MakeSyncCall(
            'datastore_v3', 'Put', req, datastore_pb.PutResponse(), rpc)
    except apiproxy_errors.ApplicationError, err:
        raise _ToDatastoreError(err)

    # The backend must return exactly one key per entity, in order.
    keys = resp.key_list()
    num_keys = len(keys)
    num_entities = len(entities)
    if num_keys != num_entities:
        raise datastore_errors.InternalError(
            'Put accepted %d entities but returned %d keys.' %
            (num_entities, num_keys))

    # Copy the datastore-assigned keys back onto the caller's entities
    # (fills in ids for previously-incomplete keys).
    for entity, key in zip(entities, keys):
        entity._Entity__key._Key__reference.CopyFrom(key)

    if tx:
        tx.entity_group = entities[0].entity_group()

    if multiple:
        return [Key._FromPb(k) for k in keys]
    else:
        return Key._FromPb(resp.key(0))
def Get(keys, **kwargs):
    """Retrieves one or more entities from the datastore.

    Retrieves the entity or entities with the given key(s) from the datastore
    and returns them as fully populated Entity objects, as defined below. If
    there is an error, raises a subclass of datastore_errors.Error.

    If keys is a single key or string, an Entity will be returned, or
    EntityNotFoundError will be raised if no existing entity matches the key.

    However, if keys is a list or tuple, a list of entities will be returned
    that corresponds to the sequence of keys. It will include entities for keys
    that were found and None placeholders for keys that were not found.

    Args:
      # the primary key(s) of the entity(ies) to retrieve
      keys: Key or string or list of Keys or strings
      rpc: datastore.RPC to use for this request.

    Returns:
      Entity or list of Entity objects
    """
    rpc = GetRpcFromKwargs(kwargs)
    keys, multiple = NormalizeAndTypeCheckKeys(keys)

    # Empty list in, empty list out -- no RPC is made.
    if multiple and not keys:
        return []
    req = datastore_pb.GetRequest()
    req.key_list().extend([key._Key__reference for key in keys])
    # Associate this read with the current transaction, if one is open.
    _MaybeSetupTransaction(req, keys)

    try:
        resp = _MakeSyncCall(
            'datastore_v3', 'Get', req, datastore_pb.GetResponse(), rpc)
    except apiproxy_errors.ApplicationError, err:
        raise _ToDatastoreError(err)

    # Results come back in request order; None marks a missing entity.
    entities = []
    for group in resp.entity_list():
        if group.has_entity():
            entities.append(Entity._FromPb(group.entity()))
        else:
            entities.append(None)

    if multiple:
        return entities
    else:
        # Single-key form: a miss is an error rather than a None result.
        if entities[0] is None:
            raise datastore_errors.EntityNotFoundError()
        return entities[0]
def Delete(keys, **kwargs):
"""Deletes one or more entities from the datastore. Use with care!
Deletes the given entity(ies) from the datastore. You can only delete
entities from your app. If there is an error, raises a subclass of
datastore_errors.Error.
Args:
# the primary key(s) of the entity(ies) to delete
keys: Key or string or list of | |
return redirect_uri
except Exception as exc:
logger.debug(
"An error occurred while verifying redirect URI: %s" % str(exc))
return None
def is_session_revoked(self, request="", cookie=None):
    """Return whether the session of the user identified by the cookie has
    been revoked.

    :param request: urlencoded request (used to pick the authn method)
    :param cookie: The session cookie identifying the user
    :return: Result of self.sdb.is_revoke_uid for the authenticated uid
    """
    areq = urlparse.parse_qs(request)
    authn, _acr = self.pick_auth(areq)
    identity, _ts = authn.authenticated_as(cookie)
    return self.sdb.is_revoke_uid(identity["uid"])
def let_user_verify_logout(self, uid, esr, cookie, redirect_uri):
    """Render a page asking the user to confirm logout.

    :param uid: Local user identifier
    :param esr: The EndSessionRequest
    :param cookie: Cookie header tuple to propagate on the response, or None
    :param redirect_uri: Verified post-logout redirect URI, or None
    :return: Response containing the rendered verify-logout template
    """
    if cookie:
        headers = [cookie]
    else:
        headers = []
    mte = self.template_lookup.get_template(self.template["verify_logout"])
    self.sdb.set_verified_logout(uid)

    if redirect_uri is not None:
        redirect = redirect_uri
    else:
        redirect = "/"

    try:
        tmp_id_token_hint = esr["id_token_hint"]
    except KeyError:  # narrowed from a bare except; only a missing key is expected
        tmp_id_token_hint = ""

    argv = {
        "id_token_hint": tmp_id_token_hint,
        "post_logout_redirect_uri": esr["post_logout_redirect_uri"],
        "key": self.sdb.get_verify_logout(uid),
        "redirect": redirect,
        "action": "/" + EndSessionEndpoint("").etype
    }
    # Bug fix: the 'headers' list built from the cookie above was previously
    # discarded (headers=[] was passed), dropping the session cookie.
    return Response(mte.render(**argv), headers=headers)
def end_session_endpoint(self, request="", cookie=None, **kwargs):
    """Handle an RP-initiated logout request.

    The session to delete is found either from a supplied (and verified)
    id_token_hint or, failing that, from the session cookie.

    :param request: urlencoded EndSessionRequest
    :param cookie: The session cookie
    :return: Redirect to post_logout_redirect_uri, a plain success
        Response, or an error response
    """
    esr = EndSessionRequest().from_urlencoded(request)
    logger.debug("End session request: {}".format(esr.to_dict()))
    redirect_uri = None
    if "post_logout_redirect_uri" in esr:
        # Only redirect to URIs the client pre-registered.
        redirect_uri = self.verify_post_logout_redirect_uri(esr, cookie)
        if not redirect_uri:
            return self._error_response(
                "Not allowed (Post logout redirect URI verification "
                "failed)!")
    authn, acr = self.pick_auth(esr)
    sid = None
    if "id_token_hint" in esr:
        # Identify the session from the signature-verified id_token_hint.
        id_token_hint = OpenIDRequest().from_jwt(esr["id_token_hint"],
                                                 keyjar=self.keyjar,
                                                 verify=True)
        sub = id_token_hint["sub"]
        try:
            sid = self.sdb.get_sids_by_sub(sub)[
                0]  # any sid will do, choose the first
        except IndexError:
            pass
    else:
        # No hint: fall back to the cookie-authenticated identity.
        identity, _ts = authn.authenticated_as(cookie)
        if identity:
            uid = identity["uid"]
            try:
                sid = self.sdb.uid2sid[uid][
                    0]  # any sid will do, choose the first
            except (KeyError, IndexError):
                pass
        else:
            return self._error_response(
                "Not allowed (UID could not be retrieved)!")
    # if self.sdb.get_verified_logout(uid):
    #     return self.let_user_verify_logout(uid, esr, cookie, redirect_uri)
    if sid is not None:
        del self.sdb[sid]
    # Delete cookies
    headers = [authn.delete_cookie(), self.delete_session_cookie()]
    if redirect_uri is not None:
        return Redirect(str(redirect_uri), headers=headers)
    return Response("Successful logout", headers=headers)
def verify_endpoint(self, request="", cookie=None, **kwargs):
    """Complete an interactive authentication by verifying the user's
    response.

    :param request: urlencoded form input from the authentication page
    :param cookie: The session cookie
    :param kwargs: Extra arguments passed through to the authn method
    :return: Whatever the picked authn method's verify() returns
    """
    logger.debug("verify request: %s" % request)
    _req = urlparse.parse_qs(request)
    areq = _req
    if "query" in _req:
        try:
            # TODO FIX THIS !!! Why query ?
            areq = urlparse.parse_qs(_req["query"][0])
        except KeyError:
            return BadRequest()
    logger.debug("REQ: %s" % areq)
    try:
        authn, _acr = self.pick_auth(areq, "exact")
    except Exception as err:
        logger.exception("%s", err)
        raise
    kwargs["cookie"] = cookie
    return authn.verify(_req, **kwargs)
def setup_session(self, areq, authn_event, cinfo):
    """Create an authorization session in the session DB and mint the
    subject identifier for it.

    :param areq: The authorization request
    :param authn_event: The authentication event to bind to the session
    :param cinfo: Registered client information (dict)
    :return: The new session identifier
    """
    try:
        oidc_req = areq["request"]
    except KeyError:
        oidc_req = None
    sid = self.sdb.create_authz_session(authn_event, areq, oidreq=oidc_req)
    # Pairwise-subject parameters are only passed along when registered.
    extra = {param: cinfo[param]
             for param in ("sector_id", "subject_type") if param in cinfo}
    self.sdb.do_sub(sid, cinfo['client_salt'], **extra)
    return sid
def authorization_endpoint(self, request="", cookie=None, **kwargs):
    """ The AuthorizationRequest endpoint

    :param request: The client request
    :param cookie: Browser session cookie (used for SSO / re-authentication)
    :return: A Response subclass; ultimately a redirect back to the client
    """
    info = self.auth_init(request, request_class=AuthorizationRequest)
    if isinstance(info, Response):
        return info
    # Merge in any 'request'/'request_uri' OIDC request object.
    areq = self.handle_oidc_request(info["areq"], info["redirect_uri"])
    if isinstance(areq, Response):
        return areq
    logger.debug("AuthzRequest+oidc_request: %s" % (areq.to_dict(),))
    _cid = areq["client_id"]
    cinfo = self.cdb[str(_cid)]
    # Make sure the client's published keys (jwks_uri) are in the key jar.
    if _cid not in self.keyjar.issuer_keys:
        if "jwks_uri" in cinfo:
            self.keyjar.issuer_keys[_cid] = []
            self.keyjar.add(_cid, cinfo["jwks_uri"])
    req_user = self.required_user(areq)
    if req_user:
        sids = self.sdb.get_sids_by_sub(req_user)
        if sids:
            # anyone will do
            authn_event = self.sdb[sids[-1]]["authn_event"]
            # Is the authentication event to be regarded as valid ?
            if authn_event.valid():
                # SSO short-circuit: reuse the event and skip re-auth.
                sid = self.setup_session(areq, authn_event, cinfo)
                return self.authz_part2(authn_event.uid, areq, sid,
                                        cookie=cookie)
        kwargs["req_user"] = req_user
    # No valid prior authentication: run the full authn flow.
    authnres = self.do_auth(info["areq"], info["redirect_uri"],
                            cinfo, request, cookie, **kwargs)
    if isinstance(authnres, Response):
        return authnres
    logger.debug("- authenticated -")
    logger.debug("AREQ keys: %s" % list(areq.keys()))
    sid = self.setup_session(areq, authnres["authn_event"], cinfo)
    return self.authz_part2(authnres["user"], areq, sid, cookie=cookie)
def authz_part2(self, user, areq, sid, **kwargs):
    """Finish the authorization step: build the response and redirect the
    user agent back to the client.

    :param user: The authenticated user
    :param areq: The authorization request
    :param sid: Session identifier
    :return: Redirect (or an error Response from _complete_authz)
    """
    result = self._complete_authz(user, areq, sid, **kwargs)
    if isinstance(result, Response):
        return result
    aresp, headers, redirect_uri, fragment_enc = result

    if "check_session_iframe" in self.capabilities:
        salt = rndstr()
        # Seed the session state with the last authentication time.
        state = str(self.sdb.get_authentication_event(
            sid).authn_time)  # use the last session
        aresp["session_state"] = self._compute_session_state(
            state, salt, areq["client_id"], redirect_uri)
        headers.append(self.write_session_cookie(state))

    location = aresp.request(redirect_uri, fragment_enc)
    logger.debug("Redirected to: '%s' (%s)" % (location, type(location)))
    return Redirect(str(location), headers=headers)
def userinfo_in_id_token_claims(self, session):
    """Collect the userinfo claims that should be embedded in the id token.

    :param session: Session information
    :return: Collected user info, or None when no claims are requested
    """
    itc = self.server.id_token_claims(session)
    if not itc:
        return None
    _claims = by_schema(self.schema, **itc)
    if not _claims:
        return None
    return self._collect_user_info(session, _claims)
def encrypt(self, payload, client_info, cid, val_type="id_token", cty=""):
    """Handles the encryption of a payload.

    Shouldn't get here unless there are encrypt parameters in client info.

    :param payload: The information to be encrypted
    :param client_info: Client information
    :param cid: Client id
    :param val_type: Metadata prefix selecting which *_encrypted_response_*
        parameters to use
    :param cty: Optional content type for the JWE header
    :return: The encrypted information as a JWT
    """
    alg_param = "%s_encrypted_response_alg" % val_type
    enc_param = "%s_encrypted_response_enc" % val_type
    try:
        alg = client_info[alg_param]
        enc = client_info[enc_param]
    except KeyError as err:  # both must be defined
        logger.warning("undefined parameter: %s" % err)
        raise JWEException("%s undefined" % err)

    logger.debug("alg=%s, enc=%s, val_type=%s" % (alg, enc, val_type))
    keys = self.keyjar.get_encrypt_key(owner=cid)
    logger.debug("Encryption keys for %s: %s" % (cid, keys))
    try:
        _ckeys = self.keyjar[cid]
    except KeyError:
        # Weird, but try to recuperate by re-fetching the client's jwks_uri.
        logger.warning(
            "Lost keys for {} trying to recuperate!!".format(cid))
        self.keyjar.issuer_keys[cid] = []
        self.keyjar.add(cid, client_info["jwks_uri"])
        _ckeys = self.keyjar[cid]
    logger.debug("keys for %s: %s" % (
        cid, "[" + ", ".join([str(x) for x in _ckeys])) + "]")

    jwe_args = {"alg": alg, "enc": enc}
    if cty:
        jwe_args["cty"] = cty
    # use the clients public key for encryption
    _jwe = JWE(payload, **jwe_args)
    return _jwe.encrypt(keys, context="public")
def sign_encrypt_id_token(self, sinfo, client_info, areq, code=None,
                          access_token=None, user_info=None):
    """Sign and, when the client asks for it, encrypt an IDToken.

    :param sinfo: Session information
    :param client_info: Client information
    :param areq: The request
    :param code: Access grant
    :param access_token: Access Token
    :param user_info: User information
    :return: IDToken instance (signed JWT, possibly nested in a JWE)
    """
    # Pick the signing algorithm: client-registered value if present
    # (falling back to the provider default when it is empty), otherwise
    # the provider's configured default, then the hard-coded default.
    if "id_token_signed_response_alg" in client_info:
        alg = (client_info["id_token_signed_response_alg"] or
               PROVIDER_DEFAULT["id_token_signed_response_alg"])
    else:
        try:
            alg = self.jwx_def["sign_alg"]["id_token"]
        except KeyError:
            alg = PROVIDER_DEFAULT["id_token_signed_response_alg"]

    _authn_event = sinfo["authn_event"]
    id_token = self.id_token_as_signed_jwt(
        sinfo, loa=_authn_event.authn_info, alg=alg, code=code,
        access_token=access_token, user_info=user_info,
        auth_time=_authn_event.authn_time)

    # Then encrypt
    if "id_token_encrypted_response_alg" in client_info:
        id_token = self.encrypt(id_token, client_info, areq["client_id"],
                                "id_token", "JWT")
    return id_token
def _access_token_endpoint(self, req, **kwargs):
    """Exchange an authorization code for an access token (and id_token
    when 'openid' is in scope).

    :param req: The client-authenticated AccessTokenRequest
    :return: Response with an AccessTokenResponse as JSON, or an error
        response
    """
    _sdb = self.sdb
    _log_debug = logger.debug

    client_info = self.cdb[str(req["client_id"])]

    # NOTE(review): validation via assert is stripped when Python runs
    # with -O; confirm grant_type is guaranteed by the caller.
    assert req["grant_type"] == "authorization_code"

    _access_code = req["code"]
    # assert that the code is valid
    if self.sdb.is_revoked(_access_code):
        return self._error(error="access_denied", descr="Token is revoked")

    _info = _sdb[_access_code]

    # If redirect_uri was in the initial authorization request
    # verify that the one given here is the correct one.
    if "redirect_uri" in _info:
        try:
            assert req["redirect_uri"] == _info["redirect_uri"]
        except AssertionError:
            return self._error(error="access_denied",
                               descr="redirect_uri mismatch")

    _log_debug("All checks OK")

    if "issue_refresh" in kwargs:
        args = {"issue_refresh": kwargs["issue_refresh"]}
    else:
        args = {}

    try:
        _sdb.upgrade_to_token(_access_code, **args)
    except Exception as err:
        logger.error("%s" % err)
        # Should revoke the token issued to this access code
        _sdb.revoke_all_tokens(_access_code)
        return self._error(error="access_denied", descr="%s" % err)

    # The "openid" scope means an id_token is minted alongside the token.
    if "openid" in _info["scope"]:
        userinfo = self.userinfo_in_id_token_claims(_info)
        # _authn_event = _info["authn_event"]
        try:
            _idtoken = self.sign_encrypt_id_token(
                _info, client_info, req, user_info=userinfo)
        except (JWEException, NoSuitableSigningKeys) as err:
            logger.warning(str(err))
            return self._error(error="access_denied",
                               descr="Could not sign/encrypt id_token")
        _sdb.update_by_token(_access_code, "id_token", _idtoken)

    # Refresh the _tinfo
    _tinfo = _sdb[_access_code]
    _log_debug("_tinfo: %s" % _tinfo)

    atr = AccessTokenResponse(**by_schema(AccessTokenResponse, **_tinfo))
    _log_debug("access_token_response: %s" % atr.to_dict())
    return Response(atr.to_json(), content="application/json")
def _refresh_access_token_endpoint(self, req, **kwargs):
    """Issue a fresh access token (and id_token when 'openid' is in scope)
    from a refresh token.

    :param req: The client-authenticated RefreshAccessTokenRequest
    :return: Response with an AccessTokenResponse as JSON, or an error
        response
    """
    _sdb = self.sdb

    client_info = self.cdb[str(req["client_id"])]

    assert req["grant_type"] == "refresh_token"
    rtoken = req["refresh_token"]
    _info = _sdb.refresh_token(rtoken)

    if "openid" in _info["scope"]:
        userinfo = self.userinfo_in_id_token_claims(_info)
        try:
            _idtoken = self.sign_encrypt_id_token(
                _info, client_info, req, user_info=userinfo)
        except (JWEException, NoSuitableSigningKeys) as err:
            logger.warning(str(err))
            return self._error(error="access_denied",
                               descr="Could not sign/encrypt id_token")
        sid = _sdb.token.get_key(rtoken)
        _sdb.update(sid, "id_token", _idtoken)

    logger.debug("_info: %s" % _info)
    atr = AccessTokenResponse(**by_schema(AccessTokenResponse, **_info))
    logger.debug("access_token_response: %s" % atr.to_dict())
    return Response(atr.to_json(), content="application/json")
# noinspection PyUnusedLocal
def token_endpoint(self, request="", authn=None, dtype='urlencoded',
                   **kwargs):
    """This is where clients come to get their access tokens.

    :param request: The request
    :param authn: Authentication info, comes from HTTP header
    :param dtype: Serialization of the request ('urlencoded' by default)
    :returns: Response with the token(s), or an error response
    """
    logger.debug("- token -")
    logger.info("token_request: %s" % request)

    req = AccessTokenRequest().deserialize(request, dtype)
    if "refresh_token" in req:
        # A refresh_token parameter means this is really a refresh request.
        req = RefreshAccessTokenRequest().deserialize(request, dtype)
    logger.debug("%s: %s" % (req.__class__.__name__, req))

    try:
        client_id = self.client_authn(self, req, authn)
    except Exception as err:
        logger.error("Failed to verify client due to: %s" % err)
        client_id = ""

    if not client_id:
        err = TokenErrorResponse(error="unauthorized_client")
        return Unauthorized(err.to_json(), content="application/json")

    if "client_id" not in req:  # Optional for access token request
        req["client_id"] = client_id

    if not isinstance(req, AccessTokenRequest):
        return self._refresh_access_token_endpoint(req, **kwargs)
    try:
        return self._access_token_endpoint(req, **kwargs)
    except JWEException as err:
        return self._error_response("invalid_request",
                                    descr="%s" % err)
def _collect_user_info(self, session, userinfo_claims=None):
"""
Collect information about a user.
This can happen in two cases, either when constructing an IdToken or
when returning user info through the UserInfo endpoint
:param session: Session information
:param userinfo_claims: user info claims
:return: User info
"""
if userinfo_claims is None:
uic = self._scope2claims(session["scope"])
# Get only keys allowed by user and update the dict if such info
# is stored in session
perm_set = session.get('permission')
if perm_set:
uic = {key: uic[key] for key in uic | |
= '%s + photon' % compoundName
Q = (pMass + tMass - cMass)*amu
rrcap = gchannelName
print("Reich-Moore particle pair: ",gchannelName,' with CN mass %.5f so Q=%.3f, label=%s' % (cMass,Q,rrcap))
# gData = { '0' : [ 0.0, .0, 1, None, 1, +1 ] }
gammaParticle = miscModule.buildParticleFromRawData( gaugeBosonModule.particle, 'photon',
mass = ( 0, 'amu' ), spin = ( zero, spinUnit ), parity = ( 1, '' ), charge = ( 0, 'e' ))
PoPs_data.add(gammaParticle)
nucleus = miscModule.buildParticleFromRawData( nucleusModule.particle, compoundNameIndex, index = level, energy = ( 0.0, 'MeV' ) ,
spin=(zero,spinUnit), parity=(1,''), charge=(compoundZ,'e') )
compoundParticle = miscModule.buildParticleFromRawData( nuclideModule.particle, compoundNameIndex, nucleus = nucleus, mass=(cMass,'amu') )
#print PoPs_data.toXML()
PoPs_data.add(compoundParticle)
# if verbose: print PoPs_data.toXML()
# Create background ReichMoore cross section (zero to start with)
MT_capture = 102
# label = 'capture'
label = rrcap
capture = zeroReaction(label,MT_capture, Q, [gammaParticle,compoundParticle], 'damping', emin,emax,'MeV', debug)
# MTchannels.append((rrcap, capture, gchannelName,None,'photon'))
MTchannels.append((rrcap, (label,MT_capture, Q, gammaParticle,compoundParticle, emin,emax), gchannelName,None,'photon'))
# After making all the channels, and gnd is generated for the elastic channel, now add them to gnd
p,tex = elastics
gnd = reactionSuiteModule.reactionSuite( p, tex, 'fresco R-matrix fit', PoPs = PoPs_data, style = style, interaction='nuclear')
for rr,reacInfo,channelName,prmax,p in MTchannels:
# Get zero background cross section and link to it
#reaction,channelName,prmax = MTchannels[rr]
rr,MT, QI, pn,tn, emi,ema = reacInfo
reaction = zeroReaction(rr, MT, QI - Q_offset, [pn,tn], None, emi,ema,'MeV', debug)
gnd.reactions.add(reaction)
eliminated = channelName == gchannelName
reactionLink = linkModule.link(reaction)
computeShiftFactor = BC != resolvedResonanceModule.BoundaryCondition.EliminateShiftFunction and not eliminated
computePenetrability = not eliminated # Should be False also for fission channels (but they are not specified yet) TEMPORARY
rreac = commonResonanceModule.resonanceReaction ( label=rr, reactionLink=reactionLink, ejectile=p,
computePenetrability=computePenetrability,
computeShiftFactor=computeShiftFactor, Q=None, eliminated=eliminated )
if prmax is not None and prmax != Rm_global:
rreac.scatteringRadius = scatteringRadiusModule.scatteringRadius(
constantModule.constant1d(prmax, domainMin=emin, domainMax=emax,
axes=axesModule.axes(labelsUnits={1: ('energy_in', 'MeV'), 0: ('radius', 'fm')})) )
resonanceReactions.add(rreac)
if debug: print("RR <"+rr+"> is "+channelName)
if cm2lab<1e-5:
print("Missed elastic channel for cm2lab factor!")
raise SystemExit
if Lvals is not None:
print("Remake channels in each pair for L values up to ",Lvals)
# Now read and collate the reduced channel partial waves and their reduced width amplitudes
# next we have NJS spin groups, each containing channels and resonances
spinGroups = resolvedResonanceModule.spinGroups()
JpiList = []
for i in range(0,len(variables)):
v = variables[i]
if v['kind']==3:
pi = v['par']
J = v['jtot']
Jpi = J,pi
if Jpi not in JpiList: JpiList.append(Jpi)
if debug: print(" List of Jpi",JpiList)
NJS = len(JpiList)
JpiMissing = []
frac = J-int(J) # to fix whether integer or half-integer spins!
NJ = int(jtmax-frac+0.1)+1
for i in range(NJ):
J = frac + i
for pi in [-1,1]:
if (J,pi) not in JpiList: JpiMissing.append( (J,pi) )
NJM = len(JpiMissing)
if NJM>0: print("Spin-parity groups with no poles:",JpiMissing)
kvar = 0 # ; ivar2G = {}; G2ivar = [];
kvarData = []
# if debug: print(resonanceReactions.toXML())
for spinGroupIndex in range(NJS+NJM):
J,piv = JpiList [ spinGroupIndex ] if spinGroupIndex < NJS else JpiMissing[spinGroupIndex-NJS]
JJ = resolvedResonanceModule.spin( J )
pi= resolvedResonanceModule.spin( piv)
x = (1-pi)//2
if verbose: print('\n',spinGroupIndex,': J,pi,x =',JJ,pi,x)
# Previously we got channel quantum numbers from looking at which combinations have poles.
# But this is not good from physics, as we have to be careful to cater for channels even without poles.
# So now (Oct 9, 2017) I re-organize how to make list of channels.
#
chans = set()
itc = 0
for rreac in resonanceReactions:
if not rreac.eliminated:
icch=0; iach=0
for ic in range(len(rrc)): # find icch,iach for this reaction pair
for ia in range(len(rrc[ic])):
if rreac.label==rrc[ic][ia]:
icch=ic+1; iach=ia+1
if debug: print(" pair:",rreac.label," at ic,ia",icch,iach)
p = rreac.ejectile
t = rreac.residual
projectile = PoPs_data[p];
target = PoPs_data[t];
if hasattr(projectile, 'nucleus'): projectile = projectile.nucleus
if hasattr(target, 'nucleus'): target = target.nucleus
jp,pt = projectile.spin[0].float('hbar'), projectile.parity[0].value
jt,tt = target.spin[0].float('hbar'), target.parity[0].value
smin = abs(jt-jp)
smax = jt+jp
s2min = int(2*smin+0.5)
s2max = int(2*smax+0.5)
for s2 in range(s2min,s2max+1,2):
sch = s2*0.5
lmin = int(abs(sch-JJ) +0.5)
lmax = int(sch+JJ +0.5)
if Lvals is not None: lmax = min(lmax,Lvals[itc])
for lch in range(lmin,lmax+1):
if pi != pt*tt*(-1)**lch: continue
chans.add((icch,iach,lch,sch))
if debug: print(' Partial wave channels IC,IA,L,S:',icch,iach,lch,sch)
itc += 1
channelList = sorted(chans)
NCH = len(channelList)
if debug: print(' channels =',chans,' (',NCH,')')
if debug: print(' channelList =',channelList,' (',NCH,')')
columnHeaders = [ tableModule.columnHeader(0, name="energy", unit="MeV") ]
width_units = 'MeV' ## 'MeV**{1/2}' if amplitudes else 'MeV' # wrong units given to GND: correct later if needed
channelNames = []
channels = resolvedResonanceModule.channels()
firstp =1
if damped:
columnHeaders.append( tableModule.columnHeader(1, name=gchannelName, unit= width_units) )
Sch = resolvedResonanceModule.spin( 0.0 )
channels.add( resolvedResonanceModule.channel('1', rrcap, columnIndex=1, L=0, channelSpin=Sch) )
firstp = 2
for chidx in range(NCH):
icch,iach,lch,sch = channelList[chidx]
rr = rrc[icch-1][iach-1]
if debug: print("From ic,ia =",icch,iach," find channel ",rr)
thisChannel = resonanceReactions[rr]
channelName = "%s width" % thisChannel.label
jdx = 2
while True:
if channelName not in channelNames:
channelNames.append( channelName ); break
channelName = '%s width_%d' % (thisChannel.label, jdx)
jdx += 1
columnHeaders.append( tableModule.columnHeader(chidx+firstp, name=channelName, unit= width_units) )
Sch = resolvedResonanceModule.spin( sch )
channels.add( resolvedResonanceModule.channel(str(chidx+firstp), rr, columnIndex=chidx+firstp, L=lch, channelSpin=Sch) )
if debug: print(str(chidx), str(chidx), int(lch), float(sch), chidx+firstp)
terms = set() # for this J,pi spin group
damping = {}
for i in range(0,len(variables)):
v = variables[i]
ivare = v.get('ivar',None)
if ivare is not None and ivare!=i+1 and Covariances is not None:
print("Variable namelists out of order. Expect %i but found %i" % (i+1,ivare))
if v['kind']==3:
Jv = v['jtot']
if Jv==J and v['par']==pi:
term = v['term']
terms.add((term,v['energy'],ivare))
if damping.get(term,None) is None: damping[term] = 0.0
try: d = v['damp']
except: d = 0.
damping[term] += d
if debug: print(i,':',v,'for term',term,' damping',d)
if v['kind']==7:
term = v['term']
if damping.get(term,None) is None: damping[term] = 0.0
try: d = v['damp']
except: d = 0.
damping[term] += d
if debug: print(i,':',v,'for term',term,' damping',d)
terms = sorted(terms)
if debug: print(' terms =',terms)
resonances = []
for term,energy,ivare in terms:
# G2ivar.append(ivare) # energies come before widths, in GNDS
kvar += 1 # ; ivar2G[ivare] = kvar
if debug: print('Jpi',JJ,pi,'kvar=',kvar,'for E=',energy)
kvarData.append([J,piv,'E',energy])
energy += Q_offset
row = [energy*cm2lab]
if damped:
damp = damping.get(term,0.0)*cm2lab
row.append(damp)
kvar += 1 # ; ivar2G[ivare] = kvar
kvarData.append([JJ,piv,'d',damp])
if debug: print('kvar=',kvar,'for damping=',damp)
else:
damp = 0.0
for ch in channelList:
found = False
ic,ia,lch,sch = ch
for i in range(0,len(variables)):
v = variables[i]
#print v['kind'],4 , v['term'],term , v['icch'],ch[0] , v['iach'],ch[1] , v['lch'],ch[2] , v['sch'],ch[3]
if v['kind']==4 and v['term']==term and v['icch']==ic and v['iach']==ia and v['lch']==lch and v['sch']==sch:
#print ' After ch',ch,' widths =',v['width']
p1,p2 = p1p2[(ic,ia)]
phaz = p1 + p2 + lch - x
if phaz % 2 == 1: # should be even. If not: stop
print('Error: ic,ia,p1,p2,lch,x,phaz=',ic,ia,p1,p2,lch,x,phaz)
sys.exit(1)
phase = (-1)**(phaz//2)
w = v['width'] * phase # convert to phase from Fresco, which has explicit i^L Huby phases.
if debug: print(' E= %.3f MeV, damp=%.2e width=%.4e l=%i S=%.1f p1,p2,phaz,s=%i,%i,%i: %i %i' % (energy, damp, w,lch,sch,p1,p2,x,phaz,phase))
try:
is_rwa = v['rwa']
except:
is_rwa = True # same as Frescox
if is_rwa != amplitudes: # fix to give correct output: rwa or formal width
rr = rr = rrc[ch[0]-1][ch[1]-1]
pMass,tMass,pZ,tZ,QI,prmax = ZAdict[ rr ]
e_ch = energy + QI - Q_offset
penetrability,shift,dSdE,W = getCoulomb_PSdSW(
abs(e_ch),lch, prmax, pMass,tMass,pZ,tZ, fmscal,etacns, False) # CWF at abs(e_ch)
if debug: print('p,t =',p,tex,'QI=',QI,': call coulombPenetrationFactor(L=',lch,'e_ch=',e_ch,') =',penetrability,dSdE,W)
# find gamma or Gamma_formal from the G_obs in the AZR input
# Gamma_formal = G_obs * shifty_denom
# gamma = sqrt(Gamma_formal/(2*P))
if amplitudes: # GND to have rwa from Gammaf
width = ( abs(w) /(2. * penetrability) ) **0.5
if w < 0: width = -width
if debug: print(" Converting Gammaf",w," to rwa",width)
else: # GND to have Gammaf from rwa
width = 2.0 * w*w * penetrability
if w < 0: width = -width
if debug: print(" Converting rwa",w," to Gammaf",width,'ampl:',amplitudes)
else:
width = w
width *= cm2lab**0.5 if amplitudes else cm2lab
if nonzero is not None and abs(width)<1e-20:
print('Change',width,'to',nonzero)
width = nonzero
# else:
# print('No change',width,'to',nonzero,'as',abs(width),1e-20,abs(width)<1e-20)
row.append(width)
found | |
self.sum += answer
if answer > self.max:
self.max = answer
if answer < self.min:
self.min = answer
self.average = self.sum / float(self.cnt)
# -------------------------------------------------------------------------
def advancedResults(self):
    """
    Compute distribution statistics over the collected numeric answers.

    Sets:
        self.std    - standard deviation of self.valueList (numpy)
        self.mean   - mean of self.valueList (numpy)
        self.zscore - {complete_id: z-score} for every castable,
                      non-None answer in self.answerList

    Answers whose raw value cannot be cast (castRawAnswer raises) are
    skipped. If numpy is unavailable the error is logged and the method
    returns without setting any statistics.
    """
    try:
        from numpy import array
    except ImportError:
        current.log.error("ERROR: S3Survey requires numpy library installed.")
        # Bug fix: the original fell through here and then crashed with
        # a NameError on the undefined "array" name below.
        return
    # Use a distinct local name so the imported "array" is not shadowed.
    values = array(self.valueList)
    self.std = values.std()
    self.mean = values.mean()
    self.zscore = {}
    for answer in self.answerList:
        complete_id = answer["complete_id"]
        try:
            value = self.castRawAnswer(complete_id, answer["value"])
        except Exception:
            # Uncastable answers are deliberately excluded (best effort).
            continue
        if value is not None:
            # NOTE(review): self.std may be 0 for uniform data, which
            # raises ZeroDivisionError here — same as original behaviour.
            self.zscore[complete_id] = (value - self.mean) / self.std
# -------------------------------------------------------------------------
def priority(self, complete_id, priorityObj):
    """
    Map the z-score of a completed response onto a priority band.

    @param complete_id: id of the completed response to look up
    @param priorityObj: object whose ``range`` attribute is an ascending
                        list of z-score band limits
    @return: index of the first band limit >= the z-score (len(range)
             when the z-score exceeds every limit), or -1 when no
             z-score is known for complete_id
    """
    limits = priorityObj.range
    try:
        zscore = self.zscore[complete_id]
        band = 0
        for limit in limits:
            if zscore <= limit:
                return band
            band += 1
        return band
    except:
        # No z-score recorded for this response (or comparison failed)
        return -1
# -------------------------------------------------------------------------
def priorityBand(self, priorityObj):
# Build the list of raw answer values that delimit each priority band,
# by projecting each z-score limit back through mean/std. Returns a
# list whose first element is the empty string followed by one integer
# per limit, clamped at zero.
priorityList = priorityObj.range
# NOTE(review): "priority" is assigned but never used in this method.
priority = 0
band = [""]
cnt = 0
for limit in priorityList:
# De-normalise the z-score limit into the answer value space.
value = int(self.mean + limit * self.std)
if value < 0:
value = 0
# NOTE(review): this writes the SAME constant (-mean/std) into every
# slot of the caller's priorityObj.range, discarding the original
# limits. It looks like it was meant to store the normalised limit
# (or "value") per slot — confirm intent before changing.
priorityList[cnt] = - self.mean / self.std
band.append(value)
cnt += 1
return band
# -------------------------------------------------------------------------
def chartButton(self, series_id):
    """
    Decide whether a chart button should be offered for this question.

    Charts are only drawn for numeric ("n" format) questions having at
    least self.histCutoff collected values; otherwise returns None.
    Delegates to the base class implementation when a chart is wanted.
    """
    if self.qstnWidget.get("Format", "n") != "n":
        # At the moment only draw charts for integers
        return None
    if len(self.valueList) < self.histCutoff:
        return None
    return S3AbstractAnalysis.chartButton(self, series_id)
# -------------------------------------------------------------------------
def drawChart(self, series_id, output="xml",
              data=None, label=None, xLabel=None, yLabel=None):
    """
    Render (or fetch from cache) the chart for a numeric question.

    With no explicit data a 10-bucket histogram of self.valueList is
    drawn; otherwise a bar chart of the supplied data/label pairs.

    @return: the chart image in the requested output format
    """
    chartFile = self.getChartName(series_id)
    cached = S3Chart.getCachedFile(chartFile)
    if cached:
        return cached
    chart = S3Chart(path=chartFile)
    chart.asInt = True
    name = self.qstnWidget.question.name
    if data == None:
        chart.survey_hist(name,
                          self.valueList,
                          10,
                          0,
                          self.max,
                          xlabel=name,
                          ylabel=current.T("Count"),
                          )
    else:
        chart.survey_bar(name, data, label, [])
    return chart.draw(output=output)
# -------------------------------------------------------------------------
def filter(self, filterType, groupedData):
    """
    Post-process grouped answer data for a report filter.

    Only the "Sum" filter is implemented: each group's values are cast
    via castRawAnswer and totalled, silently ignoring values that fail
    to cast. Any other filter type returns groupedData unchanged.
    """
    if filterType != "Sum":
        return groupedData
    summedData = {}
    for key, valueList in groupedData.items():
        total = 0
        for value in valueList:
            try:
                total += self.castRawAnswer(None, value)
            except:
                # Uncastable values contribute nothing (best effort)
                pass
        summedData[key] = total
    return summedData
# =============================================================================
class S3OptionAnalysis(S3AbstractAnalysis):
    """
    Analysis of single-choice option questions: tallies each distinct
    answer and derives its percentage share of all replies.
    """

    # -------------------------------------------------------------------------
    def summary(self):
        """Append one (translated label, percentage) row per answer."""
        T = current.T
        for key, value in self.listp.items():
            self.result.append((T(key), value))
        return self.format()

    # -------------------------------------------------------------------------
    def basicResults(self):
        """
        Tally the answers.

        Sets self.cnt (reply count), self.list ({answer: occurrences})
        and self.listp ({answer: "xx.x%"} — empty when no replies).
        """
        counts = {}
        total = 0
        for answer in self.valueList:
            total += 1
            counts[answer] = counts.get(answer, 0) + 1
        self.cnt = total
        self.list = counts
        self.listp = {}
        if total != 0:
            for key, value in counts.items():
                self.listp[key] = "%3.1f%%" % round((100.0 * value) / total, 1)

    # -------------------------------------------------------------------------
    def drawChart(self, series_id, output="xml",
                  data=None, label=None, xLabel=None, yLabel=None):
        """Draw (or fetch cached) a pie chart of the answer tallies."""
        chartFile = self.getChartName(series_id)
        cached = S3Chart.getCachedFile(chartFile)
        if cached:
            return cached
        chart = S3Chart(path=chartFile)
        labels = list(self.list.keys())
        values = [self.list[key] for key in labels]
        chart.survey_pie(self.qstnWidget.question.name,
                         values,
                         labels)
        return chart.draw(output=output)
# =============================================================================
class S3OptionYNAnalysis(S3OptionAnalysis):
    """
    Analysis of Yes/No questions: exposes self.yesp / self.nop as
    percentage strings for the summary table.
    """

    # -------------------------------------------------------------------------
    def summary(self):
        """Append the Yes and No percentage rows."""
        T = current.T
        self.result.append((T("Yes"), self.yesp))
        self.result.append((T("No"), self.nop))
        return self.format()

    # -------------------------------------------------------------------------
    def _percentage(self, key):
        """
        Percentage string for the given answer key.

        Registers a zero tally for a key that never occurred and returns
        the translated "0%"; returns "" when there were no replies at
        all (no percentage can be given).
        """
        if key in self.listp:
            return self.listp[key]
        if self.cnt == 0:
            return ""  # No replies so can't give a percentage
        self.list[key] = 0
        return current.T("0%")

    # -------------------------------------------------------------------------
    def basicResults(self):
        """Compute option tallies, then the Yes/No percentages."""
        # Previously the Yes and No branches duplicated the same logic
        # inline; factored into _percentage() for consistency.
        S3OptionAnalysis.basicResults(self)
        self.yesp = self._percentage("Yes")
        self.nop = self._percentage("No")
# =============================================================================
class S3OptionYNDAnalysis(S3OptionAnalysis):
    """
    Analysis of Yes/No/Don't Know questions: exposes self.yesp,
    self.nop and self.dkp as percentage strings for the summary table.
    """

    # -------------------------------------------------------------------------
    def summary(self):
        """Append the Yes, No and Don't Know percentage rows."""
        T = current.T
        self.result.append((T("Yes"), self.yesp))
        self.result.append((T("No"), self.nop))
        self.result.append((T("Don't Know"), self.dkp))
        return self.format()

    # -------------------------------------------------------------------------
    def _percentage(self, key):
        """
        Percentage string for the given answer key.

        Registers a zero tally for a key that never occurred and returns
        the translated "0%"; returns "" when there were no replies at
        all (no percentage can be given).
        """
        if key in self.listp:
            return self.listp[key]
        if self.cnt == 0:
            return ""  # No replies so can't give a percentage
        self.list[key] = 0
        return current.T("0%")

    # -------------------------------------------------------------------------
    def basicResults(self):
        """Compute option tallies, then the Yes/No/Don't Know percentages."""
        # Previously the three branches triplicated the same logic
        # inline; factored into _percentage() for consistency.
        S3OptionAnalysis.basicResults(self)
        self.yesp = self._percentage("Yes")
        self.nop = self._percentage("No")
        self.dkp = self._percentage("Don't Know")
# =============================================================================
class S3OptionOtherAnalysis(S3OptionAnalysis):
# Behaves exactly like S3OptionAnalysis; kept as a distinct class so
# the "Other" option question type maps onto its own analysis class.
pass
# =============================================================================
class S3MultiOptionAnalysis(S3OptionAnalysis):
    """
    Analysis of multi-select option questions: each raw answer is a JSON
    list of selected options, and every selection is tallied separately.
    """

    # -------------------------------------------------------------------------
    def castRawAnswer(self, complete_id, answer):
        """
        Used to modify the answer from its raw text format.
        Where necessary, this function will be overridden.
        """
        return current.s3db.survey_json2list(answer)

    # -------------------------------------------------------------------------
    def basicResults(self):
        """
        Tally every selected option across all answers.

        Sets self.cnt (reply count — one per answer, not per selection),
        self.list ({option: occurrences}) and self.listp
        ({option: percentage string} — empty when no replies).
        """
        counts = {}
        replies = 0
        for answer in self.valueList:
            options = answer if isinstance(answer, list) else [answer]
            replies += 1
            for option in options:
                counts[option] = counts.get(option, 0) + 1
        self.cnt = replies
        self.list = counts
        self.listp = {}
        if replies != 0:
            for key, value in counts.items():
                # NOTE(review): true division renders values such as
                # "33.333333333333336%"; the single-option parent class
                # rounds to one decimal — confirm whether intended.
                self.listp[key] = "%s%%" % ((100 * value) / replies)

    # -------------------------------------------------------------------------
    def drawChart(self, series_id, output="xml",
                  data=None, label=None, xLabel=None, yLabel=None):
        """Draw (or fetch cached) a bar chart of the selection tallies."""
        chartFile = self.getChartName(series_id)
        cached = S3Chart.getCachedFile(chartFile)
        if cached:
            return cached
        chart = S3Chart(path=chartFile)
        labels = list(self.list.keys())
        values = [self.list[key] for key in labels]
        chart.survey_bar(self.qstnWidget.question.name,
                         values,
                         labels,
                         None)
        return chart.draw(output=output)
# =============================================================================
class S3LocationAnalysis(S3AbstractAnalysis):
    """
    Widget for analysing Location type questions

    The analysis will compare the location values provided with
    data held on the gis_location table.

    The data held can be in its raw form (the actual value imported) or
    in a more refined state, which may include the actual location id
    held on the database or an alternative value which is a string.

    The raw value may be a local name for the place whilst the alternative
    value should be the actual value held on the database.

    The alternative value is useful for matching duplicate responses that
    are using the same local name.
    """

    # -------------------------------------------------------------------------
    def castRawAnswer(self, complete_id, answer):
        """
        Convert the answer for the complete_id into a database record.

        This can have one of three types of return value:
            A single record: the actual location
            Multiple records: the set of locations, one of which is it
            None: no match is found on the database
        """
        return self.qstnWidget.getLocationRecord(complete_id, answer)

    # -------------------------------------------------------------------------
    def summary(self):
        """
        Returns a summary table
        """
        T = current.T
        self.result.append((T("Known Locations"), self.kcnt))
        self.result.append((T("Duplicate Locations"), self.dcnt))
        self.result.append((T("Unknown Locations"), self.ucnt))
        return self.format()

    # -------------------------------------------------------------------------
    def count(self):
        """
        Returns a table of basic results
        """
        T = current.T
        self.result.append((T("Total Locations"), len(self.valueList)))
        self.result.append((T("Unique Locations"), self.cnt))
        return self.format()

    # -------------------------------------------------------------------------
    def basicResults(self):
        """
        Calculate the basic results, which consist of a number of lists
        related to the locations

        LISTS (dictionaries)
        ====================
        All maps are keyed on the value used in the database lookup
        locationList - holding the number of times the value exists
        complete_id  - a list of complete_id at this location
        duplicates   - a list of duplicate records
        known        - the record from the database

        Calculated Values
        =================
        cnt  - The number of unique locations
        dcnt - The number of locations with duplicate values
        kcnt - The number of known locations (single match on the database)
        ucnt - The number of unknown locations
        dper - The percentage of locations with duplicate values
        kper - The percentage of known locations

        NOTE: Percentages are calculated from the unique locations
              and not from the total responses.
        """
        self.locationList = {}
        self.duplicates = {}
        self.known = {}
        self.complete_id = {}
        for answer in self.valueList:
            if answer is not None:
                key = answer.key
                if key in self.locationList:
                    self.locationList[key] += 1
                else:
                    self.locationList[key] = 1
                if key in self.complete_id:
                    self.complete_id[key].append(answer.complete_id)
                else:
                    self.complete_id[key] = [answer.complete_id]
                result = answer.records
                if len(result) > 1:
                    self.duplicates[key] = result
                if len(result) == 1:
                    self.known[key] = result[0]
        self.cnt = len(self.locationList)
        self.dcnt = len(self.duplicates)
        self.kcnt = len(self.known)
        if self.cnt == 0:
            # Bug fix: these are plain string literals, not %-format
            # patterns, so "0%%" rendered literally as "0%%" while the
            # else-branch produces a single "%". Use the intended "0%".
            self.dper = "0%"
            self.kper = "0%"
        else:
            self.dper = "%s%%" % ((100 * self.dcnt) / self.cnt)
            self.kper = "%s%%" % ((100 * self.kcnt) / self.cnt)
        self.ucnt = self.cnt - self.kcnt - self.dcnt

    # -------------------------------------------------------------------------
    def chartButton(self, series_id):
        """
        Ensures that no button is set up
        """
        return None

    # -------------------------------------------------------------------------
    def uniqueCount(self):
        """
        Calculate the number of occurrences of each value
        """
        # Renamed local from "map" to avoid shadowing the builtin.
        counts = {}
        for answer in self.valueList:
            if answer.key in counts:
                counts[answer.key] += 1
            else:
                counts[answer.key] = 1
        return counts
# =============================================================================
class S3LinkAnalysis(S3AbstractAnalysis):
def __init__(self,
type,
question_id,
answerList
):
S3AbstractAnalysis.__init__(self, type, question_id, answerList)
linkWidget = S3QuestionTypeLinkWidget(question_id)
parent = linkWidget.get("Parent")
relation = linkWidget.get("Relation")
type = linkWidget.get("Type")
parent_qid = linkWidget.getParentQstnID()
valueMap = {}
for answer in self.answerList:
complete_id = answer["complete_id"]
parent_answer = linkWidget.loadAnswer(complete_id,
parent_qid,
forceDB=True
)
if relation == | |
def __r2d2(self, win, waitc, timeout):
"""Read and Display 3270 Terminal
"""
# See https://en.wikipedia.org/wiki/Table_of_keyboard_shortcuts
# when assigning keyboard shortcuts.
show = (waitc != _WAIT_GOTO)
if waitc == _WAIT_NONE:
timeout = 0
elif waitc == _WAIT_FOREVER:
timeout = -1
elif waitc == _WAIT_GOTO:
timeout = -1
elif timeout <= 0:
raise ValueError("Expected timeout>0")
insmode = False
if show:
self.__prog_curs_vis = 0
else:
self.__prog_curs_vis = 1
tout = timeout
if timeout > 0:
etime = time.time() + timeout
self.__refresh()
refresh = False
paste = None
try:
while True:
if self.cmdqueue: # plugin added cmd to process?
self.shell_mode()
return 1 # timeout?
cstr = self.__tty_read(win, 0, refresh=refresh)
refresh = None
if (cstr == "" and # session change
(waitc == _WAIT_KEYLOCK or
waitc == _WAIT_SCREEN)):
# TODO be more specific about
# keylock and screen change
return 1 # condition sastisfied
if not cstr:
tns = ati.ati.get_tnz()
if not tns:
return 12
if tns.seslost:
return 12
if tns.ddmdata or tns.ddmdict:
return 10 # have ddmdata
if self.downloadaction:
self.downloadaction = False
return 11 # have download action
if ((self.rewrite or
self.__dirty_ranges or
self.rewrite_cursor or
self.rewrite_keylock or
self.rewrite_status)):
self.__display(win, not show)
# show cursor
if not show:
begx, begy = self.twin_beg
endx, endy = self.twin_end
currow = tns.curadd // tns.maxcol + 1
curcol = tns.curadd % tns.maxcol + 1
if (currow > begy and
currow <= endy and
curcol > begx and
curcol <= endx):
if insmode:
self.__prog_curs_vis = 2 # very visible
else:
self.__prog_curs_vis = 1 # visible
xpos, ypos = self.twin_loc
_logger.debug("before win.move")
win.move(ypos+currow-1, xpos+curcol-1)
_logger.debug("after win.move")
if self.cmdqueue: # plugin added cmd to process?
self.shell_mode()
return 1 # timeout?
cstr = self.__tty_read(win, tout)
if waitc in (_WAIT_KEYLOCK, _WAIT_SCREEN):
if cstr == "":
# TODO be more specific about
# keylock and screen change
return 1 # condition sastisfied
if cstr is None:
return 0 # timeout
tns = ati.ati.get_tnz()
if not tns:
return 12 # seslost
if tns.seslost:
return 12 # seslost
if tns.ddmdata or tns.ddmdict:
return 10 # have ddmdata
if self.downloadaction:
self.downloadaction = False
return 11 # have download action
# check for Alt+letter shortcut
altc = 0
if isinstance(cstr, str):
if ((cstr and cstr.startswith('\x1b') and
len(cstr) == 2 and
cstr.lower() == cstr) or
(cstr and cstr.startswith("ALT_") and
len(cstr) == 5 and
str.isalpha(cstr[-1]))):
maxcol = tns.maxcol
alet1 = cstr[-1].lower()
alet2 = alet1.upper()
elet1 = tns.codec_info[0].encode(alet1)[0][0]
elet2 = tns.codec_info[0].encode(alet2)[0][0]
for i in range(0, maxcol):
if ((tns.plane_dc[i] != elet1 and
tns.plane_dc[i] != elet2)):
continue
exn = tns.plane_eh[i]
if (exn & 0x0C0) == 0x0C0: # if underline
altc = i+1
break
# process input
if not cstr: # session update
pass
elif cstr is True:
# maybe resize?
(maxy, maxx) = win.getmaxyx()
(columns, lines) = os.get_terminal_size()
if (maxy != lines) or (maxx != columns):
win.clear()
win.noutrefresh()
win.resize(lines, columns)
self.rewrite = True
elif cstr == "\x1b" or cstr == "KEY_ESC": # ESC
_logger.debug("keyed Esc")
return cstr
elif cstr == "KEY_RESIZE":
_logger.warning("KEY_RESIZE")
self.rewrite = True
try:
curses.resize_term(0, 0) # hack for Windows
except Exception:
pass
(maxy, maxx) = win.getmaxyx()
(columns, lines) = os.get_terminal_size()
if (maxy != lines) or (maxx != columns):
win.resize(lines, columns)
win.erase()
win.noutrefresh()
elif show:
curses.flash()
elif cstr.startswith("\x1b[200~"): # bracketed paste
paste = cstr[6:]
if paste.endswith("\x1b[201~"):
paste = paste[:-6]
if paste:
if tns.pwait or tns.system_lock_wait:
curses.flash()
curses.beep()
else:
self.__paste_data(tns, paste)
paste = None
elif cstr.endswith("\x1b[201~"): # bracketed paste end
paste += cstr[:-6]
if paste:
if tns.pwait or tns.system_lock_wait:
curses.flash()
curses.beep()
else:
self.__paste_data(tns, paste)
paste = None
elif paste is not None: # more bracketed paste data
paste += cstr
elif cstr == "KEY_SIC":
_logger.debug("keyed Shift+Insert")
if tns.pwait or tns.system_lock_wait:
curses.flash()
curses.beep()
elif _osname != "Windows":
curses.flash()
curses.beep()
else:
paste = None
fmt = 13 # CF_UNICODETEXT
k32 = ctypes.windll.kernel32
k32.GlobalLock.argtypes = [ctypes.c_void_p]
k32.GlobalLock.restype = ctypes.c_void_p
k32.GlobalUnlock.argtypes = [ctypes.c_void_p]
u32 = ctypes.windll.user32
u32.GetClipboardData.restype = ctypes.c_void_p
u32.OpenClipboard(0)
try:
if u32.IsClipboardFormatAvailable(fmt):
data = u32.GetClipboardData(fmt)
data_locked = k32.GlobalLock(data)
paste = ctypes.wstring_at(data_locked)
k32.GlobalUnlock(data_locked)
finally:
u32.CloseClipboard()
if paste:
self.__paste_data(tns, paste)
paste = None
elif cstr == "\x0c": # Ctrl+L
_logger.debug("keyed Ctrl+L")
self.rewrite = True
(maxy, maxx) = win.getmaxyx()
(columns, lines) = os.get_terminal_size()
if (maxy != lines) or (maxx != columns):
# assume Ctrl+L is for size change
win.resize(lines, columns)
win.erase()
win.noutrefresh()
if tns.pwait or tns.system_lock_wait:
curses.flash()
curses.beep()
else:
tns.clear()
elif len(cstr) == 1 and cstr.isprintable():
keylock = ati.value("KEYLOCK", trace=False)
if keylock == "1":
curses.flash()
else:
if insmode:
tns.key_ins_data(cstr, zti=self)
else:
self.__key_data(tns, cstr)
elif cstr == "\r":
_logger.debug("keyed Enter")
if tns.pwait or tns.system_lock_wait:
curses.flash()
curses.beep()
else:
tns.enter()
self.rewrite_keylock = True
elif cstr == "\n":
_logger.debug("keyed Shift+Enter")
tns.key_newline()
elif cstr == "\t":
_logger.debug("keyed Tab")
tns.key_tab(zti=self)
self.rewrite_cursor = True
elif (cstr == "\b" or
cstr == "KEY_BACKSPACE" or
cstr == "\x7f"):
_logger.debug("keyed Backspace")
tns.key_backspace(zti=self)
self.rewrite_cursor = True
elif (cstr == "\x0b" or # Ctrl+K
cstr == "\x1b[4~" or # Shift+End
cstr == "\x1b[F" or # Shift+End
cstr == "KEY_SEND"): # Shift+End
_logger.debug("keyed Shift+End or Ctrl+K")
tns.key_eraseeof(zti=self)
elif cstr == "KEY_END": # End
_logger.debug("keyed End")
tns.key_end()
self.rewrite_cursor = True
elif (cstr == "\x1b[1~" or
cstr == "\x1b H" or
cstr == "KEY_HOME"):
_logger.debug("keyed Home")
tns.key_home(zti=self)
self.rewrite_cursor = True
elif cstr == "KEY_DC":
_logger.debug("keyed Delete")
tns.key_delete(zti=self)
elif (cstr == "KEY_BTAB" or # Shift+Tab
cstr == "\x1b[~"): # Shift+Tab Windows->ssh
_logger.debug("keyed Shift+Tab")
tns.key_backtab(zti=self)
self.rewrite_cursor = True
elif altc > 0:
if tns.pwait or tns.system_lock_wait:
curses.flash()
curses.beep()
else:
tns.set_cursor_position(1, altc)
tns.enter()
self.rewrite_keylock = True
self.rewrite_cursor = True
elif cstr == "KEY_PPAGE": # PgUp
_logger.debug("keyed PgUp")
if tns.pwait or tns.system_lock_wait:
curses.flash()
curses.beep()
else:
tns.pf7()
self.rewrite_keylock = True
elif cstr == "KEY_NPAGE": # PgDn
_logger.debug("keyed PgDn")
if tns.pwait or tns.system_lock_wait:
curses.flash()
curses.beep()
else:
tns.pf8()
self.rewrite_keylock = True
elif cstr == "KEY_UP":
_logger.debug("keyed Up")
tns.key_curup(zti=self)
self.rewrite_cursor = True
elif cstr == "KEY_DOWN":
_logger.debug("keyed Dn")
tns.key_curdown(zti=self)
self.rewrite_cursor = True
elif cstr == "KEY_LEFT":
_logger.debug("keyed Left")
tns.key_curleft(zti=self)
self.rewrite_cursor = True
elif cstr == "KEY_RIGHT":
_logger.debug("keyed Right")
tns.key_curright(zti=self)
self.rewrite_cursor = True
elif cstr in ("\x1b\x1b[D", # Alt+LEFT
"\x1bb", # Alt+LEFT (Terminal.app)
"\x1b[1;3D"): # Alt+LEFT (Windows)
_logger.debug("keyed Alt+Left")
tns.key_word_left()
self.rewrite_cursor = True
elif cstr in ("\x1b\x1b[C", # Alt+RIGHT
"\x1bf", # Alt+RIGHT (Terminal.app)
"\x1b[1;3C"): # Alt+RIGHT (Windows)
_logger.debug("keyed Alt+Right")
tns.key_word_right()
self.rewrite_cursor = True
elif cstr == "KEY_IC":
_logger.debug("keyed Insert")
insmode = (not insmode)
if insmode:
self.__prog_curs_vis = 2 # very visible
else:
self.__prog_curs_vis = 1 # visible
elif (cstr == "\x1b1" or # ESC+1 (Alt+1)
cstr == "ALT_1" or # ESC+1 (Alt+1)
cstr == "\x1bKEY_IC" or # ESC+Insert (Alt+Insert)
cstr == "ALT_INS"): # Alt+Insert
_logger.debug("keyed Alt+1 or Alt+Insert")
if tns.pwait or tns.system_lock_wait:
curses.flash()
curses.beep()
else:
tns.pa1()
self.rewrite_keylock = True
elif (cstr == "\x1b2" or # ESC+2 (Alt+2)
cstr == "ALT_2" or # ESC+2 (Alt+2)
cstr == "\x1b\x1b[1~" or # ESC+Home (Alt+Home)
cstr == "ALT_HOME"): # Alt+Home
_logger.debug("keyed Alt+2 or Alt+Home")
if tns.pwait or tns.system_lock_wait:
curses.flash()
curses.beep()
else:
tns.pa2()
self.rewrite_keylock = True
elif (cstr == "\x1b3" or # ESC+3 (Alt+3)
cstr == "ALT_3"): # ESC+3 (Alt+3)
_logger.debug("keyed Alt+3")
if tns.pwait or tns.system_lock_wait:
curses.flash()
curses.beep()
else:
tns.pa3()
self.rewrite_keylock = True
elif (cstr == "\x1ba" or # ESC+a (Alt+A)
cstr == "ALT_A" or # ESC+a (Alt+A)
cstr == "\x03"): # Ctrl+C
_logger.debug("keyed Alt+A or Ctrl+C")
tns.attn()
elif (cstr == "\x1bc" or # ESC+c (Alt+c)
cstr == "ALT_C"): # ESC+c (Alt+c)
_logger.debug("keyed Alt+C")
if tns.pwait or tns.system_lock_wait:
curses.flash()
curses.beep()
else:
tns.clear()
self.rewrite = True
self.rewrite_keylock = True
elif (cstr == "KEY_F(1)" or
cstr == "\x1b[11~"):
_logger.debug("keyed F1")
if tns.pwait or tns.system_lock_wait:
curses.flash()
curses.beep()
else:
tns.pf1()
self.rewrite_keylock = True
elif (cstr == "KEY_F(2)" or
cstr == "\x1b[12~"):
_logger.debug("keyed F2")
if tns.pwait or tns.system_lock_wait:
curses.flash()
curses.beep()
else:
tns.pf2()
self.rewrite_keylock = True
elif (cstr == "KEY_F(3)" or
cstr == "\x1b[13~"):
_logger.debug("keyed F3")
if tns.pwait or tns.system_lock_wait:
curses.flash()
curses.beep()
else:
tns.pf3()
self.rewrite_keylock = True
elif (cstr == "KEY_F(4)" or
cstr == "\x1b[14~"):
_logger.debug("keyed F4")
if tns.pwait or tns.system_lock_wait:
curses.flash()
curses.beep()
else:
tns.pf4()
self.rewrite_keylock = True
elif cstr == "KEY_F(5)":
_logger.debug("keyed F5")
if tns.pwait or tns.system_lock_wait:
curses.flash()
curses.beep()
else:
tns.pf5()
self.rewrite_keylock = True
| |
{'status': fields.GroupStatus.ERROR_DELETING}
with mock.patch.object(
self.rest, 'delete_storage_group',
side_effect=exception.VolumeBackendAPIException):
model_update, __ = self.common._delete_group(
group, volumes)
self.assertEqual(ref_model_update, model_update)
# Verify that cloning a group from a group snapshot reports the new
# group as AVAILABLE. Clone-volume lookup, group-type checks and
# metadata retrieval are all mocked so only the generic
# (non-replicated consistency-group) code path is exercised.
@mock.patch.object(
common.PowerMaxCommon, '_get_clone_vol_info',
return_value=(tpd.PowerMaxData.device_id,
tpd.PowerMaxData.extra_specs, 1, 'tgt_vol'))
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type',
return_value=True)
@mock.patch.object(volume_utils, 'is_group_a_type',
return_value=False)
@mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata',
return_value='')
def test_create_group_from_src_success(self, mck_meta, mock_type,
mock_cg_type, mock_info):
ref_model_update = {'status': fields.GroupStatus.AVAILABLE}
model_update, volumes_model_update = (
self.common.create_group_from_src(
None, self.data.test_group_1, [self.data.test_volume],
self.data.test_group_snapshot_1, [], None, []))
self.assertEqual(ref_model_update, model_update)
# Verify that rolling back a failed create-group-from-source removes
# the created volumes from their storage group once per invocation:
# two rollback calls must produce exactly two removal calls.
@mock.patch.object(
common.PowerMaxCommon, '_remove_vol_and_cleanup_replication')
@mock.patch.object(
masking.PowerMaxMasking, 'remove_volumes_from_storage_group')
def test_rollback_create_group_from_src(
self, mock_rm, mock_clean):
# Minimal rollback context as built by create_group_from_src.
rollback_dict = {
'target_group_name': self.data.target_group_name,
'snap_name': 'snap1', 'source_group_name': 'src_grp',
'volumes': (self.data.device_id, self.data.extra_specs,
self.data.test_volume),
'device_ids': [self.data.device_id],
'interval_retries_dict': self.data.extra_specs}
for x in range(0, 2):
self.common._rollback_create_group_from_src(
self.data.array, rollback_dict)
self.assertEqual(2, mock_rm.call_count)
    def test_get_snap_src_dev_list(self):
        """_get_snap_src_dev_list returns the device ids behind snapshots.

        For the fixture snapshot the expected result is a single-element
        list holding the fixture device id.
        """
        src_dev_ids = self.common._get_snap_src_dev_list(
            self.data.array, [self.data.test_snapshot])
        ref_dev_ids = [self.data.device_id]
        self.assertEqual(ref_dev_ids, src_dev_ids)
def test_get_clone_vol_info(self):
ref_dev_id = self.data.device_id
source_vols = [self.data.test_volume,
self.data.test_attached_volume]
src_snapshots = [self.data.test_snapshot]
src_dev_id1, extra_specs1, vol_size1, tgt_vol_name1 = (
self.common._get_clone_vol_info(
self.data.test_clone_volume, source_vols, []))
src_dev_id2, extra_specs2, vol_size2, tgt_vol_name2 = (
self.common._get_clone_vol_info(
self.data.test_clone_volume, [], src_snapshots))
self.assertEqual(ref_dev_id, src_dev_id1)
self.assertEqual(ref_dev_id, src_dev_id2)
def test_get_attributes_from_cinder_config_new_and_old(self):
kwargs_expected = (
{'RestServerIp': '1.1.1.1', 'RestServerPort': 8443,
'RestUserName': 'smc', 'RestPassword': '<PASSWORD>', 'SSLVerify': False,
'SerialNumber': self.data.array, 'srpName': 'SRP_1',
'PortGroup': self.data.port_group_name_i})
old_conf = tpfo.FakeConfiguration(None, 'CommonTests', 1, 1)
configuration = tpfo.FakeConfiguration(
None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
vmax_array=self.data.array, vmax_srp='SRP_1', san_password='<PASSWORD>',
san_api_port=8443, vmax_port_groups=[self.data.port_group_name_i])
self.common.configuration = configuration
kwargs_returned = self.common.get_attributes_from_cinder_config()
self.assertEqual(kwargs_expected, kwargs_returned)
self.common.configuration = old_conf
kwargs = self.common.get_attributes_from_cinder_config()
self.assertIsNone(kwargs)
def test_get_attributes_from_cinder_config_with_port(self):
kwargs_expected = (
{'RestServerIp': '1.1.1.1', 'RestServerPort': 3448,
'RestUserName': 'smc', 'RestPassword': '<PASSWORD>', 'SSLVerify': False,
'SerialNumber': self.data.array, 'srpName': 'SRP_1',
'PortGroup': self.data.port_group_name_i})
configuration = tpfo.FakeConfiguration(
None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
vmax_array=self.data.array, vmax_srp='SRP_1', san_password='<PASSWORD>',
san_api_port=3448, vmax_port_groups=[self.data.port_group_name_i])
self.common.configuration = configuration
kwargs_returned = self.common.get_attributes_from_cinder_config()
self.assertEqual(kwargs_expected, kwargs_returned)
def test_get_attributes_from_cinder_config_no_port(self):
kwargs_expected = (
{'RestServerIp': '1.1.1.1', 'RestServerPort': 8443,
'RestUserName': 'smc', 'RestPassword': '<PASSWORD>', 'SSLVerify': False,
'SerialNumber': self.data.array, 'srpName': 'SRP_1',
'PortGroup': self.data.port_group_name_i})
configuration = tpfo.FakeConfiguration(
None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
vmax_array=self.data.array, vmax_srp='SRP_1', san_password='<PASSWORD>',
vmax_port_groups=[self.data.port_group_name_i])
self.common.configuration = configuration
kwargs_returned = self.common.get_attributes_from_cinder_config()
self.assertEqual(kwargs_expected, kwargs_returned)
def test_get_ssl_attributes_from_cinder_config(self):
conf = tpfo.FakeConfiguration(
None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
vmax_array=self.data.array, vmax_srp='SRP_1', san_password='<PASSWORD>',
vmax_port_groups=[self.data.port_group_name_i],
driver_ssl_cert_verify=True,
driver_ssl_cert_path='/path/to/cert')
self.common.configuration = conf
conf_returned = self.common.get_attributes_from_cinder_config()
self.assertEqual('/path/to/cert', conf_returned['SSLVerify'])
conf.driver_ssl_cert_verify = True
conf.driver_ssl_cert_path = None
conf_returned = self.common.get_attributes_from_cinder_config()
self.assertTrue(conf_returned['SSLVerify'])
conf.driver_ssl_cert_verify = False
conf.driver_ssl_cert_path = None
conf_returned = self.common.get_attributes_from_cinder_config()
self.assertFalse(conf_returned['SSLVerify'])
    @mock.patch.object(rest.PowerMaxRest, 'get_size_of_device_on_array',
                       return_value=2.0)
    def test_manage_snapshot_get_size_success(self, mock_get_size):
        """manage_existing_snapshot_get_size reports the device size.

        The REST layer is mocked to report 2.0; the common layer is
        expected to return 2 for the snapshot.
        """
        size = self.common.manage_existing_snapshot_get_size(
            self.data.test_snapshot)
        self.assertEqual(2, size)
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snap',
return_value={'snap_name': 'snap_name'})
@mock.patch.object(
common.PowerMaxCommon, 'get_snapshot_metadata',
return_value={'snap-meta-key-1': 'snap-meta-value-1',
'snap-meta-key-2': 'snap-meta-value-2'})
def test_manage_snapshot_success(self, mck_meta, mock_snap):
snapshot = deepcopy(self.data.test_snapshot_manage)
snapshot.metadata = {'user-meta-key-1': 'user-meta-value-1',
'user-meta-key-2': 'user-meta-value-2'}
existing_ref = {u'source-name': u'test_snap'}
updates_response = self.common.manage_existing_snapshot(
snapshot, existing_ref)
prov_loc = {'source_id': self.data.device_id,
'snap_name': 'OS-%s' % existing_ref['source-name']}
updates = {'display_name': 'my_snap',
'provider_location': six.text_type(prov_loc),
'metadata': {'snap-meta-key-1': 'snap-meta-value-1',
'snap-meta-key-2': 'snap-meta-value-2',
'user-meta-key-1': 'user-meta-value-1',
'user-meta-key-2': 'user-meta-value-2'}}
self.assertEqual(updates_response, updates)
    def test_manage_snapshot_fail_already_managed(self):
        """Managing a snapshot whose name already has the OS- prefix fails."""
        snapshot = self.data.test_snapshot_manage
        # the OS- prefix marks a snapshot as already managed by the driver
        existing_ref = {u'source-name': u'OS-test_snap'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.common.manage_existing_snapshot,
                          snapshot, existing_ref)
    @mock.patch.object(utils.PowerMaxUtils, 'is_volume_failed_over',
                       return_value=True)
    def test_manage_snapshot_fail_vol_failed_over(self, mock_failed):
        """Managing a snapshot of a failed-over volume is rejected."""
        snapshot = self.data.test_snapshot_manage
        existing_ref = {u'source-name': u'test_snap'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.common.manage_existing_snapshot,
                          snapshot, existing_ref)
    @mock.patch.object(rest.PowerMaxRest, 'get_volume_snap',
                       return_value=False)
    def test_manage_snapshot_fail_vol_not_snap_src(self, mock_snap):
        """Managing fails when the device is not a snapshot source.

        get_volume_snap is mocked to return a falsy value, so the common
        layer must raise instead of managing the snapshot.
        """
        snapshot = self.data.test_snapshot_manage
        existing_ref = {u'source-name': u'test_snap'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.common.manage_existing_snapshot,
                          snapshot, existing_ref)
    @mock.patch.object(utils.PowerMaxUtils, 'modify_snapshot_prefix',
                       side_effect=exception.VolumeBackendAPIException)
    def test_manage_snapshot_fail_add_prefix(self, mock_mod):
        """A failure while adding the OS- prefix propagates to the caller."""
        snapshot = self.data.test_snapshot_manage
        existing_ref = {u'source-name': u'test_snap'}
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.common.manage_existing_snapshot,
                          snapshot, existing_ref)
@mock.patch.object(rest.PowerMaxRest, 'modify_volume_snap')
def test_unmanage_snapshot_success(self, mock_mod, ):
self.common.unmanage_snapshot(self.data.test_snapshot_manage)
mock_mod.assert_called_once()
    @mock.patch.object(common.PowerMaxCommon, '_sync_check')
    @mock.patch.object(rest.PowerMaxRest, 'modify_volume_snap')
    def test_unmanage_snapshot_no_sync_check(self, mock_mod, mock_sync):
        """unmanage_snapshot renames the snap without calling _sync_check."""
        self.common.unmanage_snapshot(self.data.test_snapshot_manage)
        mock_mod.assert_called_once()
        mock_sync.assert_not_called()
    @mock.patch.object(utils.PowerMaxUtils, 'is_volume_failed_over',
                       return_value=True)
    def test_unmanage_snapshot_fail_failover(self, mock_failed):
        """unmanage_snapshot raises when the source volume is failed over."""
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.common.unmanage_snapshot,
                          self.data.test_snapshot_manage)
    @mock.patch.object(rest.PowerMaxRest, 'modify_volume_snap',
                       side_effect=exception.VolumeBackendAPIException)
    def test_unmanage_snapshot_fail_rename(self, mock_snap):
        """A failed rename surfaces as VolumeBackendAPIException."""
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.common.unmanage_snapshot,
                          self.data.test_snapshot_manage)
@mock.patch.object(provision.PowerMaxProvision, 'delete_volume_snap')
@mock.patch.object(provision.PowerMaxProvision, 'is_restore_complete',
return_value=True)
@mock.patch.object(common.PowerMaxCommon, '_clone_check')
@mock.patch.object(provision.PowerMaxProvision, 'revert_volume_snapshot')
def test_revert_to_snapshot(self, mock_revert, mock_clone,
mock_complete, mock_delete):
volume = self.data.test_volume
snapshot = self.data.test_snapshot
array = self.data.array
device_id = self.data.device_id
snap_name = self.data.snap_location['snap_name']
extra_specs = deepcopy(self.data.extra_specs_intervals_set)
extra_specs['storagetype:portgroupname'] = (
self.data.port_group_name_f)
self.common.revert_to_snapshot(volume, snapshot)
mock_revert.assert_called_once_with(
array, device_id, snap_name, extra_specs)
mock_clone.assert_called_once_with(array, device_id, extra_specs)
mock_complete.assert_called_once_with(array, device_id,
snap_name, extra_specs)
mock_delete.assert_called_once_with(array, snap_name, device_id,
restored=True, generation=0)
    @mock.patch.object(utils.PowerMaxUtils, 'is_replication_enabled',
                       return_value=True)
    def test_revert_to_snapshot_replicated(self, mock_rep):
        """revert_to_snapshot is rejected for replication-enabled volumes."""
        volume = self.data.test_volume
        snapshot = self.data.test_snapshot
        self.assertRaises(exception.VolumeDriverException,
                          self.common.revert_to_snapshot, volume, snapshot)
    def test_get_initiator_check_flag(self):
        """_get_initiator_check_flag mirrors configuration (False case)."""
        self.common.configuration.initiator_check = False
        initiator_check = self.common._get_initiator_check_flag()
        self.assertFalse(initiator_check)
    def test_get_initiator_check_flag_true(self):
        """_get_initiator_check_flag mirrors configuration (True case)."""
        self.common.configuration.initiator_check = True
        initiator_check = self.common._get_initiator_check_flag()
        self.assertTrue(initiator_check)
def test_get_manageable_volumes_success(self):
marker = limit = offset = sort_keys = sort_dirs = None
with mock.patch.object(
self.rest, 'get_private_volume_list',
return_value=self.data.priv_vol_func_response_single):
vols_lists = self.common.get_manageable_volumes(
marker, limit, offset, sort_keys, sort_dirs)
expected_response = [
{'reference': {'source-id': '00001'}, 'safe_to_manage': True,
'size': 1.0, 'reason_not_safe': None, 'cinder_id': None,
'extra_info': {'config': 'TDEV', 'emulation': 'FBA'}}]
self.assertEqual(vols_lists, expected_response)
def test_get_manageable_volumes_filters_set(self):
marker, limit, offset = '00002', 2, 1
sort_keys, sort_dirs = 'size', 'desc'
with mock.patch.object(
self.rest, 'get_private_volume_list',
return_value=self.data.priv_vol_func_response_multi):
vols_lists = self.common.get_manageable_volumes(
marker, limit, offset, sort_keys, sort_dirs)
expected_response = [
{'reference': {'source-id': '00003'}, 'safe_to_manage': True,
'size': 300, 'reason_not_safe': None, 'cinder_id': None,
'extra_info': {'config': 'TDEV', 'emulation': 'FBA'}},
{'reference': {'source-id': '00004'}, 'safe_to_manage': True,
'size': 400, 'reason_not_safe': None, 'cinder_id': None,
'extra_info': {'config': 'TDEV', 'emulation': 'FBA'}}]
self.assertEqual(vols_lists, expected_response)
def test_get_manageable_volumes_fail_no_vols(self):
marker = limit = offset = sort_keys = sort_dirs = None
with mock.patch.object(
self.rest, 'get_private_volume_list',
return_value=[]):
expected_response = []
vol_list = self.common.get_manageable_volumes(
marker, limit, offset, sort_keys, sort_dirs)
self.assertEqual(vol_list, expected_response)
def test_get_manageable_volumes_fail_no_valid_vols(self):
marker = limit = offset = sort_keys = sort_dirs = None
with mock.patch.object(
self.rest, 'get_private_volume_list',
return_value=self.data.priv_vol_func_response_multi_invalid):
expected_response = []
vol_list = self.common.get_manageable_volumes(
marker, limit, offset, sort_keys, sort_dirs)
self.assertEqual(vol_list, expected_response)
def test_get_manageable_snapshots_success(self):
marker = limit = offset = sort_keys = sort_dirs = None
with mock.patch.object(
self.rest, 'get_private_volume_list',
return_value=self.data.priv_vol_func_response_single):
snap_list = self.common.get_manageable_snapshots(
marker, limit, offset, sort_keys, sort_dirs)
expected_response = [{
'reference': {'source-name': 'testSnap1'},
'safe_to_manage': True, 'size': 1,
'reason_not_safe': None, 'cinder_id': None,
'extra_info': {
'generation': 0, 'secured': False, 'timeToLive': 'N/A',
'timestamp': mock.ANY},
'source_reference': {'source-id': '00001'}}]
self.assertEqual(snap_list, expected_response)
def test_get_manageable_snapshots_filters_set(self):
marker, limit, offset = 'testSnap2', 2, 1
sort_keys, sort_dirs = 'size', 'desc'
with mock.patch.object(
self.rest, 'get_private_volume_list',
return_value=self.data.priv_vol_func_response_multi):
vols_lists = self.common.get_manageable_snapshots(
marker, limit, offset, sort_keys, sort_dirs)
expected_response = [
{'reference': {'source-name': 'testSnap3'},
'safe_to_manage': True, 'size': 300, 'reason_not_safe': None,
'cinder_id': None, 'extra_info': {
'generation': 0, 'secured': False, 'timeToLive': 'N/A',
'timestamp': mock.ANY},
'source_reference': {'source-id': '00003'}},
{'reference': {'source-name': 'testSnap4'},
'safe_to_manage': True, 'size': 400, 'reason_not_safe': None,
'cinder_id': None, 'extra_info': {
'generation': 0, 'secured': False, 'timeToLive': 'N/A',
'timestamp': mock.ANY},
'source_reference': {'source-id': '00004'}}]
self.assertEqual(vols_lists, expected_response)
def test_get_manageable_snapshots_fail_no_snaps(self):
marker = limit = offset = sort_keys = sort_dirs = None
with mock.patch.object(self.rest, 'get_private_volume_list',
return_value=[]):
expected_response = []
vols_lists = self.common.get_manageable_snapshots(
marker, limit, offset, sort_keys, sort_dirs)
self.assertEqual(vols_lists, expected_response)
def test_get_manageable_snapshots_fail_no_valid_snaps(self):
marker = limit = offset = sort_keys = sort_dirs = None
with mock.patch.object(
self.rest, 'get_private_volume_list',
return_value=self.data.priv_vol_func_response_multi_invalid):
expected_response = []
vols_lists = self.common.get_manageable_snapshots(
marker, limit, offset, sort_keys, sort_dirs)
self.assertEqual(vols_lists, expected_response)
def test_get_slo_workload_combo_from_cinder_conf(self):
self.common.configuration.vmax_service_level = 'Diamond'
self.common.configuration.vmax_workload = 'DSS'
response1 = self.common.get_attributes_from_cinder_config()
self.assertEqual('Diamond', response1['ServiceLevel'])
self.assertEqual('DSS', response1['Workload'])
self.common.configuration.vmax_service_level = 'Diamond'
self.common.configuration.vmax_workload = None
response2 = self.common.get_attributes_from_cinder_config()
self.assertEqual(self.common.configuration.vmax_service_level,
response2['ServiceLevel'])
self.assertIsNone(response2['Workload'])
expected_response = {
'RestServerIp': '1.1.1.1', 'RestServerPort': 8443,
'RestUserName': 'smc', 'RestPassword': '<PASSWORD>', 'SSLVerify': False,
'SerialNumber': '000197800123', 'srpName': 'SRP_1',
'PortGroup': 'OS-fibre-PG'}
self.common.configuration.vmax_service_level = None
self.common.configuration.vmax_workload = 'DSS'
response3 = self.common.get_attributes_from_cinder_config()
self.assertEqual(expected_response, response3)
self.common.configuration.vmax_service_level = None
self.common.configuration.vmax_workload = None
response4 = self.common.get_attributes_from_cinder_config()
self.assertEqual(expected_response, response4)
def test_get_u4p_failover_info(self):
configuration = tpfo.FakeConfiguration(
None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='test',
san_password='<PASSWORD>', san_api_port=8443,
driver_ssl_cert_verify='/path/to/cert',
u4p_failover_target=(self.data.u4p_failover_config[
'u4p_failover_targets']), u4p_failover_backoff_factor='2',
u4p_failover_retries='3', u4p_failover_timeout='10',
u4p_primary='10.10.10.10')
self.common.configuration = configuration
self.common._get_u4p_failover_info()
self.assertTrue(self.rest.u4p_failover_enabled)
self.assertIsNotNone(self.rest.u4p_failover_targets)
def test_update_vol_stats_retest_u4p(self):
self.rest.u4p_in_failover = True
self.rest.u4p_failover_autofailback = True
with mock.patch.object(
self.common, 'retest_primary_u4p') as mock_retest:
self.common.update_volume_stats()
mock_retest.assert_called_once()
self.rest.u4p_in_failover = True
self.rest.u4p_failover_autofailback = False
with mock.patch.object(
self.common, 'retest_primary_u4p') as mock_retest:
self.common.update_volume_stats()
mock_retest.assert_not_called()
    @mock.patch.object(rest.PowerMaxRest, 'request', return_value=[200, None])
    @mock.patch.object(
        common.PowerMaxCommon, 'get_attributes_from_cinder_config',
        return_value=tpd.PowerMaxData.u4p_failover_target[0])
    def test_retest_primary_u4p(self, mock_primary_u4p, mock_request):
        """A successful (HTTP 200) probe of the primary Unisphere clears
        the REST layer's failover flag."""
        self.common.retest_primary_u4p()
        self.assertFalse(self.rest.u4p_in_failover)
@mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session',
return_value=(None, False, None))
@mock.patch.object(common.PowerMaxCommon, '_sync_check')
def test_extend_vol_validation_checks_success(self, mck_sync, mck_rep):
volume = self.data.test_volume
array = self.data.array
device_id = self.data.device_id
new_size = self.data.test_volume.size + 1
extra_specs = deepcopy(self.data.extra_specs)
self.common._extend_vol_validation_checks(
array, device_id, volume.name, extra_specs, volume.size, new_size)
@mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session',
return_value=(None, False, None))
@mock.patch.object(common.PowerMaxCommon, '_sync_check')
def test_extend_vol_val_check_no_device(self, mck_sync, mck_rep):
volume = self.data.test_volume
array = self.data.array
device_id = None
new_size = self.data.test_volume.size + 1
extra_specs = deepcopy(self.data.extra_specs)
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._extend_vol_validation_checks,
array, device_id, volume.name, extra_specs, volume.size, new_size)
@mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session',
return_value=(None, True, None))
@mock.patch.object(common.PowerMaxCommon, '_sync_check')
def test_extend_vol_val_check_snap_src(self, mck_sync, mck_rep):
volume = self.data.test_volume
array = self.data.array
device_id = self.data.device_id
new_size = self.data.test_volume.size + 1
extra_specs = deepcopy(self.data.extra_specs)
self.common.next_gen = False
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._extend_vol_validation_checks,
array, device_id, volume.name, extra_specs, volume.size, new_size)
@mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session',
return_value=(None, False, None))
@mock.patch.object(common.PowerMaxCommon, '_sync_check')
def test_extend_vol_val_check_wrong_size(self, mck_sync, mck_rep):
volume = self.data.test_volume
array = self.data.array
device_id = self.data.device_id
new_size = volume.size - 1
extra_specs = deepcopy(self.data.extra_specs)
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._extend_vol_validation_checks,
array, device_id, volume.name, extra_specs, volume.size, new_size)
    def test_array_ode_capabilities_check_non_next_gen_local(self):
        """Rep enabled, neither array next gen, returns F,F,F,F"""
        array = self.data.powermax_model_details['symmetrixId']
        self.common.next_gen = False
        # with next_gen False neither the local (r1) nor remote (r2) array
        # may report online device expansion (ODE) support
        (r1_ode, r1_ode_metro,
         r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check(
            array, True)
        self.assertFalse(r1_ode)
        self.assertFalse(r1_ode_metro)
        self.assertFalse(r2_ode)
        self.assertFalse(r2_ode_metro)
@mock.patch.object(rest.PowerMaxRest, 'get_array_detail',
return_value={'ucode': '5977.1.1'})
@mock.patch.object(common.PowerMaxCommon, 'get_rdf_details',
return_value=(10, tpd.PowerMaxData.remote_array))
| |
nrecombs, ncoals = nlineages
node, a = state
treelen_b = get_treelen_branch(tree, times, node, times[a], use_basal=True)
treelen = get_treelen_branch(tree, times, node, times[a], use_basal=False)
k = recomb_time
w = max(a, times.index(tree.root.age))
nbranches_k = nbranches[k] + int(k < a)
nrecombs_k = nrecombs[k] + int(k <= a) + int(k == a < w)
return (nbranches_k * time_steps[k] / float(nrecombs_k * treelen_b)
* (1.0 - exp(- rho * max(treelen, 1.0))))
def prob_recoal(tree, state, nlineages, times, time_steps, popsizes,
                recomb_node, recomb_time, coal_time):
    """Probability that a recombination placed at recomb_time on
    recomb_node re-coalesces in time interval coal_time, given the
    current state (node, time-index) and per-interval lineage counts.
    """
    nbranches, nrecombs, ncoals = nlineages
    node, a = state
    k = recomb_time
    b = coal_time

    # time index of the branch above the recombination point;
    # -1 encodes "on the new chromosome's own lineage"
    if recomb_node == -1 or not tree[recomb_node].parents:
        recomb_parent_age = a
    else:
        recomb_parent_age = times.index(tree[recomb_node].parents[0].age)
        if recomb_node == node:
            recomb_parent_age = a
            assert recomb_parent_age == a, (recomb_parent_age, a)

    def lineages_at(m):
        # one extra branch below a, one fewer below the broken branch
        return nbranches[m] + int(m < a) - int(m < recomb_parent_age)

    # probability of no coalescence during intervals [k, b)
    waiting = sum(time_steps[m] * lineages_at(m) / (2.0 * popsizes[m])
                  for m in range(k, b))
    p = exp(-waiting)

    # probability of coalescing within interval b (skipped for the top
    # intervals, matching the literal reference computation)
    if b < len(time_steps) - 2:
        rate = time_steps[b] * lineages_at(b) / (2.0 * popsizes[b])
        p *= (1.0 - exp(-rate)) / ncoals[b]
    return p
def iter_transition_recombs(tree, state1, state2, times):
    """Yield (recomb_node, recomb_time) pairs consistent with a
    transition from state1 to state2 (each a (node, time-index) pair).

    A recomb_node of -1 encodes a recombination on the new chromosome's
    own lineage.
    """
    node1, a = state1
    node2, b = state2
    last = min(a, b)

    # recombination on the state's own branch is only possible when the
    # chromosome stays on the same branch
    if node1 == node2:
        base = times.index(tree[node1].age)
        for k in range(base, last + 1):
            yield node1, k

    # recombination anywhere on the new chromosome itself
    for k in range(last + 1):
        yield -1, k
def calc_transition_probs(tree, states, nlineages, times,
                          time_steps, popsizes, rho):
    """
    Calculate transition probabilities very literally for testing
    """
    # work on a copy so the caller's tree is not mutated by the pruning
    tree = tree.copy()
    arglib.remove_single_lineages(tree)
    nstates = len(states)
    #ntimes = len(time_steps)
    #minlen = time_steps[0]
    treelen = sum(x.get_dist() for x in tree)
    nbranches, nrecombs, ncoals = nlineages
    # calculate full state transition matrix
    transprob = util.make_matrix(nstates, nstates, 0.0)
    for i in range(nstates):
        node1, a = states[i]
        #c = times.index(tree[node1].age)
        for j in range(nstates):
            node2, b = states[j]
            coal_time = b
            # sum over every recombination placement that could move the
            # chromosome from state i to state j
            p = 0.0
            for recomb_node, recomb_time in iter_transition_recombs(
                    tree, states[i], states[j], times):
                p += (prob_recomb(tree, states[i], nlineages, times,
                                  time_steps, rho, recomb_time) *
                      prob_recoal(tree, states[i], nlineages, times,
                                  time_steps, popsizes,
                                  recomb_node, recomb_time, coal_time))
            # probability of no recomb
            if i == j:
                treelen = get_treelen_branch(tree, times, node1, times[a],
                                             use_basal=False)
                p += exp(-rho * max(treelen, 1.0))
            # matrix is stored in log space
            transprob[i][j] = log(p)
    return transprob
def get_recomb_transition_switch(tree, last_tree, spr, states1, states2,
                                 times):
    """Return the pair of states2 indices reachable when a recombination
    falls on the SPR branch itself: (stay-with-subtree, escape).

    When the SPR subtree moves out from underneath the chromosome, the
    new chromosome coalesces with the branch above the subtree.
    """
    (rbranch, rtime), (cbranch, ctime) = spr

    # walk up to the branch above the recombination in the previous tree
    rnode = last_tree[rbranch]
    above = rnode.parents[0]
    above_age = times.index(above.age)

    # sibling of the recombination branch
    kids = above.children
    sibling = kids[1] if kids[0] == rnode else kids[0]

    # translate the sibling into a state of the new tree
    if sibling.name == cbranch:
        escape_state = (tree[sibling.name].parents[0].name, above_age)
    else:
        escape_state = (sibling.name, above_age)

    stay_idx = states2.index((rbranch, rtime))
    escape_idx = states2.index(escape_state)
    return (stay_idx, escape_idx)
def calc_transition_probs_switch(tree, last_tree, recomb_name,
                                 states1, states2,
                                 nlineages, times,
                                 time_steps, popsizes, rho):
    """Calculate the transition probability matrix across a tree switch
    (an SPR recombination), very literally, for testing.

    Returns a len(states1) x len(states2) matrix of log probabilities.
    Fix: xrange (Python-2-only builtin) replaced with range; behavior is
    identical under Python 2 and the function now also runs on Python 3.
    """
    treelen = get_treelen(last_tree, times)
    nbranches, nrecombs, ncoals = nlineages
    (recomb_branch, recomb_time), (coal_branch, coal_time) = \
        find_recomb_coal(tree, last_tree, recomb_name=recomb_name)

    k = times.index(recomb_time)
    coal_time = times.index(coal_time)

    # prune single-child lineages from working copies of both trees
    last_tree2 = last_tree.copy()
    arglib.remove_single_lineages(last_tree2)
    tree2 = tree.copy()
    arglib.remove_single_lineages(tree2)

    # compute transition probability matrix
    transprob = util.make_matrix(len(states1), len(states2), -util.INF)

    determ = get_deterministic_transitions(states1, states2, times,
                                           tree2, last_tree2,
                                           recomb_branch, k,
                                           coal_branch, coal_time)

    for i, (node1, a) in enumerate(states1):
        if (node1, a) == (recomb_branch, k):
            # probabilistic transition case (recomb case)
            spr = (recomb_branch, k), (coal_branch, coal_time)
            recomb_next_states = get_recomb_transition_switch(
                tree2, last_tree2, spr, states1, states2, times)
            # placeholders
            transprob[i][recomb_next_states[0]] = log(.5)
            transprob[i][recomb_next_states[1]] = log(.5)
        elif (node1, a) == (coal_branch, coal_time):
            # probabilistic transition case (re-coal case)
            # determine if node1 is still here or not
            last_recomb = last_tree2[recomb_branch]
            last_parent = last_recomb.parents[0]
            if last_parent.name == node1:
                # recomb breaks node1 branch, we need to use the other child
                c = last_parent.children
                node3 = c[0].name if c[1] == last_recomb else c[1].name
            else:
                node3 = node1
            # find parent of recomb_branch and node1
            last_parent_age = times.index(last_parent.age)
            parent = tree2[recomb_branch].parents[0]
            assert parent == tree2[node3].parents[0]
            # treelen of T^n_{i-1}
            blen = times[a]
            treelen2 = treelen + blen
            if node1 == last_tree2.root.name:
                treelen2 += blen - last_tree2.root.age
                treelen2 += time_steps[a]
            else:
                treelen2 += time_steps[times.index(last_tree2.root.age)]
            for j, (node2, b) in enumerate(states2):
                transprob[i][j] = 0.0
                # only three destinations are reachable from the re-coal
                # state: the recomb branch (above k), the sibling, or the
                # parent branch at the same time index
                if not ((node2 == recomb_branch and b >= k) or
                        (node2 == node3 and b == a) or
                        (node2 == parent.name and b == a)):
                    continue
                # get lineage counts
                # remove recombination branch and add new branch
                kbn = nbranches[b]
                kcn = ncoals[b] + 1
                if times[b] < parent.age:
                    kbn -= 1
                    kcn -= 1
                if b < a:
                    kbn += 1
                twon = 2.0 * popsizes[b]
                transprob[i][j] = (
                    (1.0 - exp(- time_steps[b] * kbn / twon)) / kcn *
                    exp(- sum(time_steps[m] * (nbranches[m] + 1
                              - (1 if m < last_parent_age else 0))
                              / (2.0 * popsizes[m])
                              for m in range(k, b))))
            # normalize row to ensure they add up to one
            tot = sum(transprob[i])
            for j in range(len(states2)):
                x = transprob[i][j]
                if tot > 0.0 and x > 0.0:
                    transprob[i][j] = log(x / tot)
                else:
                    transprob[i][j] = -1e1000
        else:
            # deterministic transition
            assert determ[i] != -1, determ
            transprob[i][determ[i]] = 0.0

    return transprob
def get_deterministic_transitions(states1, states2, times,
                                  tree, last_tree,
                                  recomb_branch, recomb_time,
                                  coal_branch, coal_time):
    """Map each state in states1 to its deterministic successor in states2.

    Returns a list of indices into states2; the non-deterministic
    (re-coal) state is marked with -1.
    Fix: Python-2-only ``print node1`` statement replaced with the
    ``print(node1)`` call form, which behaves identically on Python 2
    and is valid on Python 3.
    """

    # recomb_branch in tree and last_tree
    # coal_branch in last_tree

    state2_lookup = util.list2lookup(states2)

    next_states = []
    for i, state1 in enumerate(states1):
        node1, a = state1

        if (node1, a) == (coal_branch, coal_time):
            # not a deterministic case
            next_states.append(-1)

        elif node1 != recomb_branch:
            # SPR only removes a subset of descendents, if any
            # trace up from remaining leaf to find correct new state
            node = last_tree.nodes.get(node1, None)
            if node is None:
                print(node1)
                treelib.draw_tree_names(last_tree.get_tree(),
                                        minlen=8, maxlen=8)
                raise Exception("unknown node name '%s'" % node1)

            if node.is_leaf():
                # SPR can't disrupt leaf branch
                node2 = node1
            else:
                child1 = node.children[0]
                child2 = node.children[1]

                if recomb_branch == child1.name:
                    # right child is not disrupted
                    node2 = child2.name
                elif recomb_branch == child2.name:
                    # left child is not disrupted
                    node2 = child1.name
                else:
                    # node is not disrupted
                    node2 = node1

            # optionally walk up
            if ((coal_branch == node1 or coal_branch == node2) and
                    coal_time <= a):
                # coal occurs under us
                node2 = tree[node2].parents[0].name
            next_states.append(state2_lookup[(node2, a)])

        else:
            # SPR is on same branch as new chromosome
            if recomb_time >= a:
                # we move with SPR subtree
                # TODO: we could probabilistically have subtree move
                # out from underneath.
                next_states.append(state2_lookup[(recomb_branch, a)])

            else:
                # SPR should not be able to coal back onto same branch
                # this would be a self cycle
                assert coal_branch != node1

                # SPR subtree moves out from underneath us
                # therefore the new chromosome coalesces with
                # the branch above the subtree

                # search up for parent
                recomb = last_tree[recomb_branch]
                parent = recomb.parents[0]
                b = times.index(parent.age)

                # find other child
                c = parent.children
                other = (c[0] if c[1] == recomb else c[1])

                # find new state in tree
                if other.name == coal_branch:
                    next_state = (tree[other.name].parents[0].name, b)
                else:
                    next_state = (other.name, b)

                next_states.append(state2_lookup[next_state])

    return next_states
def calc_state_priors(tree, states, nlineages,
                      times, time_steps, popsizes, rho):
    """Calculate the log prior probability of each (node, time) state.

    Only the branch counts (nlineages[0]) and coalescence-point counts
    (nlineages[2]) are consulted; tree, times, and rho are accepted for
    interface compatibility with the other calc_* functions.
    """
    nbranches = nlineages[0]
    ncoals = nlineages[2]

    def log_prior(b):
        # chance of coalescing within interval b ...
        coal_rate = time_steps[b] * nbranches[b] / (2.0 * popsizes[b])
        # ... times the chance of not coalescing in any earlier interval
        escape = sum(time_steps[m] * nbranches[m] / (2.0 * popsizes[m])
                     for m in range(b))
        return log((1 - exp(-coal_rate)) / ncoals[b] * exp(-escape))

    return [log_prior(b) for node, b in states]
def est_arg_popsizes(arg, times=None, popsize_mu=1e4, popsize_sigma=.5e4):
nleaves = len(list(arg.leaves()))
assert times
eps = 1e-3
def get_local_children(node, pos, local):
return set(child for child in arg.get_local_children(node, pos)
if child in local)
def get_parent(node, pos, local):
parent = arg.get_local_parent(node, pos)
while len(get_local_children(parent, pos, local)) == 1:
parent = arg.get_local_parent(parent, pos)
return parent
ntimes = len(times)
time_steps = [times[i] - times[i-1]
for i in range(1, ntimes)]
ncoals = [0] * ntimes
k_lineages = [0] * ntimes
# loop through sprs
for recomb_pos, (rnode, rtime), (cnode, ctime), local in \
arglib.iter_arg_sprs(arg, use_local=True):
i, _ = util.binsearch(times, ctime)
ncoals[i] += 1
recomb_node = arg[rnode]
broken_node = get_parent(recomb_node, recomb_pos-eps, | |
<gh_stars>0
#!/usr/bin/env python
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: <NAME>
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 3 Mar 2010 #3771 jelkins Initial Creation.
from __future__ import with_statement
# the version is derived from the date last updated y.y.m.d
version = "1.0.3.12"
from optparse import OptionParser
from optparse import OptionGroup
from os import pathsep
from os import rename
from os.path import basename
from os.path import splitext
import sys
import re
import logging
from FileTypeConfig import FileTypeConfig
import OptionCallback
# Cache of compiled regular expressions, keyed by file type then regex key.
_regexCache = {}

def getRegex(fileType, regexKey, value=None):
    """Return the cached compiled regex for (fileType, regexKey).

    When value is given it is compiled with re.DOTALL and stored,
    replacing any previous entry.  When value is omitted, the cached
    pattern (or None if nothing was ever compiled) is returned.
    """
    global _regexCache

    # idiom: setdefault replaces the manual membership check/insert
    fileTypeNode = _regexCache.setdefault(fileType, {})

    if regexKey not in fileTypeNode:
        fileTypeNode[regexKey] = None

    if value is not None:
        fileTypeNode[regexKey] = re.compile(value, re.DOTALL)

    return fileTypeNode[regexKey]
def getLastMatch(matches, matchSplit, endOffset=None, splitGroup=-1):
    """Split `matches` on the regex `matchSplit` and return the selected
    piece (the last by default), optionally truncated to endOffset, and
    escaped for safe reuse inside another regular expression.
    """
    result = re.split(matchSplit, matches)
    result = result[splitGroup]
    # identity comparison with None is the idiomatic (and safe) test
    if endOffset is not None:
        result = result[:endOffset]
    return re.escape(result)
def getHeader(headerFileName, fileConfig):
    """Build the formatted header block from the text in headerFileName.

    Each line is optionally rewritten (textSearch -> textReplace),
    wrapped in the file type's lineTemplate, and the concatenation
    (minus its final character, the trailing newline) is wrapped in
    headerTemplate.
    """
    # the search/replace settings do not change per line; fetch them once
    # instead of on every loop iteration
    searchText = fileConfig.getConfig("textSearch")
    replaceText = fileConfig.getConfig("textReplace")

    headerText = ""
    with open(headerFileName, 'r') as headerInput:
        for line in headerInput:
            if searchText is not None and replaceText is not None:
                line = re.sub(re.escape(searchText), replaceText, line)
            headerText += fileConfig.getConfig("lineTemplate",
                                               {"lineText": line})

    # drop the trailing character of the accumulated text, as before
    return fileConfig.getConfig("headerTemplate",
                                {"headerText": headerText[:-1]})
def addOptions(commandLineParser):
    """Register HeaderUpdater's command line options on the given parser.

    Options cover header insertion behavior (-a), verbosity (-v), in-place
    editing (-i), backup reverting (-r), the header source file (-t), search
    configuration (-s/-S/-l) and file type configuration (-f/-e).
    """
    add = commandLineParser.add_option
    add("-a", "--disable-addmissing", dest="addMissing",
        default=True, action="store_false",
        help="do not add a header if an existing header is not found.")
    add("-v", "--verbose", dest="verbose",
        action="callback", callback=OptionCallback.flagWithOption,
        help="output what's happening to stderr. -v [DEBUG] enable debug output")
    add("-i", "--in-place", dest="backupSuffix",
        action="callback", callback=OptionCallback.flagWithOption,
        help="update FILE in place. -i [BACKUPSUFFIX] create a backup of the original file.")
    add("-r", "--revert-backup", dest="revertSuffix",
        help="revert FILE to FILEREVERTSUFFIX and remove backup")
    add("-t", "--textheader", dest="headerFile",
        help="read header text from HEADERFILE")
    add("-s", "--search", dest="searchString", default="",
        help="look for an existing header with a matching SEARCHSTRING.")
    add("-S", "--search-regex", dest="searchPattern",
        help="look for an existing header with a matching SEARCHPATTERN.")
    searchGroupDescription = ("Without specifying a SEARCHPATTERN"
        " or SEARCHSTRING a search will only be made for an existing"
        " header that matches the template. Specify a SEARCHSTRING or"
        " SEARCHPATTERN to enable block and line block header searching."
        " If both a SEARCHSTRING and SEARCHPATTERN are given, The"
        " SEARCHPATTERN will override the SEARCHSTRING.")
    commandLineParser.add_option_group(OptionGroup(commandLineParser,
        "SEARCHPATTERN|SEARCHSTRING", searchGroupDescription))
    add("-l", "--search-limit", dest="searchLimit",
        default=3000, type=int,
        help="look for an existing header within the first SEARCHLIMIT "
             "bytes. Recommend setting this to about 200% the size of the current"
             " header. default %default")
    add("-f", "--filetypes", dest="fileTypesDir",
        help="include the filetype configurations from FILETYPESDIR. "
             "Multiple directories may be specified using the `" + pathsep
             + "' path separater character")
    add("-e", "--ext", dest="fileExtension",
        help="specifiy the FILEEXTENSION to use")
def main(commandOption=None, FILE=None):
""" Execute HeaderUpdater from the command line
"""
# define the command line options
commandLineParser = OptionParser(usage="usage: %prog [OPTIONS] [FILE]",
version="%prog " + version)
commandLineParser.add_option_group(OptionGroup(commandLineParser,
"FILE", "Specify an input FILE. If no FILE is given or if"
+ " FILE is `-' read input from stdin. When reading from stdin"
+ " the -e option is required."))
addOptions(commandLineParser)
# parse the arguments
commandLineOption = None
args = None
if commandOption != None:
commandLineOption = commandOption
else:
(commandLineOption, args) = commandLineParser.parse_args()
if FILE != None:
args = [FILE]
if len(args) == 1:
inputFileName = args[0]
elif commandLineOption.fileExtension != None:
inputFileName = "-"
else:
commandLineParser.error("stdin requires -e option")
# setup the logger
logging.basicConfig(stream=sys.stderr,
format='%(name)-12s: %(levelname)-8s %(message)s')
logger = logging.getLogger(basename(inputFileName))
logLevel = logging.WARNING
verbose = commandLineOption.verbose
if verbose != None:
logLevel = logging.INFO
if verbose != "":
if verbose == "DEBUG":
logLevel = logging.DEBUG
logger.setLevel(logLevel)
# quickly restore a file from backup
revertSuffix = commandLineOption.revertSuffix
if revertSuffix != None:
try:
rename(inputFileName + revertSuffix, inputFileName)
except OSError, v:
logger.error(v)
return
# load the filetype configurations
fileTypeConfig = FileTypeConfig()
fileTypeConfig.fileType = splitext(inputFileName)[1]
if commandLineOption.fileExtension != None:
fileTypeConfig.fileType = commandLineOption.fileExtension
if commandLineOption.fileTypesDir != None:
fileTypeConfig.loadConfig(commandLineOption.fileTypesDir)
logger.debug("Loaded fileType configs from: " + commandLineOption.fileTypesDir)
# check for a configuration for the input file
if not(fileTypeConfig.isAvailable()):
logger.error("no " + fileTypeConfig.fileType + " configuration exists")
return 10
# read the inputfile
inputFile = sys.stdin
if inputFileName != "-":
inputFile = open(inputFileName, 'r')
inputHeader = inputFile.read(commandLineOption.searchLimit)
inputFooter = inputFile.read()
inputFile.close()
logger.info("Ready to process " + inputFileName)
searchOption = re.escape(commandLineOption.searchString)
if commandLineOption.searchPattern != None:
searchOption = commandLineOption.searchPattern
searchString = ".*?" + searchOption + ".*?"
# these offsets provide an easy way to handle line returns caught
# by the match
headerStartOffset = 0
headerEndOffset = 0
# create the newHeader
newHeader = None
if commandLineOption.headerFile != None:
newHeader = getHeader(commandLineOption.headerFile, fileTypeConfig)
# check that we don't already have the new header in the inputFile
notUpdated = False
logger.info("Checking if file already contains updated header")
headerMatch = None if newHeader == None else re.search(re.escape(newHeader), inputHeader, re.DOTALL)
if headerMatch != None:
notUpdated = True
logger.info("File already contains the updated header")
else:
# check if we can find a header matching the template
searchHeader = "\n*" + re.escape(fileTypeConfig.getConfig("headerTemplate", {"headerText":"searchStringPlaceholder"})) + "\n"
searchHeader = re.sub("searchStringPlaceholder", searchString, searchHeader)
logger.info("Checking if file contains a header matching the template")
headerMatch = re.search(searchHeader, inputHeader, re.DOTALL)
if headerMatch != None:
headerEndOffset = - 1
logger.info("Searching for the start of the header")
headerStartOffset = len(re.search("\n*", headerMatch.group()).group())
# we must check that each line starts with the lineTemplate
validTemplateMatch = True
header = headerMatch.group()[headerStartOffset:headerEndOffset]
logger.info("Ensuring each line in the header starts with the lineTemplate")
for line in header.split("\n")[1: - 1]:
lineSearch = fileTypeConfig.getConfig("lineTemplate", {"lineText":""})
lineMatch = re.search(re.escape(lineSearch), line)
if lineMatch == None:
validTemplateMatch = False
headerMatch = None
break
if validTemplateMatch == True:
logger.info("Found existing header matching template")
if headerMatch == None and searchString != ".*?.*?" and fileTypeConfig.getConfig("blockBegin") != None:
# try and find a header located inside a block comment
searchBlock = re.escape(fileTypeConfig.getConfig("blockBegin"))
searchBlock += searchString
searchBlock += re.escape(fileTypeConfig.getConfig("blockEnd"))
logger.info("Searching for header inside block comment")
headerMatch = re.search(searchBlock, inputHeader, re.DOTALL)
if headerMatch != None:
blockBegin = re.escape(fileTypeConfig.getConfig("blockBegin"))
isAmbiguousBlock = fileTypeConfig.getConfig("blockBegin") == fileTypeConfig.getConfig("blockEnd")
splitGroup = - 1
if isAmbiguousBlock == True:
splitGroup = - 2
headerSubGroup = getLastMatch(headerMatch.group(), blockBegin, splitGroup=splitGroup)
headerSubGroup = blockBegin + headerSubGroup
if isAmbiguousBlock == True:
headerSubGroup += blockBegin
logger.info("Searching last header inside block comment")
headerMatch = re.search(headerSubGroup, inputHeader, re.DOTALL)
if headerMatch != None:
logger.info("Found existing header inside block section")
if headerMatch == None and searchString != ".*?.*?" and fileTypeConfig.getConfig("lineComment") != None:
# try and find a header offset by line comments
# this is only done if the searchRegEx isn't the default,
# otherwise we will probably match something that isn't a header
lineComment = fileTypeConfig.getConfig("lineComment")
searchLine = re.escape(lineComment) + ".*?"
searchLine += searchString + "\n"
# lookahead assertions are AWESOME!
searchLine += "(?!" + re.escape(lineComment) + ")"
lineHeaderRegex = getRegex(fileTypeConfig.fileType, "lineHeader", searchLine)
logger.info("Searching for a header in a block of line comments")
headerMatch = lineHeaderRegex.match(inputHeader)
if headerMatch != None:
logger.info("Splitting the header into its line comment groups")
headerSubGroup = getLastMatch(headerMatch.group(),
"\n(?!" + re.escape(lineComment) + ").*?\n", - 1)
logger.info("Searching for the last header in a block of line comments")
headerMatch = re.search(headerSubGroup, inputHeader, re.DOTALL)
# handle situations where the header and placeAfter portion
# are not split by a a line
placeAfter = fileTypeConfig.getConfig("placeAfter")
if headerMatch != None and placeAfter != None:
placeAfterSearch = placeAfter + "(.*)"
logger.info("Searching to see if the header is directly after a placeAfter")
headerMinusPlaceAfter = re.search(placeAfterSearch, headerMatch.group(), re.DOTALL)
if headerMinusPlaceAfter != None:
logger.info("Extracting the header from the placeAfter")
headerMatch = re.search(re.escape(
headerMinusPlaceAfter.group(1)), inputHeader, re.DOTALL)
# we must check that each line starts with the lineComment
if headerMatch != None:
header = headerMatch.group()
logger.info("Verifying all lines in the header begin with a lineComment")
for line in header.split("\n"):
lineMatch = re.search("^" + re.escape(lineComment) + ".*", line)
if lineMatch == None:
headerMatch = None
break
if headerMatch != None:
logger.info("Found existing header in line comment section")
if (headerMatch != None
and commandLineOption.headerFile != None
and notUpdated == False):
# an existing header was found, we will need to replace it
outputHeader = (inputHeader[:headerMatch.start() + headerStartOffset] +
newHeader + inputHeader[headerMatch.end() + headerEndOffset:])
logger.info("Updated existing header")
logger.debug("\n" + headerMatch.group() + "\nwith: \n" + newHeader)
elif ((commandLineOption.addMissing and fileTypeConfig.getBooleanConfig("addMissing") != False)
and notUpdated == False
and commandLineOption.headerFile != None):
# an existing header was not found, we need to add a new one
placementSearch = fileTypeConfig.getConfig("placeAfter")
if placementSearch != None:
logger.info("Searching for the placeAfter")
placementMatch = re.search(placementSearch, inputHeader)
if placementMatch != None:
outputHeader = inputHeader[:placementMatch.end()]
if outputHeader[ - 1] != "\n":
outputHeader += "\n"
outputHeader += newHeader
if inputHeader[placementMatch.end()] != "\n":
outputHeader += "\n"
outputHeader += inputHeader[placementMatch.end():]
logger.info("Added new header after placement match")
logger.debug("\n" + newHeader + "\nplacement match:\n" +
placementMatch.group())
else:
# we didn't find the placement match
info = "Failed to find placement match, "
requirePlaceAfter = fileTypeConfig.getBooleanConfig("requirePlaceAfter")
if requirePlaceAfter == None:
requirePlaceAfter = True
if requirePlaceAfter == True:
outputHeader = inputHeader
logger.info(info + "no file modifications were made")
notUpdated = True
else:
outputHeader = newHeader
if len(inputHeader) != 0 and inputHeader[0] != "\n":
outputHeader += "\n"
outputHeader += inputHeader
logger.info(info + "but placement matching is not required")
logger.info("Added new header")
logger.debug("\n" + newHeader)
else:
outputHeader = newHeader
if inputHeader[0] != "\n":
outputHeader += "\n"
outputHeader += inputHeader
logger.info("Added new header")
logger.debug("\n" + newHeader)
else:
# don't do anything
outputHeader = inputHeader
logInfo = ""
if newHeader == None:
logInfo = "No header file | |
# Repository: tahme/etsin-finder (0 GitHub stars)
# This file is part of the Etsin service
#
# Copyright 2017-2018 Ministry of Education and Culture, Finland
#
# :author: CSC - IT Center for Science Ltd., Espoo Finland <<EMAIL>>
# :license: MIT
"""Used for performing operations related to Metax for Qvain Light"""
import requests
from flask import jsonify
from etsin_finder.finder import app
from etsin_finder.app_config import get_metax_qvain_api_config
from etsin_finder.utils import json_or_empty, FlaskService
import json
log = app.logger
class MetaxQvainLightAPIService(FlaskService):
"""Metax API Service"""
def __init__(self, app):
"""
Init Metax API Service.
:param metax_api_config:
"""
super().__init__(app)
metax_qvain_api_config = get_metax_qvain_api_config(app.testing)
if metax_qvain_api_config:
self.METAX_GET_DIRECTORY_FOR_PROJECT_URL = 'https://{0}/rest/directories'.format(metax_qvain_api_config['HOST']) + \
'/files?project={0}&path=%2F'
self.METAX_GET_DIRECTORY = 'https://{0}/rest/directories'.format(metax_qvain_api_config['HOST']) + \
'/{0}/files'
self.METAX_GET_FILE = 'https://{0}/rest/files'.format(metax_qvain_api_config['HOST']) + \
'/{0}'
self.METAX_GET_DATASET = 'https://{0}/rest/datasets'.format(metax_qvain_api_config['HOST'], ) + \
'/{0}?file_details'
self.METAX_GET_DATASETS_FOR_USER = 'https://{0}/rest/datasets'.format(metax_qvain_api_config['HOST']) + \
'?metadata_provider_user={0}&file_details&ordering=-date_created'
self.METAX_GET_ALL_DATASETS_FOR_USER = 'https://{0}/rest/datasets'.format(metax_qvain_api_config['HOST']) + \
'?metadata_provider_user={0}&file_details&ordering=-date_created&no_pagination=true'
self.METAX_CREATE_DATASET = 'https://{0}/rest/datasets?file_details'.format(metax_qvain_api_config['HOST'])
self.METAX_PATCH_DATASET = 'https://{0}/rest/datasets'.format(metax_qvain_api_config['HOST'], ) + \
'/{0}?file_details'
self.METAX_DELETE_DATASET = 'https://{0}/rest/datasets'.format(metax_qvain_api_config['HOST'], ) + \
'/{0}'
self.METAX_CHANGE_CUMULATIVE_STATE = 'https://{0}/rpc/datasets/change_cumulative_state'.format(metax_qvain_api_config['HOST'])
self.METAX_REFRESH_DIRECTORY_CONTENT = 'https://{0}/rpc/datasets/refresh_directory_content'.format(metax_qvain_api_config['HOST'])
self.METAX_FIX_DEPRECATED = 'https://{0}/rpc/datasets/fix_deprecated'.format(metax_qvain_api_config['HOST'])
self.user = metax_qvain_api_config['USER']
self.pw = metax_qvain_api_config['PASSWORD']
self.verify_ssl = metax_qvain_api_config.get('VERIFY_SSL', True)
elif not self.is_testing:
log.error("Unable to initialize MetaxAPIService due to missing config")
def get_directory_for_project(self, project_identifier):
"""
Get directory contents for a specific project
:param project_identifier:
:return:
"""
req_url = self.METAX_GET_DIRECTORY_FOR_PROJECT_URL.format(project_identifier)
try:
metax_qvain_api_response = requests.get(req_url,
headers={'Accept': 'application/json'},
auth=(self.user, self.pw),
verify=self.verify_ssl,
timeout=10)
metax_qvain_api_response.raise_for_status()
except Exception as e:
if isinstance(e, requests.HTTPError):
log.warning(
"Failed to get data for project \"{0}\" from Metax API\nResponse status code: {1}\nResponse text: {2}".format(
project_identifier,
metax_qvain_api_response.status_code,
json_or_empty(metax_qvain_api_response) or metax_qvain_api_response.text
))
else:
log.error("Failed to get data for project \"{0}\" from Metax API\n{1}".
format(project_identifier, e))
return None
return metax_qvain_api_response.json()
def get_directory(self, dir_identifier):
"""
Get a specific directory with directory's id
:param dir_identifier:
:return:
"""
req_url = self.METAX_GET_DIRECTORY.format(dir_identifier)
try:
metax_qvain_api_response = requests.get(req_url,
headers={'Accept': 'application/json'},
auth=(self.user, self.pw),
verify=self.verify_ssl,
timeout=10)
metax_qvain_api_response.raise_for_status()
except Exception as e:
if isinstance(e, requests.HTTPError):
log.warning(
"Failed to get data for directory \"{0}\" from Metax API\nResponse status code: {1}\nResponse text: {2}".format(
dir_identifier,
metax_qvain_api_response.status_code,
json_or_empty(metax_qvain_api_response) or metax_qvain_api_response.text
))
else:
log.error("Failed to get data for directory \"{0}\" from Metax API\n{1}".
format(dir_identifier, e))
return None
return metax_qvain_api_response.json()
def get_file(self, file_identifier):
"""
Get a specific file with file's id
:param file_identifier:
:return:
"""
req_url = self.METAX_GET_FILE.format(file_identifier)
try:
metax_qvain_api_response = requests.get(req_url,
headers={'Accept': 'application/json'},
auth=(self.user, self.pw),
verify=self.verify_ssl,
timeout=10)
metax_qvain_api_response.raise_for_status()
except Exception as e:
if isinstance(e, requests.HTTPError):
log.warning(
"Failed to get data for file \"{0}\" from Metax API\nResponse status code: {1}\nResponse text: {2}".format(
file_identifier,
metax_qvain_api_response.status_code,
json_or_empty(metax_qvain_api_response) or metax_qvain_api_response.text
))
else:
log.error("Failed to get data for file \"{0}\" from Metax API\n{1}".
format(file_identifier, e))
return None
return metax_qvain_api_response.json()
def patch_file(self, file_identifier, data):
"""
Patch metadata for a file with given data.
Useful for updating file_characteristics. Can be also used to change other fields
such as identifier, so be careful when passing user input to avoid data corruption.
Arguments:
file_identifier {data} -- The identifier of the file.
data {dict} -- Dictionary of fields that will be replaced in file metadata, other fields directly under the file will be
preserved. For example, data = { 'file_characteristics': { 'csv_has_header': True } } would enable
file_characteristics.csv_has_header and remove any other fields nested under file_characteristics.
Returns:
[type] -- The response from Metax.
"""
req_url = self.METAX_GET_FILE.format(file_identifier)
try:
metax_qvain_api_response = requests.patch(req_url,
headers={'Accept': 'application/json', 'Content-Type': 'application/json'},
data=json.dumps(data),
auth=(self.user, self.pw),
verify=self.verify_ssl,
timeout=10)
metax_qvain_api_response.raise_for_status()
except Exception as e:
if isinstance(e, requests.HTTPError):
log.warning(
"Failed to patch file \"{0}\" from Metax API\nResponse status code: {1}\nResponse text: {2}".format(
file_identifier,
metax_qvain_api_response.status_code,
json_or_empty(metax_qvain_api_response) or metax_qvain_api_response.text
))
else:
log.error("Failed to patch file \"{0}\" from Metax API\n{1}".
format(file_identifier, e))
return (json_or_empty(metax_qvain_api_response) or metax_qvain_api_response.text), metax_qvain_api_response.status_code
return metax_qvain_api_response.json()
def get_datasets_for_user(self, user_id, limit, offset, no_pagination):
"""
Get datasets created by the specified user. Uses pagination, so offset and limit are used as well.
:param user_id:
:return datasets:
"""
req_url = self.METAX_GET_DATASETS_FOR_USER.format(user_id)
if (no_pagination):
req_url = self.METAX_GET_ALL_DATASETS_FOR_USER.format(user_id)
if (limit):
req_url = req_url + "&limit={0}".format(limit[0])
if (offset):
req_url = req_url + "&offset={}".format(offset[0])
try:
metax_api_response = requests.get(req_url,
headers={'Accept': 'application/json'},
auth=(self.user, self.pw),
verify=self.verify_ssl,
timeout=10)
metax_api_response.raise_for_status()
except Exception as e:
if isinstance(e, requests.HTTPError):
log.warning(
"Failed to get datasets for user \"{0}\" from Metax API\nResponse status code: {1}\nResponse text: {2}".format(
user_id,
metax_api_response.status_code,
json_or_empty(metax_api_response) or metax_api_response.text
))
else:
log.error("Failed to get datasets for user \"{0}\" from Metax API \n{1}".
format(user_id, e))
return None
if (len(metax_api_response.json()) == 0):
log.info('No datasets found.')
return 'no datasets'
return metax_api_response.json()
def create_dataset(self, data, params=None, use_doi=False):
"""
Send the data from the frontend to Metax.
Arguments:
data {object} -- Object with the dataset data that has been validated and converted to comply with the Metax schema.
params {dict} -- Dictionary of key-value pairs of query parameters.
Returns:
[type] -- The response from Metax.
"""
req_url = self.METAX_CREATE_DATASET
if use_doi is True:
req_url += '&pid_type=doi'
headers = {'Accept': 'application/json'}
try:
metax_api_response = requests.post(req_url,
params=params,
json=data,
headers=headers,
auth=(self.user, self.pw),
verify=self.verify_ssl,
timeout=30)
metax_api_response.raise_for_status()
except Exception as e:
if isinstance(e, requests.HTTPError):
log.warning(
"Failed to create dataset.\nResponse status code: {0}\nResponse text: {1}".format(
metax_api_response.status_code,
json_or_empty(metax_api_response) or metax_api_response.text
))
return metax_api_response.json(), metax_api_response.status_code
else:
log.error("Error creating dataset\n{0}".format(e))
return {'Error_message': 'Error trying to send data to metax.'}, metax_api_response.status_code
log.info('Created dataset with identifier: {}'.format(json.loads(metax_api_response.text).get('identifier', 'COULD-NOT-GET-IDENTIFIER')))
return metax_api_response.json(), metax_api_response.status_code
def update_dataset(self, data, cr_id, last_modified, params):
"""
Update a dataset with the data that the user has entered in Qvain-light.
Arguments:
data {object} -- Object with the dataset data that has been validated and converted to comply with the Metax schema.
cr_id {string} -- The identifier of the dataset.
last_modified {string} -- HTTP datetime string (RFC2616)
params {dict} -- Dictionary of key-value pairs of query parameters.
Returns:
[type] -- The response from Metax.
"""
req_url = self.METAX_PATCH_DATASET.format(cr_id)
headers = {'Accept': 'application/json', 'If-Unmodified-Since': last_modified}
log.debug('Request URL: {0}\nHeaders: {1}\nData: {2}'.format(req_url, headers, data))
try:
metax_api_response = requests.patch(req_url,
params=params,
json=data,
headers=headers,
auth=(self.user, self.pw),
verify=self.verify_ssl,
timeout=30)
metax_api_response.raise_for_status()
except Exception as e:
if isinstance(e, requests.HTTPError):
log.warning(
"Failed to update dataset {0}.\nResponse status code: {1}\nResponse text: {2}".format(
cr_id,
metax_api_response.status_code,
json_or_empty(metax_api_response) or metax_api_response.text
))
return metax_api_response.json(), metax_api_response.status_code
else:
log.error("Error updating dataset {0}\n{1}"
.format(cr_id, e))
return 'Error trying to send data to metax.', 500
log.info('Updated dataset with identifier: {}'.format(cr_id))
if metax_api_response.status_code == 412:
return 'Resource has been modified since last publish', 412
return metax_api_response.json(), metax_api_response.status_code
def get_dataset(self, cr_id):
"""
Get dataset.
Arguments:
cr_id {string} -- The identifier of the dataset.
Returns:
[type] -- Metax response.
"""
req_url = self.METAX_GET_DATASET.format(cr_id)
headers = {'Accept': 'application/json'}
try:
metax_api_response = requests.get(req_url,
headers=headers,
auth=(self.user, self.pw),
verify=self.verify_ssl,
timeout=10)
metax_api_response.raise_for_status()
except Exception as e:
if isinstance(e, requests.HTTPError):
log.warning(
"Failed to get dataset {0}\nResponse status code: {1}\nResponse text: {2}".format(
cr_id,
metax_api_response.status_code,
json_or_empty(metax_api_response) or metax_api_response.text
))
else:
log.error("Error getting dataset {0}\n{1}".format(cr_id, e))
return {'Error_message': 'Error getting data from Metax.'}, metax_api_response.status_code
return json_or_empty(metax_api_response), metax_api_response.status_code
def delete_dataset(self, cr_id):
"""
Delete dataset from Metax.
Arguments:
cr_id {string} -- The identifier of the dataset.
Returns:
[type] -- Metax response.
"""
req_url = self.METAX_DELETE_DATASET.format(cr_id)
headers = {'Accept': 'application/json'}
try:
metax_api_response = requests.delete(req_url,
headers=headers,
auth=(self.user, self.pw),
verify=self.verify_ssl,
timeout=10)
metax_api_response.raise_for_status()
except Exception as e:
if isinstance(e, requests.HTTPError):
log.warning(
"Failed to delete dataset {0}\nResponse status code: {1}\nResponse text: {2}".format(
cr_id,
metax_api_response.status_code,
json_or_empty(metax_api_response) or metax_api_response.text
))
else:
log.error("Error deleting dataset {0}\n{1}".format(cr_id, e))
return {'Error_message': 'Error trying to send data to metax.'}
log.info('Deleted dataset with identifier: {}'.format(cr_id))
return metax_api_response.status_code
def change_cumulative_state(self, cr_id, cumulative_state):
"""
Call Metax change_cumulative_state RPC.
Arguments:
cr_id {string} -- The identifier of the dataset.
cumulative_state {integer} -- New cumulative state.
Returns:
[type] -- Metax response.
"""
req_url = self.METAX_CHANGE_CUMULATIVE_STATE
params = {
"identifier": cr_id,
"cumulative_state": cumulative_state
}
headers = {'Accept': 'application/json'}
try:
metax_api_response = requests.post( req_url,
headers=headers,
auth=(self.user, self.pw),
verify=self.verify_ssl,
params=params,
timeout=10)
metax_api_response.raise_for_status()
except Exception as e:
if isinstance(e, requests.HTTPError):
log.warning(
"Failed to change cumulative state of dataset {0}\nResponse status code: {1}\nResponse text: {2}".format(
cr_id,
metax_api_response.status_code,
json_or_empty(metax_api_response) or metax_api_response.text
))
else:
log.error("Error changing cumulative state of dataset {0}\n{1}".format(cr_id, e))
return {'detail': 'Error trying to send data to metax.'}, 500
log.info('Changed cumulative state of dataset {} to {}'.format(cr_id, cumulative_state))
return (json_or_empty(metax_api_response) or metax_api_response.text), metax_api_response.status_code
def refresh_directory_content(self, cr_identifier, dir_identifier):
"""
Call Metax refresh_directory_content RPC.
Arguments:
cr_identifier {string} -- The identifier of the dataset.
dir_identifier {integer} -- The identifier of the directory.
Returns:
[type] -- Metax response.
"""
req_url = self.METAX_REFRESH_DIRECTORY_CONTENT
params = {
| |
}
Returns
-------
K_new : numpy array
Updated Kraus parametrizations
"""
#setup
pdim = int(np.sqrt(r))
n = rK*pdim
nt = rK*r
H = np.zeros((2*nt,2*nt)).astype(np.complex128)
P_T = np.zeros((2*nt,2*nt)).astype(np.complex128)
Delta_K = np.zeros((d,rK,pdim,pdim)).astype(np.complex128)
X = np.einsum('ijkl,ijnm -> iknlm', K, K.conj()).reshape(d,r,r)
#compute derivatives
dK_, dM10, dM11 = dK_dMdM(X,K,E,rho,J,y,l,d,r,rK)
dd, dconjd = ddM(X,K,E,rho,J,y,l,d,r,rK)
#Second derivatives
Fyconjy = dM11.reshape(d,nt,d,nt) + np.einsum('ijklmnop->ikmojlnp',dconjd).reshape(d,nt,d,nt)
Fyy = dM10.reshape(d,nt,d,nt) + np.einsum('ijklmnop->ikmojlnp',dd).reshape(d,nt,d,nt)
for k in range(d):
Fy = dK_[k].reshape(n,pdim)
Y = K[k].reshape(n,pdim)
rGrad = Fy.conj() - Y@Fy.T@Y #riemannian gradient, taken from conjugate derivative
G = np.array([rGrad,rGrad.conj()]).reshape(-1)
P = np.eye(n) - Y@Y.T.conj()
T = transp(n,pdim)
#Riemannian Hessian with correction terms
H00 = -(np.kron(Y,Y.T))@T@Fyy[k,:,k,:].T + Fyconjy[k,:,k,:].T.conj() -(np.kron(np.eye(n),Y.T@Fy))/2 - (np.kron(Y@Fy.T,np.eye(pdim)))/2 -(np.kron(P,Fy.T.conj()@Y.conj()))/2
H01 = Fyy[k,:,k,:].T.conj() - np.kron(Y,Y.T)@T@Fyconjy[k,:,k,:].T + (np.kron(Fy.conj(),Y.T)@T)/2 + (np.kron(Y,Fy.T.conj())@T)/2
H[:nt,:nt] = H00
H[:nt,nt:] = H01
H[nt:,:nt] = H[:nt,nt:].conj()
H[nt:,nt:] = H[:nt,:nt].conj()
#Tangent space projection
P_T[:nt,:nt] = np.eye(nt)- np.kron(Y@Y.T.conj(),np.eye(pdim))/2
P_T[:nt,nt:] = - np.kron(Y,Y.T)@T/2
P_T[nt:,:nt] = P_T[:nt,nt:].conj()
P_T[nt:,nt:] = P_T[:nt,:nt].conj()
H = H@P_T
#saddle free newton method
evals,S = eig(H)
H_abs_inv = S@np.diag(1/(np.abs(evals)+ lam))@la.inv(S)
Delta_K[k] = ((H_abs_inv@G)[:nt]).reshape(rK,pdim,pdim)
Delta = tangent_proj(K,Delta_K,d,rK)
res = minimize(lineobjf_isom_geodesic, 1e-8, args=(Delta,K,E,rho,J,y), method = ls, options={'maxiter':20})
a = res.x
K_new = update_K_geodesic(K,Delta,a), np.linalg.norm(Delta_K)
return K_new
def SFN_riem_Hess_full(K,E,rho,y,J,l,d,r,rK,lam = 1e-3, ls = 'COBYLA'):
    """!
    Riemannian saddle free Newton step on product manifold of all gates

    Parameters
    -------
    K : numpy array
        Each subarray along the first axis contains a set of Kraus operators.
        The second axis enumerates Kraus operators for a gate specified by the first axis.
    E : numpy array
        Current POVM estimate
    rho : numpy array
        Current initial state estimate
    y : numpy array
        2D array of measurement outcomes for sequences in J;
        Each column contains the outcome probabilities for a fixed sequence
    J : numpy array
        2D array where each row contains the gate indices of a gate sequence
    l : int
        Length of the test sequences
    d : int
        Number of different gates in the gate set
    r : int
        Superoperator dimension of the gates given by the square of the physical dimension
    rK : int
        Target Kraus rank
    lam : float
        Damping parameter for the saddle free Newton method; Default: 1e-3
    ls : {"COBYLA", ...}
        Line search method, takes "method" arguments of scipy.optimize.minimize

    Returns
    -------
    K_new : numpy array
        Updated Kraus parametrizations
    """
    #setup: pdim is the physical dimension, n x pdim is the Stiefel matrix
    # shape of each gate's stacked Kraus operators, nt is its element count
    pdim = int(np.sqrt(r))
    n = rK*pdim
    nt = rK*r
    # H and P_T are indexed by (conjugation flag, gate, parameter) on both
    # axes so the full Hessian couples all gates; G holds the gradient and
    # its conjugate stacked along the first axis
    H = np.zeros((2,d,nt,2,d,nt)).astype(np.complex128)
    P_T = np.zeros((2,d,nt,2,d,nt)).astype(np.complex128)
    G = np.zeros((2,d,nt)).astype(np.complex128)
    # superoperators X reconstructed from the Kraus operators
    X = np.einsum('ijkl,ijnm -> iknlm', K, K.conj()).reshape(d,r,r)
    #compute derivatives (dK_dMdM and ddM are project helpers defined elsewhere)
    dK_, dM10, dM11 = dK_dMdM(X,K,E,rho,J,y,l,d,r,rK)
    dd, dconjd = ddM(X,K,E,rho,J,y,l,d,r,rK)
    #Second derivatives, reshaped to (gate, params, gate, params) blocks
    Fyconjy = dM11.reshape(d,nt,d,nt) + np.einsum('ijklmnop->ikmojlnp',dconjd).reshape(d,nt,d,nt)
    Fyy = dM10.reshape(d,nt,d,nt) + np.einsum('ijklmnop->ikmojlnp',dd).reshape(d,nt,d,nt)
    for k in range(d):
        Fy = dK_[k].reshape(n,pdim)
        Y = K[k].reshape(n,pdim)
        # Riemannian gradient taken from the conjugate (Wirtinger) derivative
        rGrad = Fy.conj() - Y@Fy.T@Y
        G[0,k,:] = rGrad.reshape(-1)
        G[1,k,:] = rGrad.conj().reshape(-1)
        # P projects onto the orthogonal complement of the current isometry Y
        P = np.eye(n) - Y@Y.T.conj()
        T = transp(n,pdim)
        H00 = -(np.kron(Y,Y.T))@T@Fyy[k,:,k,:].T + Fyconjy[k,:,k,:].T.conj() -(np.kron(np.eye(n),Y.T@Fy))/2 - (np.kron(Y@Fy.T,np.eye(pdim)))/2 -(np.kron(P,Fy.T.conj()@Y.conj()))/2
        H01 = Fyy[k,:,k,:].T.conj() - np.kron(Y,Y.T)@T@Fyconjy[k,:,k,:].T + (np.kron(Fy.conj(),Y.T)@T)/2 + (np.kron(Y,Fy.T.conj())@T)/2
        #Riemannian Hessian with correction terms; off-diagonal conjugate
        # blocks are the conjugates of the diagonal ones
        H[0,k,:,0,k,:] = H00
        H[0,k,:,1,k,:] = H01
        H[1,k,:,0,k,:] = H01.conj()
        H[1,k,:,1,k,:] = H00.conj()
        #Tangent space projection for gate k (same 2x2 conjugate structure)
        P_T[0,k,:,0,k,:] = np.eye(nt) - np.kron(Y@Y.T.conj(),np.eye(pdim))/2
        P_T[0,k,:,1,k,:] = - np.kron(Y,Y.T)@T/2
        P_T[1,k,:,0,k,:] = P_T[0,k,:,1,k,:].conj()
        P_T[1,k,:,1,k,:] = P_T[0,k,:,0,k,:].conj()
        # cross-gate coupling blocks of the Hessian (no curvature correction
        # terms here since they involve two different manifold factors)
        for k2 in range(d):
            if k2 != k:
                Yk2 = K[k2].reshape(n,pdim)
                H[0,k2,:,0,k,:] = Fyconjy[k,:,k2,:].T.conj()-np.kron(Yk2,Yk2.T)@T@Fyy[k,:,k2,:].T
                H[0,k2,:,1,k,:] = Fyy[k,:,k2,:].T.conj()-np.kron(Yk2,Yk2.T)@T@Fyconjy[k,:,k2,:].T
                H[1,k2,:,0,k,:] = H[0,k2,:,1,k,:].conj()
                H[1,k2,:,1,k,:] = H[0,k2,:,0,k,:].conj()
    H = H.reshape(2*d*nt,-1)@P_T.reshape(2*d*nt,-1)
    #application of saddle free newton method: symmetrize, take |eigenvalues|
    # with damping lam, and solve for the step
    H = (H + H.T.conj())/2
    evals,U = eigh(H)
    H_abs_inv = U@np.diag(1/(np.abs(evals) + lam))@U.T.conj()
    Delta_K = ((H_abs_inv@G.reshape(-1))[:d*nt]).reshape(d,rK,pdim,pdim)
    Delta = tangent_proj(K,Delta_K,d,rK) #Delta_K is already in tangent space but not to sufficient numerical accuracy
    # line search along the geodesic in the step direction; 1e-8 is the
    # initial step size guess
    res = minimize(lineobjf_isom_geodesic, 1e-8, args=(Delta,K,E,rho,J,y), method = ls, options={'maxiter':20})
    a = res.x
    K_new = update_K_geodesic(K,Delta,a)
    return K_new
def optimize(y,J,l,d,r,rK,n_povm, method, K, E, rho, A, B):
    """!
    Full gate set optimization update alternating on E, K and rho

    Parameters
    -------
    y : numpy array
        2D array of measurement outcomes for sequences in J;
        Each column contains the outcome probabilities for a fixed sequence
    J : numpy array
        2D array where each row contains the gate indices of a gate sequence
    l : int
        Length of the test sequences
    d : int
        Number of different gates in the gate set
    r : int
        Superoperator dimension of the gates given by the square of the physical dimension
    rK : int
        Target Kraus rank
    n_povm : int
        Number of POVM-Elements
    method : {"SFN", "GD"}
        Optimization method, Default: "SFN"
    K : numpy array
        Current estimates of Kraus operators
    E : numpy array
        Current POVM estimate
    rho : numpy array
        Current initial state estimate
    A : numpy array
        Current POVM parametrization
    B : numpy array
        Current initial state parametrization

    Returns
    -------
    K_new : numpy array
        Updated estimates of Kraus operators
    X_new : numpy array
        Updated estimates of superoperators corresponding to K_new
    E_new : numpy array
        Updated POVM estimate
    rho_new : numpy array
        Updated initial state estimate
    A_new : numpy array
        Updated POVM parametrization
    B_new : numpy array
        Updated initial state parametrization
    """
    pdim = int(np.sqrt(r))  # physical dimension (r is its square, per the docstring)
    # POVM update first: E is rebuilt from its (Cholesky-style) parametrization A
    A_new = A_SFN_riem_Hess(K,A,B,y,J,l,d,r,rK,n_povm)
    E_new = np.array([(A_new[i].T.conj()@A_new[i]).reshape(-1) for i in range(n_povm)])
    # Gate (Kraus operator) update with the selected optimizer
    if method == 'SFN':
        K_new = SFN_riem_Hess_full(K,E_new,rho,y,J,l,d,r,rK,lam = 1e-3, ls = 'COBYLA')
    elif method == 'GD':
        K_new = gd(K,E_new,rho,y,J,l,d,r,rK, ls = 'COBYLA')
    # Initial state update: rho = B @ B^dagger, renormalized to unit trace
    B_new = A_B_SFN(K_new,A_new,B,y,J,l,d,r,rK,argument = "rho")
    rho_new = (B_new@B_new.T.conj()).reshape(-1)
    rho_new = rho_new/np.trace(rho_new.reshape(pdim,pdim))
    # Superoperators corresponding to the updated Kraus operators
    X_new = np.einsum('ijkl,ijnm -> iknlm', K_new, K_new.conj()).reshape(d,r,r)
    return K_new, X_new, E_new, rho_new, A_new, B_new
def run_mGST(*args, method = 'SFN', max_inits = 10,
max_iter = 200, final_iter = 70, target_rel_prec = 1e-4,
init = []):
"""!
Main mGST routine
Parameters
-------
y : numpy array
2D array of measurement outcomes for sequences in J;
Each column contains the outcome probabilities for a fixed sequence
J : numpy array
2D array where each row contains the gate indices of a gate sequence
l : int
Length of the test sequences
d : int
Number of different gates in the gate set
r : int
Superoperator dimension of the gates given by the square of the physical dimension
rK : int
Target Kraus rank
n_povm : int
Number of POVM-Elements
bsize : int
Size of the batch (number of sequences)
meas_samples : int
Number of samples taken per gate sequence to obtain measurement array y
method : {"SFN", "GD"}
Optimization method, Default: "SFN"
max_reruns : int
Maximum number or reinitializations; Default: 10
max_iter : int
Maximum number of iterations on batches; Default: 200
final_iter : int
Maximum number of iterations on full data set; Default: 70
target_rel_prec : float
Target precision relative to stopping value at which the final iteration loop breaks
init : [ , , ]
List of 3 numpy arrays in the format [X,E,rho], that can be used as an initialization;
If no initialization is given a random initialization is used
Returns
-------
K : numpy array
Updated estimates of Kraus operators
X : numpy array
Updated estimates of superoperatos corresponding to K_new
E : numpy array
Updated POVM estimate
rho : numpy array
Updated initial state estimate
res_list : list
Collected objective function values after each iteration
"""
y,J,l,d,r,rK,n_povm, bsize, meas_samples = args
t0 = time.time()
pdim = int(np.sqrt(r))
delta = 3*(1-y.reshape(-1))@y.reshape(-1)/len(J)/n_povm/meas_samples
if init:
K = init[0]
E = init[1]
rho = init[2]+1e-14*np.eye(pdim).reshape(-1) #offset small negative eigenvalues for stability
A = np.array([la.cholesky(E[k].reshape(pdim,pdim)+1e-14*np.eye(pdim)).T.conj()
for k in range(n_povm)])
B = la.cholesky(rho.reshape(pdim,pdim))
X = np.einsum('ijkl,ijnm -> iknlm', K, K.conj()).reshape(d,r,r)
max_reruns = 1
succ = 0
for i in range(max_inits):
if not init:
K,X,E,rho = random_gs(d,r,rK,n_povm)
A = np.array([la.cholesky(E[k].reshape(pdim,pdim)+1e-14*np.eye(pdim)).T.conj()
for k in range(n_povm)])
B = la.cholesky(rho.reshape(pdim,pdim))
res_list = [objf(X,E,rho,J,y,d,l)]
for j in range(max_iter):
yb,Jb = batch(y,J,bsize)
K,X,E,rho,A,B = optimize(yb,Jb,l,d,r,rK, n_povm, method, K, E, rho, A, B)
res_list.append(objf(X,E,rho,J,y,d,l))
if res_list[-1] < delta:
| |
extension
env: the environment
"""
if args is None:
args = []
if vars is None:
vars = {}
if c != "static":
# Hide disabled modules
settings = current.deployment_settings
if not settings.has_module(c):
return False
if t is None:
t = "%s_%s" % (c, f)
table = current.s3db.table(t)
if not table:
t = None
if not p:
p = "read"
permitted = self.has_permission(p, c=c, f=f, t=t)
if permitted:
return URL(a = a,
c = c,
f = f,
args = args,
vars = vars,
anchor = anchor,
extension = extension,
env = env,
)
else:
return False
# -------------------------------------------------------------------------
def fail(self):
    """ Action upon insufficient permissions: flash+redirect for
        interactive (HTML) requests, HTTP 403/401 otherwise """
    if self.format == "html":
        # HTML interactive request => flash message + redirect
        if self.auth.s3_logged_in():
            # Authenticated but not authorized => back to the homepage
            current.session.error = self.INSUFFICIENT_PRIVILEGES
            redirect(self.homepage)
        else:
            # Not authenticated => to the login page
            current.session.error = self.AUTHENTICATION_REQUIRED
            redirect(self.loginpage)
    else:
        # Non-HTML request => raise HTTP status
        if self.auth.s3_logged_in():
            raise HTTP(403, body=self.INSUFFICIENT_PRIVILEGES)
        # RFC1945/2617 compliance:
        # Must raise an HTTP Auth challenge with status 401
        headers = {"WWW-Authenticate":
                   "Basic realm=\"%s\"" % current.request.application,
                   }
        # Add Master Key Auth token if enabled + requested
        if current.deployment_settings.get_auth_masterkey():
            from s3.s3masterkey import S3MasterKey
            S3MasterKey.challenge(headers)
        raise HTTP(401, body=self.AUTHENTICATION_REQUIRED, **headers)
# -------------------------------------------------------------------------
# ACL Lookup
# -------------------------------------------------------------------------
def applicable_acls(self, racl,
                    realms = None,
                    c = None,
                    f = None,
                    t = None,
                    entity = None,
                    ):
    """
    Find all applicable ACLs for the specified situation for
    the specified realms

    Args:
        racl: the required ACL (bitmap of required permissions)
        realms: the realms (dict: group_id => realm entities or None)
        c: the controller name, falls back to current request
        f: the function name, falls back to current request
        t: the tablename
        entity: the realm entity

    Returns:
        - None for no ACLs defined (allow), or
        - {} for no ACLs applicable (deny), or
        - dict of applicable ACLs per realm entity
    """
    if not self.use_cacls:
        # We do not use ACLs at all (allow all)
        return None
    else:
        acls = {}

    # Get all roles
    if realms:
        roles = set(realms.keys())
    else:
        # No roles available (deny all)
        return acls

    db = current.db
    table = self.table

    c = c or self.controller
    f = f or self.function
    page_restricted = self.page_restricted(c=c, f=f)

    # Base query
    query = (table.group_id.belongs(roles)) & \
            (table.deleted == False)

    # Page ACLs
    if page_restricted:
        # Match controller-level rules, and function-level rules if enabled
        q = (table.function == None)
        if f and self.use_facls:
            q |= (table.function == f)
        q = (table.controller == c) & q
    else:
        q = None

    # Table ACLs
    if t and self.use_tacls:
        # Be sure to use the original table name
        if hasattr(t, "_tablename"):
            t = original_tablename(t)
        tq = (table.tablename == t) & \
             (table.controller == None) & \
             (table.function == None)
        q = tq if q is None else q | tq
        table_restricted = self.table_restricted(t)
    else:
        table_restricted = False

    # Retrieve the ACLs
    if q is not None:
        query = q & query
        rows = db(query).select(table.group_id,
                                table.controller,
                                table.function,
                                table.tablename,
                                table.unrestricted,
                                table.entity,
                                table.uacl,
                                table.oacl,
                                cacheable = True,
                                )
    else:
        rows = []

    # Cascade ACLs
    ANY = "ANY"

    ALL = (self.ALL, self.ALL)
    NONE = (self.NONE, self.NONE)

    use_facls = self.use_facls
    def rule_type(r):
        # Classify a rule as controller-("c"), function-("f") or table-level ("t")
        if r.controller is not None:
            if r.function is None:
                return "c"
            elif use_facls:
                return "f"
        elif r.tablename is not None:
            return "t"
        return None

    # ACLs are (uacl, oacl) bitmap pairs; OR widens, AND narrows permissions
    most_permissive = lambda x, y: (x[0] | y[0], x[1] | y[1])
    most_restrictive = lambda x, y: (x[0] & y[0], x[1] & y[1])

    # Realms
    use_realms = self.entity_realm
    for row in rows:

        # Get the assigning entities
        group_id = row.group_id
        if group_id not in realms:
            continue
        rtype = rule_type(row)
        if rtype is None:
            continue
        if use_realms:
            if row.unrestricted:
                entities = [ANY]
            elif row.entity is not None:
                entities = [row.entity]
            else:
                entities = realms[group_id]
            if entities is None:
                entities = [ANY]
        else:
            entities = [ANY]

        # Merge the ACL (most permissive wins within the same rule type)
        acl = (row["uacl"], row["oacl"])
        for e in entities:
            if e in acls:
                eacls = acls[e]
                if rtype in eacls:
                    eacls[rtype] = most_permissive(eacls[rtype], acl)
                else:
                    eacls[rtype] = acl
            else:
                acls[e] = {rtype: acl}

    acl = acls.get(ANY, {})

    # Default page ACL
    if "c" in acl:
        default_page_acl = acl["f"] if "f" in acl else acl["c"]
    elif page_restricted:
        default_page_acl = NONE
    else:
        default_page_acl = ALL

    # Default table ACL
    if "t" in acl:
        # If we have a table rule, apply it
        default_table_acl = acl["t"]
    elif self.use_tacls and table_restricted:
        # A restricted table is not accessible on any page without an
        # explicit table rule (once explicit => always explicit!)
        default_table_acl = NONE
    else:
        # An unrestricted table is accessible under the page rule
        default_table_acl = default_page_acl if page_restricted else ALL

    # No ACLs inevitably causes a "no applicable ACLs" permission failure,
    # so for unrestricted pages or tables, we must create a default ACL
    # here in order to have the default apply:
    if not acls:
        if t and self.use_tacls:
            if not table_restricted:
                acls[ANY] = {"t": default_table_acl}
        elif not page_restricted:
            acls[ANY] = {"c": default_page_acl}

    # Order by precedence
    s3db = current.s3db
    ancestors = set()
    if entity and self.entity_hierarchy and \
       s3db.pr_instance_type(entity) == "pr_person":
        # If the realm entity is a person, then we apply the ACLs
        # for the immediate OU ancestors, for two reasons:
        # a) it is not possible to assign roles for personal realms anyway
        # b) looking up OU ancestors of a person (=a few) is much more
        #    efficient than looking up pr_person OU descendants of the
        #    role realm (=could be tens or hundreds of thousands)
        ancestors = set(s3db.pr_default_realms(entity))

    result = {}
    for e in acls:
        # Skip irrelevant ACLs
        if entity and e != entity and e != ANY:
            if e in ancestors:
                key = entity
            else:
                continue
        else:
            key = e
        acl = acls[e]

        # Get the page ACL
        if "f" in acl:
            page_acl = most_permissive(default_page_acl, acl["f"])
        elif "c" in acl:
            page_acl = most_permissive(default_page_acl, acl["c"])
        elif page_restricted:
            page_acl = default_page_acl
        else:
            page_acl = ALL

        # Get the table ACL
        if "t" in acl:
            table_acl = most_permissive(default_table_acl, acl["t"])
        elif table_restricted:
            table_acl = default_table_acl
        else:
            table_acl = ALL

        # Merge: effective ACL is the intersection of page and table permissions
        acl = most_restrictive(page_acl, table_acl)

        # Include ACL if relevant
        if acl[0] & racl == racl or acl[1] & racl == racl:
            result[key] = acl

    #for pe in result:
    #    import sys
    #    sys.stderr.write("ACL for PE %s: %04X %04X\n" %
    #                     (pe, result[pe][0], result[pe][1]))

    return result
# -------------------------------------------------------------------------
# Utilities
# -------------------------------------------------------------------------
def page_restricted(self, c=None, f=None):
    """
    Check whether a page is restricted (i.e. whether ACLs
    are to be applied to it)

    Args:
        c: controller name
        f: function name
    """
    page = "%s/%s" % (c, f)
    if page in self.unrestricted_pages:
        # Explicitly exempted page
        return False
    if c == "default" and f in ("tables", "table"):
        # default/tables and default/table are always restricted
        return True
    # Otherwise restriction follows the module setting (default: restricted)
    modules = current.deployment_settings.modules
    return c in modules and modules[c].get("restricted", True)
# -------------------------------------------------------------------------
def table_restricted(self, t=None):
    """
    Check whether access to a table is restricted
    (i.e. whether any global table-level ACL rule exists for it)

    Args:
        t: the table name or Table
    """
    s3 = current.response.s3

    if not "restricted_tables" in s3:
        # Compute the list of restricted tables only once per response object
        table = self.table
        # Table-level rules are those without controller/function scope
        query = (table.controller == None) & \
                (table.function == None) & \
                (table.deleted == False)
        rows = current.db(query).select(table.tablename,
                                        groupby = table.tablename,
                                        )
        s3.restricted_tables = [row.tablename for row in rows]

    return str(t) in s3.restricted_tables
# -------------------------------------------------------------------------
def hidden_modules(self):
""" List of modules to hide from the main menu """
hidden_modules = []
if self.use_cacls:
sr = self.auth.get_system_roles()
modules = current.deployment_settings.modules
restricted_modules = [m for m in modules
if modules[m].get("restricted", True)]
roles = []
if current.session.s3 is not None:
roles = current.session.s3.roles or []
if sr.ADMIN in roles: # or sr.EDITOR in roles:
return []
if not roles:
hidden_modules = restricted_modules
else:
t = self.table
query = (t.deleted == False) & \
(t.controller.belongs(restricted_modules)) & \
(t.tablename == None)
if roles:
query = query & (t.group_id.belongs(roles))
else:
query = query & (t.group_id == None)
rows = current.db(query).select()
acls = {}
for acl in rows:
if acl.controller not in acls:
acls[acl.controller] = self.NONE
acls[acl.controller] |= acl.oacl | acl.uacl
hidden_modules = [m for m in restricted_modules
if m not in acls | |
# encoding: utf-8
'''
Created on Nov 26, 2015
@author: tal
Based in part on:
Learn math - https://github.com/fchollet/keras/blob/master/examples/addition_rnn.py
See https://medium.com/@majortal/deep-spelling-9ffef96a24f6#.2c9pu8nlm
'''
from __future__ import print_function, division, unicode_literals
import os
import errno
from collections import Counter
from hashlib import sha256
import re
import json
import itertools
import logging
import requests
import numpy as np
from numpy.random import choice as random_choice, randint as random_randint, shuffle as random_shuffle, seed as random_seed, rand
from numpy import zeros as np_zeros # pylint:disable=no-name-in-module
from keras.models import Sequential, load_model
from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, Dropout, recurrent
from keras.callbacks import Callback
# Set a logger for the module
LOGGER = logging.getLogger(__name__) # Every log will use the module name
LOGGER.addHandler(logging.StreamHandler())  # StreamHandler defaults to stderr
LOGGER.setLevel(logging.DEBUG)

random_seed(123) # Reproducibility of the numpy-based noise/shuffling
class Configuration(object):
    """Plain attribute container for model/training settings
    (attributes are assigned on the CONFIG instance below)."""
CONFIG = Configuration()
#pylint:disable=attribute-defined-outside-init
# Parameters for the model:
CONFIG.input_layers = 2  # encoder LSTM layers (see generate_model)
CONFIG.output_layers = 2  # decoder LSTM layers (see generate_model)
CONFIG.amount_of_dropout = 0.2
CONFIG.hidden_size = 500
CONFIG.initialization = "he_normal" # : Gaussian initialization scaled by fan-in (He et al., 2014)
CONFIG.number_of_chars = 100
CONFIG.max_input_len = 60
CONFIG.inverted = True  # feed the input string reversed (see generator)

# parameters for the training:
CONFIG.batch_size = 100 # As the model changes in size, play with the batch size to best fit the process in memory
CONFIG.epochs = 500 # due to mini-epochs.
CONFIG.steps_per_epoch = 1000 # This is a mini-epoch. Using News 2013 an epoch would need to be ~60K.
CONFIG.validation_steps = 10
CONFIG.number_of_iterations = 10
#pylint:enable=attribute-defined-outside-init
# DIGEST = sha256(json.dumps(CONFIG.__dict__, sort_keys=True)).hexdigest()

# Parameters for the dataset
MIN_INPUT_LEN = 5
AMOUNT_OF_NOISE = 0.2 / CONFIG.max_input_len
CHARS = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ .")
PADDING = "☕"  # padding character (its use is outside this chunk -- verify against callers)

# Local paths for the News 2013 corpus and its processed derivatives
DATA_FILES_PATH = "~/Downloads/data"
DATA_FILES_FULL_PATH = os.path.expanduser(DATA_FILES_PATH)
DATA_FILES_URL = "http://www.statmt.org/wmt14/training-monolingual-news-crawl/news.2013.en.shuffled.gz"
NEWS_FILE_NAME_COMPRESSED = os.path.join(DATA_FILES_FULL_PATH, "news.2013.en.shuffled.gz") # 1.1 GB
NEWS_FILE_NAME_ENGLISH = "news.2013.en.shuffled"
NEWS_FILE_NAME = os.path.join(DATA_FILES_FULL_PATH, NEWS_FILE_NAME_ENGLISH)
NEWS_FILE_NAME_CLEAN = os.path.join(DATA_FILES_FULL_PATH, "news.2013.en.clean")
NEWS_FILE_NAME_FILTERED = os.path.join(DATA_FILES_FULL_PATH, "news.2013.en.filtered")
NEWS_FILE_NAME_SPLIT = os.path.join(DATA_FILES_FULL_PATH, "news.2013.en.split")
NEWS_FILE_NAME_TRAIN = os.path.join(DATA_FILES_FULL_PATH, "news.2013.en.train")
NEWS_FILE_NAME_VALIDATE = os.path.join(DATA_FILES_FULL_PATH, "news.2013.en.validate")
CHAR_FREQUENCY_FILE_NAME = os.path.join(DATA_FILES_FULL_PATH, "char_frequency.json")
SAVED_MODEL_FILE_NAME = os.path.join(DATA_FILES_FULL_PATH, "keras_spell_e{}.h5") # an HDF5 file
# Some cleanup:
NORMALIZE_WHITESPACE_REGEX = re.compile(r'[^\S\n]+', re.UNICODE)  # match all whitespace except newlines
RE_DASH_FILTER = re.compile(r'[\-\˗\֊\‐\‑\‒\–\—\⁻\₋\−\﹣\-]', re.UNICODE)
# Match an ASCII apostrophe or any common apostrophe/accent look-alike so that
# all of them can be normalized to a plain "'".
# (The previous version of this pattern was corrupted by HTML-entity escaping
# and did not even parse: r''|[...]' left the string literal unbalanced.)
RE_APOSTROPHE_FILTER = re.compile(r"'|[ʼ՚‘’‛❛❜ߴߵ`‵´ˊˋ{}{}{}{}{}{}{}{}{}]".format(chr(768), chr(769), chr(832),
                                                                                chr(833), chr(2387), chr(5151),
                                                                                chr(5152), chr(65344), chr(8242)),
                                  re.UNICODE)
RE_LEFT_PARENTH_FILTER = re.compile(r'[\(\[\{\⁽\₍\❨\❪\﹙\(]', re.UNICODE)
RE_RIGHT_PARENTH_FILTER = re.compile(r'[\)\]\}\⁾\₎\❩\❫\﹚\)]', re.UNICODE)
ALLOWED_CURRENCIES = """¥£₪$€฿₨"""
ALLOWED_PUNCTUATION = """-!?/;"'%&<>.()[]{}@#:,|=*"""
RE_BASIC_CLEANER = re.compile(r'[^\w\s{}{}]'.format(re.escape(ALLOWED_CURRENCIES), re.escape(ALLOWED_PUNCTUATION)), re.UNICODE)
# pylint:disable=invalid-name
def download_the_news_data():
    """Download the compressed news corpus, streaming it to disk.

    Prints a 100-segment progress bar when the server reports a
    Content-Length; silently downloads without progress otherwise.

    Raises:
        OSError: if the target directory cannot be created.
        requests.HTTPError: if the server answers with an error status.
    """
    LOGGER.info("Downloading")
    try:
        os.makedirs(os.path.dirname(NEWS_FILE_NAME_COMPRESSED))
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise
    # Issue the request before opening the output file, so an HTTP failure
    # does not leave an empty/partial file behind.
    response = requests.get(DATA_FILES_URL, stream=True)
    response.raise_for_status()
    total_length = response.headers.get('content-length')
    # The original did int(None) when Content-Length was missing -> TypeError
    total_length = int(total_length) if total_length else None
    with open(NEWS_FILE_NAME_COMPRESSED, "wb") as output_file:
        downloaded = percentage = 0
        print("»" * 100)  # scale bar for the progress ticks below
        for data in response.iter_content(chunk_size=4096):
            downloaded += len(data)
            output_file.write(data)
            if total_length:
                new_percentage = 100 * downloaded // total_length
                if new_percentage > percentage:
                    print("☑", end="")
                    percentage = new_percentage
    print()
def uncompress_data():
    """Uncompress the downloaded news archive next to itself.

    Streams the data in chunks via shutil.copyfileobj, so the 1.1 GB
    archive is never held in memory at once (the original read the whole
    decompressed corpus into a single bytes object).
    """
    import gzip
    import shutil
    with gzip.open(NEWS_FILE_NAME_COMPRESSED, 'rb') as compressed_file:
        # Strip the trailing ".gz" for the output file name
        with open(NEWS_FILE_NAME_COMPRESSED[:-3], 'wb') as outfile:
            shutil.copyfileobj(compressed_file, outfile)
def add_noise_to_string(a_string, amount_of_noise):
    """Add some artificial spelling mistakes to the string.

    Each of four corruption types (substitute, delete, insert, transpose)
    is applied independently with probability ~ amount_of_noise * len(a_string).

    Args:
        a_string: the clean input string
        amount_of_noise: per-character corruption rate
    Returns:
        The (possibly) corrupted string.
    """
    if rand() < amount_of_noise * len(a_string):
        # Replace a character with a random character
        random_char_position = random_randint(len(a_string))
        a_string = a_string[:random_char_position] + random_choice(CHARS[:-1]) + a_string[random_char_position + 1:]
    if rand() < amount_of_noise * len(a_string):
        # Delete a character
        random_char_position = random_randint(len(a_string))
        a_string = a_string[:random_char_position] + a_string[random_char_position + 1:]
    if len(a_string) < CONFIG.max_input_len and rand() < amount_of_noise * len(a_string):
        # Add a random character
        random_char_position = random_randint(len(a_string))
        a_string = a_string[:random_char_position] + random_choice(CHARS[:-1]) + a_string[random_char_position:]
    if len(a_string) > 1 and rand() < amount_of_noise * len(a_string):
        # Transpose 2 characters. The length guard is required: for a 1-char
        # string the original called random_randint(0), which raises ValueError.
        random_char_position = random_randint(len(a_string) - 1)
        a_string = (a_string[:random_char_position] + a_string[random_char_position + 1] + a_string[random_char_position] +
                    a_string[random_char_position + 2:])
    return a_string
def _vectorize(questions, answers, ctable):
    """Vectorize the data as numpy one-hot arrays.

    Note: this CONSUMES (empties) both input lists. Questions and answers are
    popped in the same (reverse) order, so X[i] and y[i] stay aligned.

    Args:
        questions: list of (noisy) input strings -- emptied in place
        answers: list of target strings -- emptied in place
        ctable: CharacterTable mapping chars to one-hot indices
    Returns:
        (X, y) boolean arrays of shape (n, CONFIG.max_input_len, ctable.size)
    """
    len_of_questions = len(questions)
    # np.bool was removed in NumPy 1.24; the builtin bool is the replacement.
    X = np_zeros((len_of_questions, CONFIG.max_input_len, ctable.size), dtype=bool)
    # range() instead of xrange(): works on both Python 2 and 3.
    for i in range(len_of_questions):
        sentence = questions.pop()
        for j, c in enumerate(sentence):
            try:
                X[i, j, ctable.char_indices[c]] = 1
            except KeyError:
                pass # Padding (chars outside the alphabet stay all-zero)
    y = np_zeros((len_of_questions, CONFIG.max_input_len, ctable.size), dtype=bool)
    for i in range(len(answers)):
        sentence = answers.pop()
        for j, c in enumerate(sentence):
            try:
                y[i, j, ctable.char_indices[c]] = 1
            except KeyError:
                pass # Padding (chars outside the alphabet stay all-zero)
    return X, y
def slice_X(X, start=None, stop=None):
    """Slice an array-like, or each element of a list of array-likes.

    Outputs:
        - X[start:stop] if X is a single array-like
        - [x[start:stop] for x in X] if X is a list
    Can also take a list/array of indices as `start` (fancy indexing), in
    which case `stop` should be None. Index arrays are converted to plain
    lists first, since hdf5 datasets only support list indices.

    # Arguments
        start: integer start index, or a list/array of indices
        stop: integer stop index; should be None if `start` was a list
    """
    def _take(array):
        # Apply either fancy indexing or a plain slice to one array-like
        if hasattr(start, '__len__'):
            indices = start.tolist() if hasattr(start, 'shape') else start
            return array[indices]
        return array[start:stop]

    if isinstance(X, list):
        return [_take(x) for x in X]
    return _take(X)
def vectorize(questions, answers, chars=None):
    """Vectorize the questions and expected answers into train/validation
    arrays, holding out the last 10% as validation data.

    Returns:
        (X_train, X_val, y_train, y_val, max_input_len, ctable)
    """
    print('Vectorization...')
    ctable = CharacterTable(chars or CHARS)
    X, y = _vectorize(questions, answers, ctable)
    # Explicitly set apart 10% for validation data that we never train over
    split_at = int(len(X) - len(X) / 10)
    X_train = slice_X(X, 0, split_at)
    X_val = slice_X(X, split_at)
    y_train = y[:split_at]
    y_val = y[split_at:]
    print(X_train.shape)
    print(y_train.shape)
    return X_train, X_val, y_train, y_val, CONFIG.max_input_len, ctable
def generate_model(output_len, chars=None):
    """Generate the sequence-to-sequence spelling model.

    output_len: number of decoder time steps (the RepeatVector length)
    chars: alphabet used to size the one-hot input/output layers
           (defaults to CHARS)
    Returns: a compiled keras Sequential model.
    """
    print('Build model...')
    chars = chars or CHARS
    model = Sequential()
    # "Encode" the input sequence using an RNN, producing an output of hidden_size
    # note: in a situation where your input sequences have a variable length,
    # use input_shape=(None, nb_feature).
    for layer_number in range(CONFIG.input_layers):
        # Only the last encoder layer collapses the sequence (return_sequences=False)
        model.add(recurrent.LSTM(CONFIG.hidden_size, input_shape=(None, len(chars)), kernel_initializer=CONFIG.initialization,
                                 return_sequences=layer_number + 1 < CONFIG.input_layers))
        model.add(Dropout(CONFIG.amount_of_dropout))
    # For the decoder's input, we repeat the encoded input for each time step
    model.add(RepeatVector(output_len))
    # The decoder RNN could be multiple layers stacked or a single layer
    for _ in range(CONFIG.output_layers):
        model.add(recurrent.LSTM(CONFIG.hidden_size, return_sequences=True, kernel_initializer=CONFIG.initialization))
        model.add(Dropout(CONFIG.amount_of_dropout))
    # For each of step of the output sequence, decide which character should be chosen
    model.add(TimeDistributed(Dense(len(chars), kernel_initializer=CONFIG.initialization)))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
class Colors(object):
    """ANSI terminal escape sequences for nicer (colored) printouts"""
    green = '\033[92m'  # bright green foreground
    red = '\033[91m'  # bright red foreground
    close = '\033[0m'  # reset all attributes
class CharacterTable(object):
    """
    Given a set of characters:
    + Encode them to a one hot integer representation
    + Decode the one hot integer representation to their character output
    + Decode a vector of probabilities to their character output
    """
    def __init__(self, chars):
        # Sorted, duplicate-free alphabet gives a stable char <-> index mapping
        self.chars = sorted(set(chars))
        self.char_indices = {c: i for i, c in enumerate(self.chars)}
        self.indices_char = {i: c for i, c in enumerate(self.chars)}

    @property
    def size(self):
        """The number of chars"""
        return len(self.chars)

    def encode(self, C, maxlen):
        """Encode string C as a (maxlen, size) one-hot boolean matrix."""
        # np.bool was removed in NumPy 1.24; builtin bool is the replacement
        X = np_zeros((maxlen, len(self.chars)), dtype=bool)
        for i, c in enumerate(C):
            X[i, self.char_indices[c]] = 1
        return X

    def decode(self, X, calc_argmax=True):
        """Decode one-hot rows (or probability rows, with calc_argmax) to a string."""
        if calc_argmax:
            X = X.argmax(axis=-1)
        # NOTE(review): rows whose argmax is 0 are skipped as padding -- confirm
        # that the padding character actually maps to index 0 of the alphabet
        return ''.join(self.indices_char[x] for x in X if x)
def generator(file_name):
    """Returns a tuple (inputs, targets)
    All arrays should contain the same number of samples.
    The generator is expected to loop over its data indefinitely.
    An epoch finishes when samples_per_epoch samples have been seen by the model.
    """
    ctable = CharacterTable(read_top_chars())
    batch_of_answers = []
    while True:  # loop over the file forever, as Keras generators must
        with open(file_name) as answers:
            for answer in answers:
                # batch_of_answers.append(answer.strip().decode('utf-8'))
                batch_of_answers.append(answer.strip())
                if len(batch_of_answers) == CONFIG.batch_size:
                    random_shuffle(batch_of_answers)
                    batch_of_questions = []
                    for answer_index, answer in enumerate(batch_of_answers):
                        # NOTE(review): generate_question returns a (possibly
                        # modified) answer as well -- it must be exactly
                        # max_input_len long, as asserted below
                        question, answer = generate_question(answer)
                        batch_of_answers[answer_index] = answer
                        assert len(answer) == CONFIG.max_input_len
                        # Feed the input reversed when configured
                        question = question[::-1] if CONFIG.inverted else question
                        batch_of_questions.append(question)
                    X, y = _vectorize(batch_of_questions, batch_of_answers, ctable)
                    yield X, y
                    batch_of_answers = []
def print_random_predictions(model, ctable, X_val, y_val):
    """Select 10 samples from the validation set at random so we can visualize errors"""
    print()
    for _ in range(10):
        ind = random_randint(0, len(X_val))
        rowX, rowy = X_val[np.array([ind])], y_val[np.array([ind])] # pylint:disable=no-member
        # NOTE(review): predict_classes was removed from recent Keras/TF
        # releases -- confirm the pinned Keras version still provides it
        preds = model.predict_classes(rowX, verbose=0)
        q = ctable.decode(rowX[0])
        correct = ctable.decode(rowy[0])
        guess = ctable.decode(preds[0], calc_argmax=False)
        if CONFIG.inverted:
            print('Q', q[::-1]) # inverted back!
        else:
            print('Q', q)
        print('A', correct)
        # Green check for a perfect guess, red cross otherwise
        print(Colors.green + '☑' + Colors.close if correct == guess else Colors.red + '☒' + Colors.close, guess)
        print('---')
    print()
class OnEpochEndCallback(Callback):
    """Execute this every end of epoch"""

    def on_epoch_end(self, epoch, logs=None):
        """On Epoch end - do some stats"""
        ctable = CharacterTable(read_top_chars())
        # Pull one validation batch to print sample predictions
        X_val, y_val = next(generator(NEWS_FILE_NAME_VALIDATE))
        print_random_predictions(self.model, ctable, X_val, y_val)
        # Checkpoint the model; the file name carries the epoch number
        self.model.save(SAVED_MODEL_FILE_NAME.format(epoch))

# Module-level singleton, passed to fit_generator's callbacks list
ON_EPOCH_END_CALLBACK = OnEpochEndCallback()
def itarative_train(model):
    """
    Iterative training of the model
    - To allow for finite RAM...
    - To allow infinite training data as the training noise is injected in runtime

    NOTE(review): the name is misspelled ("itarative") but is kept as-is for
    backward compatibility with existing callers.
    """
    # NOTE(review): fit_generator with max_q_size/pickle_safe is the Keras-1
    # era API -- confirm against the pinned Keras version before upgrading
    model.fit_generator(generator(NEWS_FILE_NAME_TRAIN), steps_per_epoch=CONFIG.steps_per_epoch,
                        epochs=CONFIG.epochs,
                        verbose=1, callbacks=[ON_EPOCH_END_CALLBACK, ], validation_data=generator(NEWS_FILE_NAME_VALIDATE),
                        validation_steps=CONFIG.validation_steps,
                        class_weight=None, max_q_size=10, workers=1,
                        pickle_safe=False, initial_epoch=0)
def iterate_training(model, X_train, y_train, X_val, y_val, ctable):
    """Train the model generation by generation, printing a few sample
    predictions against the validation dataset after each fit."""
    separator = '-' * 50
    for generation in range(1, CONFIG.number_of_iterations):
        print('\n' + separator)
        print('Iteration', generation)
        model.fit(X_train, y_train, batch_size=CONFIG.batch_size, epochs=CONFIG.epochs,
                  validation_data=(X_val, y_val))
        print_random_predictions(model, ctable, X_val, y_val)
def clean_text(text):
"""Clean the text - remove unwanted chars, fold | |
<filename>stubs.min/System/Windows/Media/__init___parts/FormattedText.py
class FormattedText(object):
"""
Provides low-level control for drawing text in Windows Presentation Foundation (WPF) applications.
FormattedText(textToFormat: str,culture: CultureInfo,flowDirection: FlowDirection,typeface: Typeface,emSize: float,foreground: Brush)
FormattedText(textToFormat: str,culture: CultureInfo,flowDirection: FlowDirection,typeface: Typeface,emSize: float,foreground: Brush,pixelsPerDip: float)
FormattedText(textToFormat: str,culture: CultureInfo,flowDirection: FlowDirection,typeface: Typeface,emSize: float,foreground: Brush,numberSubstitution: NumberSubstitution)
FormattedText(textToFormat: str,culture: CultureInfo,flowDirection: FlowDirection,typeface: Typeface,emSize: float,foreground: Brush,numberSubstitution: NumberSubstitution,pixelsPerDip: float)
FormattedText(textToFormat: str,culture: CultureInfo,flowDirection: FlowDirection,typeface: Typeface,emSize: float,foreground: Brush,numberSubstitution: NumberSubstitution,textFormattingMode: TextFormattingMode)
FormattedText(textToFormat: str,culture: CultureInfo,flowDirection: FlowDirection,typeface: Typeface,emSize: float,foreground: Brush,numberSubstitution: NumberSubstitution,textFormattingMode: TextFormattingMode,pixelsPerDip: float)
"""
def BuildGeometry(self,origin):
    """
    BuildGeometry(self: FormattedText,origin: Point) -> Geometry

    Returns a System.Windows.Media.Geometry object that represents the formatted
    text,including all glyphs and text decorations.

    origin: The top-left origin of the resulting geometry.
    Returns: The System.Windows.Media.Geometry object representation of the formatted text.
    """
    # Stub only: the actual implementation is provided by the .NET (WPF) runtime.
    pass
def BuildHighlightGeometry(self,origin,startIndex=None,count=None):
    """
    BuildHighlightGeometry(self: FormattedText,origin: Point,startIndex: int,count: int) -> Geometry

    Returns a System.Windows.Media.Geometry object that represents the highlight
    bounding box for a specified substring of the formatted text.

    origin: The origin of the highlight region.
    startIndex: The index of the initial character the highlight bounds should be obtained for.
    count: The number of characters the highlight bounds should contain.
    Returns: The System.Windows.Media.Geometry object that represents the highlight bounding
             box of the formatted text substring.

    BuildHighlightGeometry(self: FormattedText,origin: Point) -> Geometry

    Returns a System.Windows.Media.Geometry object that represents the highlight
    bounding box of the formatted text.

    origin: The origin of the highlight region.
    Returns: The System.Windows.Media.Geometry object that represents the highlight bounding
             box of the formatted text.
    """
    # Stub only: the actual implementation is provided by the .NET (WPF) runtime.
    pass
def GetMaxTextWidths(self):
    """
    GetMaxTextWidths(self: FormattedText) -> Array[float]

    Retrieves an array of text widths. Each element in the array represents the
    maximum text width of sequential lines of text.

    Returns: An array of maximum text widths,each width provided in device-independent
             units (1/96th inch per unit).
    """
    # Stub only: the actual implementation is provided by the .NET (WPF) runtime.
    pass
def SetCulture(self,culture,startIndex=None,count=None):
    """
    SetCulture(self: FormattedText,culture: CultureInfo,startIndex: int,count: int)

    Sets the System.Globalization.CultureInfo for a specified subset of characters
    in the System.Windows.Media.FormattedText object.

    culture: The System.Globalization.CultureInfo to use for text formatting.
    startIndex: The start index of initial character to apply the change to.
    count: The number of characters the change should be applied to.

    SetCulture(self: FormattedText,culture: CultureInfo)

    Sets the System.Globalization.CultureInfo for the entire set of characters in
    the System.Windows.Media.FormattedText object.

    culture: The System.Globalization.CultureInfo to use for text formatting.
    """
    # Stub only: the actual implementation is provided by the .NET (WPF) runtime.
    pass
def SetFontFamily(self,fontFamily,startIndex=None,count=None):
    """
    SetFontFamily(self: FormattedText,fontFamily: FontFamily)

    Sets the font family for a System.Windows.Media.FormattedText object.

    fontFamily: The System.Windows.Media.FontFamily to use for text formatting.

    SetFontFamily(self: FormattedText,fontFamily: FontFamily,startIndex: int,count: int)

    Sets the font family for a specified subset of characters in the
    System.Windows.Media.FormattedText object.

    fontFamily: The System.Windows.Media.FontFamily to use for text formatting.
    startIndex: The starting index of the initial character to apply the font family change to.
    count: The number of characters the change should apply to.

    SetFontFamily(self: FormattedText,fontFamily: str)

    Sets the font family for the entire set of characters in the
    System.Windows.Media.FormattedText object.

    fontFamily: A string that constructs the System.Windows.Media.FontFamily to use for text
                formatting. Fallbacks are permitted; for details,see
                System.Windows.Media.FontFamily.

    SetFontFamily(self: FormattedText,fontFamily: str,startIndex: int,count: int)

    Sets the font family for a specified subset of characters in the
    System.Windows.Media.FormattedText object.

    fontFamily: A string that constructs the System.Windows.Media.FontFamily to use for text
                formatting. Fallbacks are permitted; for details,see
                System.Windows.Media.FontFamily.
    startIndex: The starting index of the initial character to apply the font family change to.
    count: The number of characters the change should apply to.
    """
    # Stub only: the actual implementation is provided by the .NET (WPF) runtime.
    pass
def SetFontSize(self,emSize,startIndex=None,count=None):
"""
SetFontSize(self: FormattedText,emSize: float,startIndex: int,count: int)
Sets the font size for a specified subset of characters in the
System.Windows.Media.FormattedText object.
emSize: The font 'em' measure size,provided in device-independent units (1/96th inch
per unit).
startIndex: The start index of the initial character to apply the font size to.
count: The number of characters to apply the font size to.
SetFontSize(self: FormattedText,emSize: float)
Sets the font size for the entire set of characters in the
System.Windows.Media.FormattedText object.
emSize: The font 'em' measure size,provided in device-independent units (1/96th inch
per unit).
"""
pass
def SetFontStretch(self,stretch,startIndex=None,count=None):
"""
SetFontStretch(self: FormattedText,stretch: FontStretch,startIndex: int,count: int)
Sets the font stretch value for a specified subset of characters in the
System.Windows.Media.FormattedText object.
stretch: The desired System.Windows.FontStretch value to use for text formatting.
startIndex: The start index of the initial character to apply the font stretch to.
count: The number of characters to apply the font stretch to.
SetFontStretch(self: FormattedText,stretch: FontStretch)
Sets the font stretch value for the entire set of characters in the
System.Windows.Media.FormattedText object.
stretch: The desired System.Windows.FontStretch value to use for text formatting.
"""
pass
def SetFontStyle(self,style,startIndex=None,count=None):
"""
SetFontStyle(self: FormattedText,style: FontStyle,startIndex: int,count: int)
Sets the font style for a specified subset of characters in the
System.Windows.Media.FormattedText object.
style: The System.Windows.FontStyle value to use for text formatting.
startIndex: The start index of the initial character to apply the font style to.
count: The number of characters to apply the font style to.
SetFontStyle(self: FormattedText,style: FontStyle)
Sets the font style for the entire set of characters in the
System.Windows.Media.FormattedText object.
style: The System.Windows.FontStyle value to use for text formatting.
"""
pass
def SetFontTypeface(self,typeface,startIndex=None,count=None):
"""
SetFontTypeface(self: FormattedText,typeface: Typeface,startIndex: int,count: int)
Sets the font typeface for a specified subset of characters in the
System.Windows.Media.FormattedText object.
typeface: The System.Windows.Media.Typeface to use for text formatting.
startIndex: The start index of the initial character to apply the typeface to.
count: The number of characters to apply the typeface to.
SetFontTypeface(self: FormattedText,typeface: Typeface)
Sets the font typeface for the entire set of characters in the
System.Windows.Media.FormattedText object.
typeface: The System.Windows.Media.Typeface to use for text formatting.
"""
pass
def SetFontWeight(self,weight,startIndex=None,count=None):
"""
SetFontWeight(self: FormattedText,weight: FontWeight,startIndex: int,count: int)
Changes the System.Windows.FontWeight for specified text within a
System.Windows.Media.FormattedText object.
weight: The font weight to use for text formatting.
startIndex: The start index of the initial character to apply the font weight to.
count: The number of characters to apply the font weight to.
SetFontWeight(self: FormattedText,weight: FontWeight)
Sets the font weight for the entire set of characters in the
System.Windows.Media.FormattedText object.
weight: The System.Windows.FontWeight to use for text formatting.
"""
pass
def SetForegroundBrush(self,foregroundBrush,startIndex=None,count=None):
"""
SetForegroundBrush(self: FormattedText,foregroundBrush: Brush,startIndex: int,count: int)
Changes the foreground System.Windows.Media.Brush for specified text within a
System.Windows.Media.FormattedText object.
foregroundBrush: The brush to use for the text foreground.
startIndex: The start index of the initial character to apply the foreground brush to.
count: The number of characters to apply the foreground brush to.
SetForegroundBrush(self: FormattedText,foregroundBrush: Brush)
Changes the foreground System.Windows.Media.Brush for an entire
System.Windows.Media.FormattedText object.
foregroundBrush: The brush to use for the text foreground.
"""
pass
def SetMaxTextWidths(self,maxTextWidths):
"""
SetMaxTextWidths(self: FormattedText,maxTextWidths: Array[float])
Sets an array of maximum text widths within the
System.Windows.Media.FormattedText,on a per-line basis. Each element in the
array represents the maximum text width of sequential lines of text.
maxTextWidths: An array of maximum text widths,each width provided in device-independent
units (1/96th inch per unit).
"""
pass
def SetNumberSubstitution(self,numberSubstitution,startIndex=None,count=None):
"""
SetNumberSubstitution(self: FormattedText,numberSubstitution: NumberSubstitution,startIndex: int,count: int)
Sets the number substitution behavior for specified text within a
System.Windows.Media.FormattedText object.
numberSubstitution: Number substitution behavior to apply to the text; can be null,in which case
the default number substitution method for the text culture is used.
startIndex: The start index of initial character to apply the change to.
count: The number of characters the change should be applied to.
SetNumberSubstitution(self: FormattedText,numberSubstitution: NumberSubstitution)
Sets the number substitution behavior for the entire set of characters in the
System.Windows.Media.FormattedText object.
numberSubstitution: Number substitution behavior to apply to the text; can be null,in which case
the default number substitution method for the text culture is used.
"""
pass
| |
of Prelude.Show.Show_a94d79ab20}
def _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab20_125_(
in24
):
while True:
return _idris_Prelude_46_Show_46_primNumShow(None, (65700,), (0,), in24) # {U_prim__toStrInt1}, Prelude.Show.Open
# {PE_(a, b) instance of Prelude.Show.Show_a94d79ab21}
def _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab21_125_(
in25, in26
):
while True:
return _idris_Prelude_46_Show_46_primNumShow(None, (65700,), in25, in26) # {U_prim__toStrInt1}
# {PE_(a, b) instance of Prelude.Show.Show_a94d79ab22}
def _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab22_125_(
in25
):
while True:
return (65714, in25) # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab21}1}
# {PE_(a, b) instance of Prelude.Show.Show_a94d79ab23}
def _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab23_125_(
in20
):
while True:
return _idris_Prelude_46_Show_46_Prelude_46_Show_46__64_Prelude_46_Show_46_Show_36__40_a_44__32_b_41__58__33_show_58_0(
None,
None,
None,
None,
(0, (65709,), (65711,)), # constructor of Prelude.Show.Show, {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab17}1}, {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab19}1}
(0, (65713,), (65715,)), # constructor of Prelude.Show.Show, {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab20}1}, {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab22}1}
in20
)
# {PE_(a, b) instance of Prelude.Show.Show_a94d79ab24}
def _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab24_125_(
in19
):
while True:
return (65716,) # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab23}1}
# {PE_(a, b) instance of Prelude.Show.Show_a94d79ab25}
def _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab25_125_(
in14
):
while True:
return _idris_Prelude_46_Show_46_Prelude_46_Show_46__64_Prelude_46_Show_46_Show_36__40_a_44__32_b_41__58__33_show_58_0(
None,
None,
None,
None,
(0, (65705,), (65707,)), # constructor of Prelude.Show.Show, {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab13}1}, {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab15}1}
(0, (65708,), (65717,)), # constructor of Prelude.Show.Show, {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab16}1}, {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab24}1}
in14
)
# {PE_(a, b) instance of Prelude.Show.Show_a94d79ab26}
def _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab26_125_(
in13
):
while True:
return (65718,) # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab25}1}
# Decidable.Equality.Decidable.Equality.Char instance of Decidable.Equality.DecEq, method decEq, primitiveNotEq
def _idris_Decidable_46_Equality_46_Decidable_46_Equality_46__64_Decidable_46_Equality_46_DecEq_36_Char_58__33_decEq_58_0_58_primitiveNotEq_58_0():
while True:
return None
# Decidable.Equality.Decidable.Equality.Int instance of Decidable.Equality.DecEq, method decEq, primitiveNotEq
def _idris_Decidable_46_Equality_46_Decidable_46_Equality_46__64_Decidable_46_Equality_46_DecEq_36_Int_58__33_decEq_58_0_58_primitiveNotEq_58_0():
while True:
return None
# Decidable.Equality.Decidable.Equality.Integer instance of Decidable.Equality.DecEq, method decEq, primitiveNotEq
def _idris_Decidable_46_Equality_46_Decidable_46_Equality_46__64_Decidable_46_Equality_46_DecEq_36_Integer_58__33_decEq_58_0_58_primitiveNotEq_58_0():
while True:
return None
# Decidable.Equality.Decidable.Equality.ManagedPtr instance of Decidable.Equality.DecEq, method decEq, primitiveNotEq
def _idris_Decidable_46_Equality_46_Decidable_46_Equality_46__64_Decidable_46_Equality_46_DecEq_36_ManagedPtr_58__33_decEq_58_0_58_primitiveNotEq_58_0():
while True:
return None
# Decidable.Equality.Decidable.Equality.Ptr instance of Decidable.Equality.DecEq, method decEq, primitiveNotEq
def _idris_Decidable_46_Equality_46_Decidable_46_Equality_46__64_Decidable_46_Equality_46_DecEq_36_Ptr_58__33_decEq_58_0_58_primitiveNotEq_58_0():
while True:
return None
# Decidable.Equality.Decidable.Equality.String instance of Decidable.Equality.DecEq, method decEq, primitiveNotEq
def _idris_Decidable_46_Equality_46_Decidable_46_Equality_46__64_Decidable_46_Equality_46_DecEq_36_String_58__33_decEq_58_0_58_primitiveNotEq_58_0():
while True:
return None
# Prelude.Prelude.Int instance of Prelude.Enum, method enumFromTo, go
def _idris_Prelude_46_Prelude_46__64_Prelude_46_Enum_36_Int_58__33_enumFromTo_58_0_58_go_58_0(
e0, e1, e2, e3, e4
):
while True:
if e3 == 0:
return e2.cons(e4)
else:
in0 = (e3 - 1)
e0, e1, e2, e3, e4, = None, None, e2.cons(e4), in0, (e4 - 1),
continue
return _idris_error("unreachable due to tail call")
return _idris_error("unreachable due to case in tail position")
# Prelude.Show.Prelude.Show.List a instance of Prelude.Show.Show, method show, show'
def _idris_Prelude_46_Show_46_Prelude_46_Show_46__64_Prelude_46_Show_46_Show_36_List_32_a_58__33_show_58_0_58_show_39__58_0(
e0, e1, e2, e3, e4, e5
):
while True:
if e5: # Prelude.List.::
in0, in1 = e5.head, e5.tail
if not in1: # Prelude.List.Nil
return (e4 + APPLY0(_idris_Prelude_46_Show_46_show(None, e3), in0))
else:
e0, e1, e2, e3, e4, e5, = None, None, None, e3, (e4 + (APPLY0(_idris_Prelude_46_Show_46_show(None, e3), in0) + u', ')), in1,
continue
return _idris_error("unreachable due to tail call")
return _idris_error("unreachable due to case in tail position")
else: # Prelude.List.Nil
return e4
return _idris_error("unreachable due to case in tail position")
# Decidable.Equality.Decidable.Equality.Bool instance of Decidable.Equality.DecEq, method decEq
def _idris_Decidable_46_Equality_46_Decidable_46_Equality_46__64_Decidable_46_Equality_46_DecEq_36_Bool_58__33_decEq_58_0(
e0, e1
):
while True:
if not e1: # Prelude.Bool.False
if not e0: # Prelude.Bool.False
return (0,) # Prelude.Basics.Yes
else: # Prelude.Bool.True
return (1,) # Prelude.Basics.No
return _idris_error("unreachable due to case in tail position")
else: # Prelude.Bool.True
if not e0: # Prelude.Bool.False
return (1,) # Prelude.Basics.No
else: # Prelude.Bool.True
return (0,) # Prelude.Basics.Yes
return _idris_error("unreachable due to case in tail position")
return _idris_error("unreachable due to case in tail position")
# Prelude.Prelude.Int instance of Prelude.Enum, method enumFromTo
def _idris_Prelude_46_Prelude_46__64_Prelude_46_Enum_36_Int_58__33_enumFromTo_58_0(
e0, e1
):
while True:
aux1 = _idris_Prelude_46_Classes_46_Prelude_46_Classes_46__64_Prelude_46_Classes_46_Ord_36_Int_58__33__60__61__58_0(
e0, e1
)
if not aux1: # Prelude.Bool.False
return ConsList()
else: # Prelude.Bool.True
return _idris_Prelude_46_Prelude_46__64_Prelude_46_Enum_36_Int_58__33_enumFromTo_58_0_58_go_58_0(
None,
None,
ConsList(),
(e1 - e0),
e1
)
return _idris_error("unreachable due to case in tail position")
# Prelude.Classes.Prelude.Nat.Nat instance of Prelude.Classes.Eq, method ==
def _idris_Prelude_46_Classes_46_Prelude_46_Nat_46__64_Prelude_46_Classes_46_Eq_36_Nat_58__33__61__61__58_0(
e0, e1
):
while True:
if e1 == 0:
if e0 == 0:
return True
else:
return False
return _idris_error("unreachable due to case in tail position")
elif True:
in0 = (e1 - 1)
if e0 == 0:
return False
else:
in1 = (e0 - 1)
return APPLY0(APPLY0(_idris_Prelude_46_Classes_46__61__61_(None, (65738,)), in1), in0) # {U_Prelude.Nat.Nat instance of Prelude.Classes.Eq2}
return _idris_error("unreachable due to case in tail position")
else:
return False
return _idris_error("unreachable due to case in tail position")
# Prelude.Classes.Prelude.Classes.Ordering instance of Prelude.Classes.Eq, method ==
def _idris_Prelude_46_Classes_46_Prelude_46_Classes_46__64_Prelude_46_Classes_46_Eq_36_Ordering_58__33__61__61__58_0(
e0, e1
):
while True:
if e1[0] == 1: # Prelude.Classes.EQ
if e0[0] == 1: # Prelude.Classes.EQ
return True
else:
return False
return _idris_error("unreachable due to case in tail position")
elif e1[0] == 2: # Prelude.Classes.GT
if e0[0] == 2: # Prelude.Classes.GT
return True
else:
return False
return _idris_error("unreachable due to case in tail position")
elif e1[0] == 0: # Prelude.Classes.LT
if e0[0] == 0: # Prelude.Classes.LT
return True
else:
return False
return _idris_error("unreachable due to case in tail position")
else:
return False
return _idris_error("unreachable due to case in tail position")
# Prelude.Classes.Prelude.Show.Prec instance of Prelude.Classes.Eq, method ==
def _idris_Prelude_46_Classes_46_Prelude_46_Show_46__64_Prelude_46_Classes_46_Eq_36_Prec_58__33__61__61__58_0(
e0, e1
):
while True:
if e1[0] == 4: # Prelude.Show.User
in0 = e1[1]
if e0[0] == 4: # Prelude.Show.User
in1 = e0[1]
return _idris_Prelude_46_Classes_46_Prelude_46_Nat_46__64_Prelude_46_Classes_46_Eq_36_Nat_58__33__61__61__58_0(
in1, in0
)
else:
aux1 = (_idris_Prelude_46_Show_46_precCon(e0) == _idris_Prelude_46_Show_46_precCon(e1))
if aux1 == 0:
return False
else:
return True
return _idris_error("unreachable due to case in tail position")
return _idris_error("unreachable due to case in tail position")
else:
aux2 = (_idris_Prelude_46_Show_46_precCon(e0) == _idris_Prelude_46_Show_46_precCon(e1))
if aux2 == 0:
return False
else:
return True
return _idris_error("unreachable due to case in tail position")
return _idris_error("unreachable due to case in tail position")
# Prelude.Foldable.Prelude.List.List instance of Prelude.Foldable.Foldable, method foldr
def _idris_Prelude_46_Foldable_46_Prelude_46_List_46__64_Prelude_46_Foldable_46_Foldable_36_List_58__33_foldr_58_0(
e0, e1, e2, e3, e4
):
while True:
if e4: # Prelude.List.::
in0, in1 = e4.head, e4.tail
return APPLY0(
APPLY0(e2, in0),
APPLY0(
APPLY0(
APPLY0(_idris_Prelude_46_Foldable_46_foldr(None, None, None, (65741,)), e2), # {U_Prelude.List.List instance of Prelude.Foldable.Foldable5}
e3
),
in1
)
)
else: # Prelude.List.Nil
return e3
return _idris_error("unreachable due to case in tail position")
# Prelude.Monad.Prelude.List instance of Prelude.Monad.Monad, method >>=
def _idris_Prelude_46_Monad_46_Prelude_46__64_Prelude_46_Monad_46_Monad_36_List_58__33__62__62__61__58_0(
e0, e1, e2, e3
):
while True:
return _idris_PE_95_concatMap_95_af3155d1(None, None, e3, e2)
# Prelude.Classes.Prelude.Classes.Char instance of Prelude.Classes.Ord, method <=
def _idris_Prelude_46_Classes_46_Prelude_46_Classes_46__64_Prelude_46_Classes_46_Ord_36_Char_58__33__60__61__58_0(
e0, e1
):
while True:
aux1 = APPLY0(
APPLY0(
_idris_Prelude_46_Classes_46__60_(
None,
_idris_Prelude_46_Classes_46__64_Prelude_46_Classes_46_Ord_36_Char()
),
e0
),
e1
)
if not aux1: # Prelude.Bool.False
return _idris_Prelude_46_Classes_46__123_Prelude_46_Classes_46_Char_32_instance_32_of_32_Prelude_46_Classes_46_Ord_44__32_method_32__60__61__95_lam0_125_(
e0, e1
)
else: # Prelude.Bool.True
return True
return _idris_error("unreachable due to case in tail position")
# Prelude.Classes.Prelude.Classes.Char instance of Prelude.Classes.Ord, method >=
def _idris_Prelude_46_Classes_46_Prelude_46_Classes_46__64_Prelude_46_Classes_46_Ord_36_Char_58__33__62__61__58_0(
e0, e1
):
while True:
aux1 = APPLY0(
APPLY0(
_idris_Prelude_46_Classes_46__62_(
None,
_idris_Prelude_46_Classes_46__64_Prelude_46_Classes_46_Ord_36_Char()
),
e0
),
e1
)
if not aux1: # Prelude.Bool.False
return _idris_Prelude_46_Classes_46__123_Prelude_46_Classes_46_Char_32_instance_32_of_32_Prelude_46_Classes_46_Ord_44__32_method_32__62__61__95_lam0_125_(
e0, e1
)
else: # Prelude.Bool.True
return True
return _idris_error("unreachable due to case in tail position")
# Prelude.Classes.Prelude.Classes.Char instance of Prelude.Classes.Ord, method compare
def _idris_Prelude_46_Classes_46_Prelude_46_Classes_46__64_Prelude_46_Classes_46_Ord_36_Char_58__33_compare_58_0(
e0, e1
):
while True:
aux2 = (e0 == e1)
if aux2 == 0:
aux3 = False
else:
aux3 = True
aux1 = aux3
if not aux1: # Prelude.Bool.False
aux5 = (e0 < e1)
if aux5 == 0:
aux6 = False
else:
aux6 = True
aux4 = aux6
if not aux4: # Prelude.Bool.False
return (2,) # Prelude.Classes.GT
else: # Prelude.Bool.True
return (0,) # Prelude.Classes.LT
return _idris_error("unreachable due to case in tail position")
else: # Prelude.Bool.True
return (1,) # Prelude.Classes.EQ
return _idris_error("unreachable due to case in tail position")
# Prelude.Classes.Prelude.Classes.Int instance of Prelude.Classes.Ord, method <=
def _idris_Prelude_46_Classes_46_Prelude_46_Classes_46__64_Prelude_46_Classes_46_Ord_36_Int_58__33__60__61__58_0(
e0, e1
):
while True:
aux1 = APPLY0(
APPLY0(
_idris_Prelude_46_Classes_46__60_(
None,
_idris_Prelude_46_Classes_46__64_Prelude_46_Classes_46_Ord_36_Int()
),
e0
),
e1
)
if not aux1: # Prelude.Bool.False
return _idris_Prelude_46_Classes_46__123_Prelude_46_Classes_46_Int_32_instance_32_of_32_Prelude_46_Classes_46_Ord_44__32_method_32__60__61__95_lam0_125_(
e0, e1
)
else: # Prelude.Bool.True
return True
return _idris_error("unreachable due to case in tail position")
# Prelude.Classes.Prelude.Classes.Int instance of Prelude.Classes.Ord, method compare
def _idris_Prelude_46_Classes_46_Prelude_46_Classes_46__64_Prelude_46_Classes_46_Ord_36_Int_58__33_compare_58_0(
e0, e1
):
while True:
aux2 = (e0 == e1)
if aux2 == 0:
aux3 = False
else:
aux3 = True
aux1 = aux3
if not aux1: # Prelude.Bool.False
aux5 = (e0 < e1)
if aux5 == 0:
aux6 = False
else:
aux6 = True
aux4 = aux6
if not aux4: # Prelude.Bool.False
return (2,) # Prelude.Classes.GT
else: # Prelude.Bool.True
return (0,) # Prelude.Classes.LT
return _idris_error("unreachable due to case in tail position")
else: # Prelude.Bool.True
return (1,) # Prelude.Classes.EQ
return _idris_error("unreachable due to case in tail position")
# Prelude.Classes.Prelude.Classes.Integer instance of Prelude.Classes.Ord, method compare
def _idris_Prelude_46_Classes_46_Prelude_46_Classes_46__64_Prelude_46_Classes_46_Ord_36_Integer_58__33_compare_58_0(
e0, e1
):
while True:
aux2 = (e0 == e1)
if aux2 == 0:
aux3 = False
else:
aux3 = True
aux1 = aux3
if not aux1: # Prelude.Bool.False
aux5 = (e0 < e1)
if aux5 == 0:
aux6 = False
else:
aux6 = True
aux4 = aux6
if not aux4: # Prelude.Bool.False
return (2,) # Prelude.Classes.GT
else: # Prelude.Bool.True
return (0,) # Prelude.Classes.LT
return _idris_error("unreachable due to case in tail position")
else: # Prelude.Bool.True
return (1,) # Prelude.Classes.EQ
return _idris_error("unreachable due to case | |
},
{
"id": 26470,
"name": "Системы охранной сигнализации",
"term": 2,
"course_project": False
},
{
"id": 31331,
"name": "Системы очувствления роботов / Robot Sensing Systems",
"term": 2,
"course_project": False
},
{
"id": 27194,
"name": "Системы сжижения, хранения и транспортирования природного газа",
"term": 2,
"course_project": False
},
{
"id": 26472,
"name": "Системы сквозного проектирования",
"term": 6,
"course_project": False
},
{
"id": 26474,
"name": "Системы телевизионного наблюдения",
"term": 2,
"course_project": False
},
{
"id": 26474,
"name": "Системы телевизионного наблюдения",
"term": 2,
"course_project": True
},
{
"id": 30409,
"name": "Системы технического зрения",
"term": 2,
"course_project": False
},
{
"id": 34521,
"name": "Системы технического зрения / Systems of technical vision",
"term": 2,
"course_project": False
},
{
"id": 19088,
"name": "Системы управления биотехнологическими комплексами",
"term": 8,
"course_project": False
},
{
"id": 5503,
"name": "Системы управления химико-технологическими процессами",
"term": 8,
"course_project": False
},
{
"id": 5503,
"name": "Системы управления химико-технологическими процессами",
"term": 10,
"course_project": False
},
{
"id": 26492,
"name": "Служебная интеллектуальная собственность",
"term": 2,
"course_project": False
},
{
"id": 34566,
"name": "Случайные процессы и статистика",
"term": 2,
"course_project": False
},
{
"id": 26530,
"name": "Современная теория систем управления",
"term": 2,
"course_project": False
},
{
"id": 28789,
"name": "Современная теория систем управления / Modern Control Theory",
"term": 2,
"course_project": False
},
{
"id": 31549,
"name": "Современные биотехнологии сыров и сырных продуктов",
"term": 2,
"course_project": False
},
{
"id": 34589,
"name": "Современные инерциальные чувствительные элементы / Modern inertial sensors",
"term": 2,
"course_project": False
},
{
"id": 32629,
"name": "Современные криптографические алгоритмы / Modern cryptographic algorithms",
"term": 2,
"course_project": False
},
{
"id": 27294,
"name": "Современные криптографические алгоритмы в киберфизических системах",
"term": 2,
"course_project": False
},
{
"id": 29217,
"name": "Современные методы анализа систем информационной безопасности в киберфизических системах",
"term": 2,
"course_project": False
},
{
"id": 32110,
"name": "Современные методы бизнес-аналитики",
"term": 6,
"course_project": False
},
{
"id": 26493,
"name": "Современные методы испытаний биотехнологических процессов, производств и продукции",
"term": 2,
"course_project": False
},
{
"id": 27372,
"name": "Современные методы исследования материалов",
"term": 2,
"course_project": False
},
{
"id": 27372,
"name": "Современные методы исследования материалов",
"term": 2,
"course_project": True
},
{
"id": 28594,
"name": "Современные методы исследования материалов / Modern Methods of Materials Analysis",
"term": 2,
"course_project": False
},
{
"id": 28594,
"name": "Современные методы исследования материалов / Modern Methods of Materials Analysis",
"term": 2,
"course_project": True
},
{
"id": 6792,
"name": "Современные методы исследования оптических материалов",
"term": 8,
"course_project": False
},
{
"id": 26497,
"name": "Современные методы теории управления в биотехнологической промышленности",
"term": 2,
"course_project": False
},
{
"id": 26497,
"name": "Современные методы теории управления в биотехнологической промышленности",
"term": 2,
"course_project": True
},
{
"id": 34273,
"name": "Современные нанокомпозитные материалы",
"term": 2,
"course_project": False
},
{
"id": 31539,
"name": "Современные направления производства белковых продуктов на молочной основе",
"term": 2,
"course_project": False
},
{
"id": 26507,
"name": "Современные образовательные технологии",
"term": 2,
"course_project": False
},
{
"id": 31402,
"name": "Современные проблемы информационных систем бизнеса / Contemporary Subjects in Business Information Systems",
"term": 2,
"course_project": False
},
{
"id": 32229,
"name": "Современные проблемы науки и техники",
"term": 4,
"course_project": False
},
{
"id": 34580,
"name": "Современные проблемы оптических материалов",
"term": 2,
"course_project": False
},
{
"id": 31510,
"name": "Современные проблемы оптических материалов / Advanced problems of optical materials",
"term": 2,
"course_project": False
},
{
"id": 26503,
"name": "Современные проблемы прикладной математики и информатики",
"term": 2,
"course_project": False
},
{
"id": 26503,
"name": "Современные проблемы прикладной математики и информатики",
"term": 2,
"course_project": True
},
{
"id": 6038,
"name": "Современные проблемы экологии в машиностроении",
"term": 10,
"course_project": False
},
{
"id": 27407,
"name": "Современные рыночные технологии получения наноструктурированных материалов/ Modern technologies for manufacturing nanoscale objects and materials",
"term": 2,
"course_project": False
},
{
"id": 27230,
"name": "Современные системы пакетной коммутации",
"term": 2,
"course_project": False
},
{
"id": 27230,
"name": "Современные системы пакетной коммутации",
"term": 2,
"course_project": True
},
{
"id": 32730,
"name": "Современные системы пакетной коммутации / Up-to-date systems of packet switching",
"term": 2,
"course_project": False
},
{
"id": 32730,
"name": "Современные системы пакетной коммутации / Up-to-date systems of packet switching",
"term": 2,
"course_project": True
},
{
"id": 26527,
"name": "Современные тенденции развития оптоэлектроники",
"term": 2,
"course_project": False
},
{
"id": 27360,
"name": "Современные термоэлектрические материалы",
"term": 2,
"course_project": False
},
{
"id": 27688,
"name": "Современные термоэлектрические материалы /Modern Thermoelectric Materials",
"term": 2,
"course_project": False
},
{
"id": 26541,
"name": "Современные технологии программирования в инфокоммуникационных системах",
"term": 2,
"course_project": False
},
{
"id": 26544,
"name": "Современные энергетические проблемы/The Energy Quest",
"term": 2,
"course_project": False
},
{
"id": 26545,
"name": "Создание Web-приложений",
"term": 6,
"course_project": False
},
{
"id": 36969,
"name": "Создание бизнесов электроники",
"term": 2,
"course_project": False
},
{
"id": 26546,
"name": "Создание и развитие студенческого клуба",
"term": 2,
"course_project": False
},
{
"id": 26546,
"name": "Создание и развитие студенческого клуба",
"term": 6,
"course_project": False
},
{
"id": 6829,
"name": "Создание клиент-серверных приложений",
"term": 8,
"course_project": False
},
{
"id": 6824,
"name": "Создание программного обеспечения инфокоммуникационных систем",
"term": 8,
"course_project": False
},
{
"id": 34131,
"name": "Создание технологического бизнеса",
"term": 2,
"course_project": False
},
{
"id": 26551,
"name": "<NAME>",
"term": 2,
"course_project": False
},
{
"id": 29416,
"name": "Сорбенты в системах очистки",
"term": 2,
"course_project": False
},
{
"id": 32670,
"name": "Состав и структура пищевого сырья животного и растительного происхождения",
"term": 8,
"course_project": False
},
{
"id": 32275,
"name": "Социально-экономический анализ регионов и отраслей",
"term": 2,
"course_project": False
},
{
"id": 32275,
"name": "Социально-экономический анализ регионов и отраслей",
"term": 2,
"course_project": True
},
{
"id": 31079,
"name": "Социология академического мира",
"term": 2,
"course_project": False
},
{
"id": 36129,
"name": "Спектральные и поляризационные оптические системы",
"term": 8,
"course_project": False
},
{
"id": 27301,
"name": "Специализированные пакеты программ для моделирования процессов",
"term": 2,
"course_project": False
},
{
"id": 26559,
"name": "Специальные вопросы прикладной оптики",
"term": 2,
"course_project": False
},
{
"id": 26559,
"name": "Специальные вопросы прикладной оптики",
"term": 2,
"course_project": True
},
{
"id": 7082,
"name": "Специальные вопросы проектирования оптико-электронных приборов и систем",
"term": 8,
"course_project": False
},
{
"id": 20843,
"name": "Специальные вопросы проектирования приборов фотоники",
"term": 8,
"course_project": False
},
{
"id": 32932,
"name": "Специальные главы физики полупроводников",
"term": 2,
"course_project": False
},
{
"id": 19073,
"name": "Специальные методы обработки пищевого сырья",
"term": 8,
"course_project": False
},
{
"id": 19073,
"name": "Специальные методы обработки пищевого сырья",
"term": 10,
"course_project": False
},
{
"id": 6727,
"name": "Специальные оптические элементы",
"term": 8,
"course_project": False
},
{
"id": 34651,
"name": "Специальные разделы линейной алгебры",
"term": 2,
"course_project": False
},
{
"id": 30916,
"name": "Специальные разделы материалов фотоники",
"term": 2,
"course_project": False
},
{
"id": 34453,
"name": "Специальные разделы неорганической химии / Special sections of inorganic chemistry",
"term": 2,
"course_project": False
},
{
"id": 26570,
"name": "Специальные разделы оптического материаловедения",
"term": 2,
"course_project": False
},
{
"id": 14454,
"name": "Специальные разделы теории вероятностей и математической статистики",
"term": 8,
"course_project": False
},
{
"id": 955,
"name": "Специальные разделы теории управления",
"term": 8,
"course_project": False
},
{
"id": 26569,
"name": "Специальные разделы физики",
"term": 2,
"course_project": False
},
{
"id": 26569,
"name": "Специальные разделы физики",
"term": 4,
"course_project": False
},
{
"id": 34684,
"name": "Специальные разделы функционального анализа",
"term": 4,
"course_project": False
},
{
"id": 34451,
"name": "Спинтроника / Spintronics",
"term": 2,
"course_project": False
},
{
"id": 29563,
"name": "Средства Web-программирования",
"term": 6,
"course_project": False
},
{
"id": 29599,
"name": "Средства компьютерного моделирования при разработке изделий",
"term": 6,
"course_project": False
},
{
"id": 31400,
"name": "Средства моделирования предприятия / Enterprise Modelling Applications",
"term": 2,
"course_project": False
},
{
"id": 35070,
"name": "Стандартизация в области информационных технологий",
"term": 2,
"course_project": False
},
{
"id": 26591,
"name": "Статистика",
"term": 2,
"course_project": False
},
{
"id": 26593,
"name": "<NAME>",
"term": 6,
"course_project": False
},
{
"id": 26595,
"name": "Статистические методы в инженерных исследованиях",
"term": 4,
"course_project": False
},
{
"id": 26594,
"name": "Статистические методы контроля и управления",
"term": 2,
"course_project": False
},
| |
#!/usr/bin/env python
# Copyright (C) 2014 Open Data ("Open Data" refers to
# one or more of the following companies: Open Data Partners LLC,
# Open Data Research LLC, or Open Data Capital LLC.)
#
# This file is part of Hadrian.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import math
import threading
import time
import random
import struct
from avro.datafile import DataFileReader, DataFileWriter
from avro.io import DatumReader, DatumWriter
from six.moves import range
from titus.errors import *
import titus.pfaast
import titus.datatype
import titus.fcn
import titus.options
import titus.P as P
import titus.reader
import titus.signature
import titus.util
from titus.util import DynamicScope
import titus.version
from titus.pfaast import EngineConfig
from titus.pfaast import Cell as AstCell
from titus.pfaast import Pool as AstPool
from titus.pfaast import FcnDef
from titus.pfaast import FcnRef
from titus.pfaast import FcnRefFill
from titus.pfaast import CallUserFcn
from titus.pfaast import Call
from titus.pfaast import Ref
from titus.pfaast import LiteralNull
from titus.pfaast import LiteralBoolean
from titus.pfaast import LiteralInt
from titus.pfaast import LiteralLong
from titus.pfaast import LiteralFloat
from titus.pfaast import LiteralDouble
from titus.pfaast import LiteralString
from titus.pfaast import LiteralBase64
from titus.pfaast import Literal
from titus.pfaast import NewObject
from titus.pfaast import NewArray
from titus.pfaast import Do
from titus.pfaast import Let
from titus.pfaast import SetVar
from titus.pfaast import AttrGet
from titus.pfaast import AttrTo
from titus.pfaast import CellGet
from titus.pfaast import CellTo
from titus.pfaast import PoolGet
from titus.pfaast import PoolTo
from titus.pfaast import PoolDel
from titus.pfaast import If
from titus.pfaast import Cond
from titus.pfaast import While
from titus.pfaast import DoUntil
from titus.pfaast import For
from titus.pfaast import Foreach
from titus.pfaast import Forkeyval
from titus.pfaast import CastCase
from titus.pfaast import CastBlock
from titus.pfaast import Upcast
from titus.pfaast import IfNotNull
from titus.pfaast import BinaryFormatter
from titus.pfaast import Pack
from titus.pfaast import Unpack
from titus.pfaast import Doc
from titus.pfaast import Error
from titus.pfaast import Try
from titus.pfaast import Log
from titus.pfaast import Method
from titus.pfaast import ArrayIndex
from titus.pfaast import MapIndex
from titus.pfaast import RecordIndex
from titus.pmml.reader import pmmlToAst
class GeneratePython(titus.pfaast.Task):
"""A ``titus.pfaast.Task`` for turning PFA into executable Python."""
@staticmethod
def makeTask(style):
"""Make a ``titus.genpy.GeneratePython`` Task with a particular style.
Currently, the only style is "pure" (``titus.genpy.GeneratePythonPure``).
"""
if style == "pure":
return GeneratePythonPure()
else:
raise NotImplementedError("unrecognized style " + style)
def commandsMap(self, codes, indent):
    """Join generated statements into the action body of a map-type engine.

    All but the last statement are emitted verbatim; the last one is
    captured in ``last``, the finished-action counter is bumped, and
    ``last`` is returned.

    :param codes: list of generated Python statement strings (non-empty)
    :param indent: whitespace prefix applied to every emitted line
    :return: the concatenated, newline-terminated code block
    """
    lines = [indent + code + "\n" for code in codes[:-1]]
    lines.append(indent + "last = " + codes[-1] + "\n")
    lines.append(indent + "self.actionsFinished += 1\n")
    lines.append(indent + "return last\n")
    return "".join(lines)
def commandsEmit(self, codes, indent):
    """Join generated statements into the action body of an emit-type engine.

    Every statement is emitted verbatim (results flow through the
    engine's ``emit`` callback, not a return value), then the
    finished-action counter is bumped.

    :param codes: list of generated Python statement strings
    :param indent: whitespace prefix applied to every emitted line
    :return: the concatenated, newline-terminated code block
    """
    lines = [indent + code + "\n" for code in codes]
    lines.append(indent + "self.actionsFinished += 1\n")
    return "".join(lines)
def commandsFold(self, codes, indent):
    """Join generated statements into the action body of a fold-type engine.

    Exposes the running ``tally`` to the scope first, captures the last
    statement's value in ``last``, stores it back into ``self.tally``,
    bumps the finished-action counter, and returns the tally.

    :param codes: list of generated Python statement strings (non-empty)
    :param indent: whitespace prefix applied to every emitted line
    :return: the concatenated, newline-terminated code block
    """
    lines = [indent + "scope.let({'tally': self.tally})\n"]
    lines.extend(indent + code + "\n" for code in codes[:-1])
    lines.append(indent + "last = " + codes[-1] + "\n")
    lines.append(indent + "self.tally = last\n")
    lines.append(indent + "self.actionsFinished += 1\n")
    lines.append(indent + "return self.tally\n")
    return "".join(lines)
def commandsFoldMerge(self, codes, indent):
    """Join generated statements into the merge body of a fold-type engine.

    Like :meth:`commandsFold` but without re-binding ``tally`` in the
    scope and without touching the finished-action counter: the last
    statement's value becomes the new tally and is returned.

    :param codes: list of generated Python statement strings (non-empty)
    :param indent: whitespace prefix applied to every emitted line
    :return: the concatenated, newline-terminated code block
    """
    lines = [indent + code + "\n" for code in codes[:-1]]
    lines.append(indent + "last = " + codes[-1] + "\n")
    lines.append(indent + "self.tally = last\n")
    lines.append(indent + "return self.tally\n")
    return "".join(lines)
def commandsBeginEnd(self, codes, indent):
    """Join generated statements into a begin or end method body.

    Each statement is emitted verbatim on its own line; no value is
    captured or returned by the generated code.

    :param codes: list of generated Python statement strings
    :param indent: whitespace prefix applied to every emitted line
    :return: the concatenated, newline-terminated code block
    """
    out = []
    for code in codes:
        out.append(indent + code + "\n")
    return "".join(out)
def reprPath(self, path):
    """Render an "attr"/"cell"/"pool" extraction path as a
    comma-separated string of index expressions for generated code.

    :param path: sequence of ``titus.pfaast`` index objects
        (``ArrayIndex``, ``MapIndex``, or ``RecordIndex``)
    :return: string such as ``"i0, k1, 'fieldName'"``
    :raises Exception: if a path element is not one of the three
        recognized index types
    """
    out = []
    for p in path:
        if isinstance(p, ArrayIndex):
            # array index: already a generated code snippet
            out.append(p.i)
        elif isinstance(p, MapIndex):
            # map key: already a generated code snippet
            out.append(p.k)
        elif isinstance(p, RecordIndex):
            # record field name: a plain string, so quote it
            out.append(repr(p.f))
        else:
            # fix: the original raised a bare Exception with no
            # message, making failures undiagnosable
            raise Exception("unrecognized path index type: " + repr(p))
    return ", ".join(out)
def __call__(self, context, engineOptions):
"""Turn a PFA Context into Python."""
if isinstance(context, EngineConfig.Context):
if context.name is None:
name = titus.util.uniqueEngineName()
else:
name = context.name
begin, beginSymbols, beginCalls = context.begin
action, actionSymbols, actionCalls = context.action
end, endSymbols, endCalls = context.end
callGraph = {"(begin)": beginCalls, "(action)": actionCalls, "(end)": endCalls}
if context.merge is not None:
mergeTasks, mergeSymbols, mergeCalls = context.merge
callGraph["(merge)"] = mergeCalls
for fname, fctx in context.fcns:
callGraph[fname] = fctx.calls
out = ["class PFA_" + name + """(PFAEngine):
def __init__(self, cells, pools, config, options, log, emit, zero, instance, rand):
self.actionsStarted = 0
self.actionsFinished = 0
self.cells = cells
self.pools = pools
self.config = config
self.inputType = config.input
self.outputType = config.output
self.options = options
self.log = log
self.emit = emit
self.instance = instance
self.rand = rand
self.callGraph = """ + repr(callGraph) + "\n"]
if context.method == Method.FOLD:
out.append(" self.tally = zero\n")
out.append(""" def initialize(self):
self
""")
for ufname, fcnContext in context.fcns:
out.append(" self.f[" + repr(ufname) + "] = " + self(fcnContext, engineOptions) + "\n")
if len(begin) > 0:
out.append("""
def begin(self):
state = ExecutionState(self.options, self.rand, 'action', self.parser)
scope = DynamicScope(None)
scope.let({'name': self.config.name, 'instance': self.instance, 'metadata': self.config.metadata})
if self.config.version is not None:
scope.let({'version': self.config.version})
""" + self.commandsBeginEnd(begin, " "))
else:
out.append("""
def begin(self):
pass
""")
if context.method == Method.MAP:
commands = self.commandsMap(action, " ")
elif context.method == Method.EMIT:
commands = self.commandsEmit(action, " ")
elif context.method == Method.FOLD:
commands = self.commandsFold(action, " ")
out.append("""
def action(self, input, check=True):
if check:
input = checkData(input, self.inputType)
state = ExecutionState(self.options, self.rand, 'action', self.parser)
scope = DynamicScope(None)
for cell in self.cells.values():
cell.maybeSaveBackup()
for pool in self.pools.values():
pool.maybeSaveBackup()
self.actionsStarted += 1
try:
scope.let({'input': input, 'name': self.config.name, 'instance': self.instance, 'metadata': self.config.metadata, 'actionsStarted': self.actionsStarted, 'actionsFinished': self.actionsFinished})
if self.config.version is not None:
scope.let({'version': self.config.version})
""" + commands)
out.append(""" except Exception:
for cell in self.cells.values():
cell.maybeRestoreBackup()
for pool in self.pools.values():
pool.maybeRestoreBackup()
raise
""")
if context.merge is not None:
out.append("""
def merge(self, tallyOne, tallyTwo):
state = ExecutionState(self.options, self.rand, 'merge', self.parser)
scope = DynamicScope(None)
for cell in self.cells.values():
cell.maybeSaveBackup()
for pool in self.pools.values():
pool.maybeSaveBackup()
try:
scope.let({'tallyOne': tallyOne, 'tallyTwo': tallyTwo, 'name': self.config.name, 'instance': self.instance, 'metadata': self.config.metadata})
if self.config.version is not None:
scope.let({'version': self.config.version})
""" + self.commandsFoldMerge(mergeTasks, " "))
out.append(""" except Exception:
for cell in self.cells.values():
cell.maybeRestoreBackup()
for pool in self.pools.values():
pool.maybeRestoreBackup()
raise
""")
if len(end) > 0:
tallyLine = ""
if context.method == Method.FOLD:
tallyLine = """ scope.let({'tally': self.tally})\n"""
out.append("""
def end(self):
state = ExecutionState(self.options, self.rand, 'action', self.parser)
scope = DynamicScope(None)
scope.let({'name': self.config.name, 'instance': self.instance, 'metadata': self.config.metadata, 'actionsStarted': self.actionsStarted, 'actionsFinished': self.actionsFinished})
if self.config.version is not None:
scope.let({'version': self.config.version})
""" + tallyLine + self.commandsBeginEnd(end, " "))
else:
out.append("""
def end(self):
pass
""")
out.append("""
def pooldel(self, name, item):
p = self.pools[name]
try:
del p.value[item]
except KeyError:
pass
return None
""")
return "".join(out)
elif isinstance(context, FcnDef.Context):
return "labeledFcn(lambda state, scope: do(" + ", ".join(context.exprs) + "), [" + ", ".join(map(repr, context.paramNames)) + "])"
elif isinstance(context, FcnRef.Context):
return "self.f[" + repr(context.fcn.name) + "]"
elif isinstance(context, FcnRefFill.Context):
reducedArgs = ["\"$" + str(x) + "\"" for x in range(len(context.fcnType.params))]
j = 0
args = []
for name in context.originalParamNames:
if name in context.argTypeResult:
args.append(context.argTypeResult[name][1])
else:
args.append("scope.get(\"$" + str(j) + "\")")
j += 1
return "labeledFcn(lambda state, scope: call(state, DynamicScope(scope), self.f[" + repr(context.fcn.name) + "], [" + ", ".join(args) + "]), [" + ", ".join(reducedArgs) + "])"
elif isinstance(context, CallUserFcn.Context):
return "call(state, DynamicScope(None), self.f['u.' + " + context.name + "], [" + ", ".join(context.args) + "])"
elif isinstance(context, Call.Context):
return context.fcn.genpy(context.paramTypes + [context.retType], context.args, context.pos)
elif isinstance(context, Ref.Context):
return "scope.get({0})".format(repr(context.name))
elif isinstance(context, LiteralNull.Context):
return "None"
elif isinstance(context, LiteralBoolean.Context):
return str(context.value)
elif isinstance(context, LiteralInt.Context):
return str(context.value)
elif isinstance(context, LiteralLong.Context):
return str(context.value)
elif isinstance(context, LiteralFloat.Context):
return str(float(context.value))
elif isinstance(context, LiteralDouble.Context):
return str(float(context.value))
elif isinstance(context, LiteralString.Context):
return repr(context.value)
elif isinstance(context, LiteralBase64.Context):
return repr(context.value)
elif isinstance(context, Literal.Context):
return repr(titus.datatype.jsonDecoder(context.retType, json.loads(context.value)))
elif isinstance(context, NewObject.Context):
return "{" + ", ".join(repr(k) + ": " + v for k, v in context.fields.items()) + "}"
elif isinstance(context, NewArray.Context):
return "[" + ", ".join(context.items) + "]"
elif isinstance(context, Do.Context):
return "do(" + ", ".join(context.exprs) + ")"
elif isinstance(context, Let.Context):
return "scope.let({" + ", ".join(repr(n) + ": " + e for n, t, e in context.nameTypeExpr) + "})"
elif isinstance(context, SetVar.Context):
return "scope.set({" + ", ".join(repr(n) + ": " + e for n, t, e in | |
# py/orbit/sns_linac/LinacLatticeFactory.py
"""
The Linac Lattice Factory generates the Linac Accelerator Lattice from the information
inside of the LinacStructureTree instance which in turn was generated by the LinacParser.
The Linac Lattice Factory uses a predefined set of Linac Acc Elements. If you do not have
the LinacStructureTree instance you can create the Linac Accelerator Lattice directly in
the script.
"""
import os
import math
# import the linac structure tree with all sequences and nodes, but without drifts
from LinacParser import LinacStructureTree
from LinacAccNodes import BaseLinacNode, LinacNode, LinacMagnetNode, MarkerLinacNode, Drift, Quad, BaseRF_Gap, Bend
from LinacAccNodes import DCorrectorH, DCorrectorV
from LinacAccNodes import RF_Cavity, Sequence
from LinacAccLattice import LinacAccLattice
# import general accelerator elements
from orbit.lattice import AccNode
# import pyORBIT Python utilities classes for objects with names, types, and dictionary parameters
from orbit.utils import orbitFinalize
class LinacLatticeFactory():
"""
The Linac Lattice Factory generates the Linac Accelerator Lattice
from the information inside of the LinacStructureTree instance.
"""
def __init__(self, ltree):
    """Create a lattice factory around a parsed linac structure tree.

    :param ltree: LinacStructureTree produced by the LinacParser;
        anything else aborts via ``orbitFinalize``.
    """
    if not isinstance(ltree, LinacStructureTree):
        msg = os.linesep.join([
            "The LinacLatticeFactory constructor: you have to specify the LinacStructureTree instance as input!",
            "Stop.",
            ""])
        orbitFinalize(msg)
    self.ltree = ltree
    # Tolerance used when comparing positions and lengths.
    self.zeroDistance = 0.00001
    # Maximal drift length; longer drifts are divided into pieces.
    self.maxDriftLength = 1.
def setMaxDriftLength(self, maxDriftLength = 1.0):
    """Set the maximal drift length used when drifts are generated
    for space charge calculations and diagnostics.

    :param maxDriftLength: new maximal drift length (defaults to 1.0)
    """
    self.maxDriftLength = maxDriftLength
def getMaxDriftLength(self):
    """Return the maximal drift length used when drifts are generated
    for space charge calculations and diagnostics."""
    return self.maxDriftLength
def getLinacAccLattice(self,names):
"""
Returns the linac accelerator lattice for specified sequence names.
"""
if(len(names) < 1):
msg = "The LinacLatticeFactory method getLinacAccLattice(names): you have to specify the names array!"
msg = msg + os.linesep
msg = msg + "Stop."
msg = msg + os.linesep
orbitFinalize(msg)
#let's check that the names in good order ==start==
seqencesLocal = self.ltree.getSeqs()
seqencesLocalNames = []
for seq in seqencesLocal:
seqencesLocalNames.append(seq.getName())
ind_old = -1
count = 0
for name in names:
ind = seqencesLocalNames.index(name)
if(ind < 0 or (count > 0 and ind != (ind_old + 1))):
msg = "The LinacLatticeFactory method getLinacAccLattice(names): sequence names array is wrong!"
msg = msg + os.linesep
msg = msg + "existing names=" + str(seqencesLocalNames)
msg = msg + os.linesep
msg = msg + "sequence names="+str(names)
orbitFinalize(msg)
ind_old = ind
count += 1
# let's check that the names in good order ==stop==
ind_start = seqencesLocalNames.index(names[0])
sequences = self.ltree.getSeqs()[ind_start:ind_start+len(names)]
#----make linac lattice
linacAccLattice = LinacAccLattice(self.ltree.getName())
#There are the following possible types of elements in the linac tree:
#QUAD - quadrupole
#RFGAP - RF Gap
#DCH - horizontal dipole corrector
#DCV - vertical dipole corrector
#Marker - anything else with the length equals to 0
#Before putting everything into the linacAccLattice we will create sequences
# with all nodes.
#----------------------------------------------------------------------
# The DRIFTS will be generated additionally and put into right places
#----------------------------------------------------------------------
def positionComp(node1,node2):
if(node1.getParam("pos") > node2.getParam("pos")):
return 1
else:
if(node1.getParam("pos") == node2.getParam("pos")):
return 0
return -1
accSeqs = []
accRF_Cavs = []
seqPosition = 0.
for seq in sequences:
#print "debug =========================================== seq=",seq.getName()
accSeq = Sequence(seq.getName())
accSeq.setLinacAccLattice(linacAccLattice)
accSeq.setLength(float(seq.getLength()))
accSeq.setPosition(seqPosition)
seqPosition = seqPosition + accSeq.getLength()
accSeqs.append(accSeq)
#these nodes are not AccNodes. They are from linac parser
nodes = seq.getNodes()
#put nodes in order according to the position in the sequence
for node in nodes:
node.setParam("pos",float(node.getParam("pos")))
nodes.sort(positionComp)
#rf_cav_names is an auxiliary array with RF Cav. names
rf_cav_names = []
#array of nodes that are AccNodes with zero length
#They can be positioned inside the thick nodes, and this will be done at the end
#of this constructor
thinNodes = []
for node in nodes:
#print "debug node=",node.getName()," pos=",node.getParam("pos")
#------------QUAD-----------------
if(node.getType() == "QUAD"):
accNode = Quad(node.getName())
accNode.updateParamsDict(node.getParamsDict())
accNode.setParam("dB/dr",node.getParam("field"))
accNode.setParam("field",node.getParam("field"))
accNode.setLength(node.getParam("effLength"))
if(0.5*accNode.getLength() > self.maxDriftLength):
accNode.setnParts(2*int(0.5*accNode.getLength()/self.maxDriftLength + 1.5 - 1.0e-12))
accSeq.addNode(accNode)
#------------BEND-----------------
elif(node.getType() == "BEND"):
accNode = Bend(node.getName())
accNode.updateParamsDict(node.getParamsDict())
accNode.setParam("poles",[int(x) for x in eval(node.getParam("poles"))])
accNode.setParam("kls", [x for x in eval(node.getParam("kls"))])
accNode.setParam("skews",[int(x) for x in eval(node.getParam("skews"))])
accNode.setParam("ea1",node.getParam("ea1"))
accNode.setParam("ea2",node.getParam("ea2"))
accNode.setParam("theta",node.getParam("theta"))
accNode.setLength(node.getParam("effLength"))
if(0.5*accNode.getLength() > self.maxDriftLength):
accNode.setnParts(2*int(0.5*accNode.getLength()/self.maxDriftLength + 1.5 - 1.0e-12))
accSeq.addNode(accNode)
#------------RF_Gap-----------------
elif(node.getType() == "RFGAP"):
accNode = BaseRF_Gap(node.getName())
accNode.updateParamsDict(node.getParamsDict())
accNode.setParam("gapOffset",node.getParam("gapOffset"))
accNode.setLength(node.getParam("gapLength"))
accNode.setParam("amp",node.getParam("amp"))
#the parameter from XAL in MeV, we use GeV
#accNode.setParam("E0TL",1.0e-3*node.getParam("E0TL"))
accNode.setParam("E0TL",0.001*node.getParam("E0TL"))
accNode.setParam("length",node.getParam("gapLength"))
accNode.setParam("gapLength",node.getParam("gapLength"))
accNode.setParam("modePhase",node.getParam("modePhase"))
rf_cav_name = node.getParam("parentCavity")
if(rf_cav_name not in rf_cav_names):
accNode.setParam("firstPhase", (math.pi/180.)*accNode.getParam("firstPhase"))
rf_cav_names.append(rf_cav_name)
accRF_Cav = RF_Cavity(rf_cav_name)
accRF_Cavs.append(accRF_Cav)
accRF_Cav._setDesignPhase(accNode.getParam("firstPhase"))
accRF_Cav.setPhase(accNode.getParam("firstPhase"))
accRF_Cav._setDesignAmp(1.)
accRF_Cav.setAmp(1.)
accRF_Cav.setFrequency(seq.getParam("rfFrequency"))
accRF_Cav = accRF_Cavs[len(accRF_Cavs) - 1]
accRF_Cav.addRF_GapNode(accNode)
accSeq.addNode(accNode)
else:
if(node.getParam("length") != 0.):
msg = "The LinacLatticeFactory method getLinacAccLattice(names): there is a strange element!"
msg = msg + os.linesep
msg = msg + "name=" + node.getName()
msg = msg + os.linesep
msg = msg + "type="+node.getType()
msg = msg + os.linesep
msg = msg + "length(should be 0.)="+str(node.getParam("length"))
orbitFinalize(msg)
thinNodes.append(node)
#insert the drifts ======================start ===========================
#-----now check the integrity: quads and rf_gaps should not overlap
#-----and create drifts
copyAccNodes = accSeq.getNodes()[:]
firstNode = copyAccNodes[0]
lastNode = copyAccNodes[len(copyAccNodes)-1]
driftNodes_before = []
driftNodes_after = []
#insert the drift before the first element if its half length less than its position
if(math.fabs(firstNode.getLength()/2.0 - firstNode.getParam("pos")) > self.zeroDistance):
if(firstNode.getLength()/2.0 > firstNode.getParam("pos")):
msg = "The LinacLatticeFactory method getLinacAccLattice(names): the first node is too long!"
msg = msg + os.linesep
msg = msg + "name=" + firstNode.getName()
msg = msg + os.linesep
msg = msg + "type=" + firstNode.getType()
msg = msg + os.linesep
msg = msg + "length=" + str(firstNode.getLength())
msg = msg + os.linesep
msg = msg + "pos=" + str(firstNode.getParam("pos"))
orbitFinalize(msg)
else:
driftNodes = []
driftLength = firstNode.getParam("pos") - firstNode.getLength()/2.0
nDrifts = int(driftLength/self.maxDriftLength) + 1
driftLength = driftLength/nDrifts
for idrift in range(nDrifts):
drift = Drift(accSeq.getName()+":"+firstNode.getName()+":"+str(idrift+1)+":drift")
drift.setLength(driftLength)
drift.setParam("pos",0.+drift.getLength()*(idrift+0.5))
driftNodes.append(drift)
driftNodes_before = driftNodes
#insert the drift after the last element if its half length plus position is less than the sequence length
if(math.fabs(lastNode.getLength()/2.0 + lastNode.getParam("pos") - accSeq.getLength()) > self.zeroDistance):
if(lastNode.getLength()/2.0 + lastNode.getParam("pos") > accSeq.getLength()):
msg = "The LinacLatticeFactory method getLinacAccLattice(names): the last node is too long!"
msg = msg + os.linesep
msg = msg + "name=" + lastNode.getName()
msg = msg + os.linesep
msg = msg + "type=" + lastNode.getType()
msg = msg + os.linesep
msg = msg + "length=" + str(lastNode.getLength())
msg = msg + os.linesep
msg = msg + "pos=" + str(lastNode.getParam("pos"))
msg = msg + os.linesep
msg = msg + "sequence name=" + accSeq.getName()
msg = msg + os.linesep
msg = msg + "sequence length=" + str(accSeq.getLength())
orbitFinalize(msg)
else:
driftNodes = []
driftLength = accSeq.getLength() - (lastNode.getParam("pos") + lastNode.getLength()/2.0)
nDrifts = int(driftLength/self.maxDriftLength) + 1
driftLength = driftLength/nDrifts
for idrift in range(nDrifts):
drift = Drift(accSeq.getName()+":"+lastNode.getName()+":"+str(idrift+1)+":drift")
drift.setLength(driftLength)
drift.setParam("pos",lastNode.getParam("pos")+lastNode.getLength()/2.0 + drift.getLength()*(idrift+0.5))
driftNodes.append(drift)
driftNodes_after = driftNodes
#now move on and generate drifts between (i,i+1) nodes from copyAccNodes
newAccNodes = driftNodes_before
for node_ind in range(len(copyAccNodes)-1):
accNode0 = copyAccNodes[node_ind]
newAccNodes.append(accNode0)
accNode1 = copyAccNodes[node_ind+1]
ind_of_node = accSeq.getNodes().index(accNode1)
dist = accNode1.getParam("pos") - accNode1.getLength()/2 - (accNode0.getParam("pos") + accNode0.getLength()/2)
if(dist < 0.):
msg = "The LinacLatticeFactory method getLinacAccLattice(names): two nodes are overlapping!"
msg = msg + os.linesep
msg = msg + "sequence name=" + accSeq.getName()
msg = msg + os.linesep
msg = msg + "node 0 name=" + accNode0.getName() + " pos="+ str(accNode0.getParam("pos")) + " L="+str(accNode0.getLength())
msg = msg + os.linesep
msg = msg + "node 1 name=" + accNode1.getName() + " pos="+ str(accNode1.getParam("pos")) + " L="+str(accNode1.getLength())
msg = msg + os.linesep
orbitFinalize(msg)
elif(dist > self.zeroDistance):
driftNodes = []
nDrifts = int(dist/self.maxDriftLength) + 1
driftLength = dist/nDrifts
for idrift in range(nDrifts):
drift = Drift(accSeq.getName()+":"+accNode0.getName()+":"+str(idrift+1)+":drift")
drift.setLength(driftLength)
drift.setParam("pos",accNode0.getParam("pos")+accNode0.getLength()*0.5+drift.getLength()*(idrift+0.5))
driftNodes.append(drift)
newAccNodes += driftNodes
else:
pass
newAccNodes.append(lastNode)
newAccNodes += driftNodes_after
accSeq.setNodes(newAccNodes)
#insert the drifts ======================stop ===========================
#========================================================================
#Now we will go over all zero length nodes and attach them into the quads
#or drifts. We cannot put anything inside RF Cavity.
# zero length elements insertion ========== start ======================
# if a zero-length element is inside a quad it will be placed inside this
# quad
accQuads = []
for accNode in accSeq.getNodes():
if(isinstance(accNode,Quad)): accQuads.append(accNode)
unusedThinNodes = []
for node in thinNodes:
position = node.getParam("pos")
quad_found = False
for quad in accQuads:
pos = quad.getParam("pos")
L = quad.getLength()
nParts = quad.getnParts()
if(abs(position - pos) < self.zeroDistance or (position > pos - L/2.0 and position < pos +L/2.0)):
accNode = None
if(node.getType() == "DCV" or node.getType() == "DCH"):
if(node.getType() == "DCV"): accNode = DCorrectorV(node.getName())
if(node.getType() == "DCH"): accNode = DCorrectorH(node.getName())
accNode.setParam("effLength",float(node.getParam("effLength")))
else:
accNode = MarkerLinacNode(node.getName())
accNode.updateParamsDict(node.getParamsDict())
accNode.setParam("pos",quad.getParam("pos"))
quad.addChildNode(accNode, place = AccNode.BODY, part_index = (nParts/2) - 1 , place_in_part = AccNode.AFTER)
quad_found = True
break
if(not quad_found): unusedThinNodes.append(node)
#remove all assigned zero-length nodes from list of thin nodes
thinNodes = unusedThinNodes
def posCompFunc(node1,node2):
if(node1.getParam("pos") < node2.getParam("pos")):
return True
return False
thinNodes.sort(posCompFunc)
#----------------
# chop the drifts if the thin element is inside or insert this element into
# the sequence at the end or the beginning of the drift
usedThinNodes = []
for node in thinNodes:
#print "debug chop drift thin node=",node.getName()
position = node.getParam("pos")
driftNode = self.__getDriftThinNode(position,accSeq)
if(driftNode != None):
usedThinNodes.append(node)
pos = driftNode.getParam("pos")
L = driftNode.getLength()
ind_insertion = accSeq.getNodes().index(driftNode)
accNode = None
if(node.getType() == "DCV" or node.getType() == "DCH"):
if(node.getType() == "DCV"): accNode = DCorrectorV(node.getName())
if(node.getType() == "DCH"): accNode = DCorrectorH(node.getName())
accNode.setParam("effLength",float(node.getParam("effLength")))
else:
accNode = MarkerLinacNode(node.getName())
accNode.updateParamsDict(node.getParamsDict())
accNode.setParam("pos",position)
if(abs(position - (pos - L/2.0)) < self.zeroDistance):
#insert before the drift
accSeq.addNode(accNode, index = ind_insertion)
elif(abs(position - (pos + L/2.0)) < self.zeroDistance):
#insert after the drift
accSeq.addNode(accNode, index = ind_insertion+1)
else:
#we replace this drift with two new
drift_node_name = driftNode.getName()
ind_name_pos = drift_node_name.find(":drift")
drift_node_name = drift_node_name[0:ind_name_pos]
drift0 = Drift(drift_node_name+":1:drift")
drift0.setLength(position - (pos - L/2.0))
drift0.setParam("pos",(pos - L/2.0) + drift0.getLength()/2.0)
drift1 = Drift(drift_node_name+":2:drift")
drift1.setLength((pos + L/2.0) - position)
drift1.setParam("pos",(pos + L/2.0) - drift1.getLength()/2.0)
accSeq.getNodes().remove(driftNode)
accSeq.addNode(drift0, index = ind_insertion)
accSeq.addNode(accNode, index = ind_insertion + 1)
accSeq.addNode(drift1, index = ind_insertion + 2)
#remove all assigned zero-length nodes from list of thin nodes
for node in usedThinNodes:
thinNodes.remove(node)
if(len(thinNodes) != 0):
print "==========WARNING!!!!==============="
print "The seqence =",accSeq.getName()," has nodes that are not assigned to the lattice:"
for node in thinNodes:
print "unused node =",node.getName()," pos=",node.getParam("pos")
# add all AccNodes to the linac | |
#!/usr/bin/python
#finance_data.py
'''Holds functions that perform specific tasks related to stocks'''
__version__ = "1.0.0"
__author__ = '<NAME>'
#cx-freeze
import yfinance as yf
def current_price(stock):
    """Print and return the most recent price of ``stock``.

    :param stock: ticker symbol, e.g. ``"amd"``
    :return: the price as a string scraped from the printed history
        table (word index 13 of the whitespace-split table -- fragile;
        TODO: read the DataFrame column directly instead)
    """
    ticker = yf.Ticker(str(stock))
    # NOTE(review): "1h" looks like a yfinance interval rather than a
    # period -- confirm the intended window.
    hist = str(ticker.history(period=("1h")))
    l = hist.split()  # list form of hist split into individual words
    # fix: the original rebuilt a Ticker from the Ticker object itself
    # (yf.Ticker(str(stock)) on an already-constructed Ticker), a
    # useless second network-client construction -- removed.
    current_price = l[13]  # position in the table that holds the stock price
    print(hist)
    print(current_price)
    return current_price
if __name__ == "__main__":
    # Demo call; guarded so importing this module no longer triggers a
    # network request to Yahoo Finance at import time.
    current_price("amd")
def information(stock):
    """Fetch the yfinance info mapping for ``stock``.

    :param stock: ticker symbol
    :return: the ``info`` attribute of the corresponding Ticker
    """
    ticker = yf.Ticker(str(stock))
    return ticker.info
def history(stock, time):
    """Fetch the price history of ``stock`` as a printed table.

    :param stock: ticker symbol
    :param time: number of months of history to fetch
    :return: string rendering of the history table
    """
    ticker = yf.Ticker(str(stock))
    period = str(time) + "mo"
    return str(ticker.history(period=period))
def trend(stock, time):
    """Classify the price trend of ``stock`` over ``time`` months.

    Counts how many times a new running-lowest low and a new
    running-highest high appear in the period; the larger counter
    wins, and a difference greater than 5 makes the trend "Strong".

    :param stock: ticker symbol
    :param time: time period in months
    :return: one of "Strong Low trend", "Weak Low trend",
        "Strong High trend", "Weak High trend"; ``None`` when the two
        counters are exactly equal (implicit in the original code,
        preserved here)
    """
    ticker = yf.Ticker(str(stock))
    hist = str(ticker.history(period=(str(time)) + "mo"))
    l = hist.split()  # list form of hist split into individual words
    # Layout of the whitespace-split table: first data field at word
    # index 10, one row every 8 words -- assumed stable; TODO verify
    # against current yfinance/pandas output.
    START_OF_LIST = 10
    INTERVAL = 8
    END_OF_LIST = len(l) - 6
    # high is column offset +1, low is offset +2 within each row
    # (the original also collected open and close prices here but
    # never used them -- dead code removed)
    high_price = [l[i] for i in range(START_OF_LIST + 1, END_OF_LIST + 1, INTERVAL)]
    low_price = [l[i] for i in range(START_OF_LIST + 2, END_OF_LIST + 2, INTERVAL)]
    # Count how many times a new running minimum appears.
    low_counter = 0
    lowest_price = float(low_price[0])
    for value in low_price:
        if float(value) < lowest_price:
            low_counter = low_counter + 1
            lowest_price = float(value)
    # Count how many times a new running maximum appears.
    # fix: seed with a float for consistency with the low counter
    # (the original seeded with the raw string and relied on float()
    # at every comparison).
    high_counter = 0
    highest_price = float(high_price[0])
    for value in high_price:
        if float(value) > highest_price:
            high_counter = high_counter + 1
            highest_price = float(value)
    if low_counter > high_counter:  # more new lows: downward drift
        if abs(low_counter - high_counter) > 5:  # large gap
            return "Strong Low trend"
        return "Weak Low trend"
    if low_counter < high_counter:  # more new highs: upward drift
        if abs(high_counter - low_counter) > 5:  # large gap
            return "Strong High trend"
        return "Weak High trend"
    # counters equal: fall through and return None, as before
    return None
###########################
### Volume interpreter
###########################
def volume(stock, time):
    """Interpret the most recent trading-volume change for ``stock``.

    Low volume is considered bad, high volume good; on rising volume
    the direction of the open price picks buy vs. sell.

    :param stock: ticker symbol
    :param time: time period in months
    :return: "Equal Volume", "Low Volume", "High Volume: Buy",
        "High Volume: Sell", or ``None`` when volume rose but the open
        price did not move (implicit in the original, preserved)
    """
    ticker = yf.Ticker(str(stock))
    hist = str(ticker.history(period=(str(time)) + "mo"))
    l = hist.split()  # list form of hist split into individual words
    # Layout of the whitespace-split table: first data field at word
    # index 10, one row every 8 words -- assumed stable; TODO verify.
    START_OF_LIST = 10
    INTERVAL = 8
    END_OF_LIST = len(l) - 6
    open_price = [l[i] for i in range(START_OF_LIST, END_OF_LIST, INTERVAL)]
    volume = [l[i] for i in range(START_OF_LIST + 4, END_OF_LIST + 4, INTERVAL)]
    # NOTE(review): assumes at least two parsed rows; otherwise this
    # raises IndexError exactly like the original.
    # fix: compare numerically -- the original compared the raw table
    # strings, so e.g. "9" > "10" held lexicographically.
    last_vol = float(volume[-1])
    prev_vol = float(volume[-2])
    if last_vol == prev_vol:
        return "Equal Volume"
    if last_vol < prev_vol:  # shrinking volume
        return "Low Volume"
    # growing volume: open-price direction decides buy vs. sell
    if float(open_price[-1]) > float(open_price[-2]):
        return "High Volume: Buy"
    if float(open_price[-1]) < float(open_price[-2]):
        return "High Volume: Sell"
    return None
def change_during_day(stock, time):
    """Average intraday percent change of ``stock`` over ``time`` months.

    For every trading day the absolute percent difference between the
    open and the close (relative to the close) is computed, and the
    mean over all days is returned.

    :param stock: ticker symbol
    :param time: time period in months
    :return: average intraday percent change as a float; 0.0 when no
        rows could be parsed
    """
    ticker = yf.Ticker(str(stock))
    hist = str(ticker.history(period=(str(time)) + "mo"))
    l = hist.split()  # list form of hist split into individual words
    # Layout of the whitespace-split table: first data field at word
    # index 10, one row every 8 words -- assumed stable; TODO verify.
    # (the original also defined a dead END_OF_LIST = 169 that was
    # immediately overwritten, plus unused lists -- removed)
    START_OF_LIST = 10
    INTERVAL = 8
    END_OF_LIST = len(l) - 6
    open_price = [l[i] for i in range(START_OF_LIST, END_OF_LIST, INTERVAL)]
    close_price = [l[i] for i in range(START_OF_LIST + 3, END_OF_LIST + 3, INTERVAL)]
    if not close_price:
        # fix: the original divided by len(close_price) and crashed
        # with ZeroDivisionError on an empty table
        return 0.0
    # absolute percent move from open to close, relative to the close
    percent_change_during = [
        abs((float(c) - float(o)) / float(c)) * 100
        for o, c in zip(open_price, close_price)
    ]
    return sum(percent_change_during) / len(close_price)
def change_per_day(stock, time):
    """Determine the average day-to-day change of a stock's intraday swing.

    :param stock: ticker symbol of the stock to analyze
    :param time: time period of the data, in months
    :return: average relative change between consecutive days' intraday
        percent changes, or 0.0 when fewer than two rows are available
    """
    # Offsets into the whitespace-split string representation of the
    # yfinance history table (see change_during_day).
    START_OF_LIST = 10
    INTERVAL = 8
    stock = yf.Ticker(str(stock))
    hist = str(stock.history(period=str(time) + "mo"))
    l = hist.split()  # list form of hist split into individual words
    END_OF_LIST = len(l) - 6
    open_price = [l[i] for i in range(START_OF_LIST, END_OF_LIST, INTERVAL)]
    close_price = [l[i] for i in range(START_OF_LIST + 3, END_OF_LIST + 3, INTERVAL)]
    if len(close_price) < 2:
        # Need at least two days to form a day-to-day change.
        return 0.0
    # Absolute percent change between open and close for each day.
    percent_change_during = [
        abs((float(c) - float(o)) / float(c)) * 100
        for o, c in zip(open_price, close_price)
    ]
    # Relative change between consecutive days' intraday swings.
    percent_change_day = [
        abs((percent_change_during[i] - percent_change_during[i + 1])
            / percent_change_during[i])
        for i in range(len(percent_change_during) - 1)
    ]
    # Bug fix: the original divided by len(close_price) although only
    # len(close_price) - 1 day-to-day changes were summed, skewing the
    # average low. Divide by the number of changes actually collected.
    return sum(percent_change_day) / len(percent_change_day)
# show cashflow
def quarterly_cashflow(stock):
    """Fetch the quarterly cashflow statement for a ticker symbol.

    :param stock: ticker symbol of the company to look up
    :return: quarterly cashflow data as provided by yfinance
    """
    ticker = yf.Ticker(str(stock))
    return ticker.quarterly_cashflow
# show earnings
def quarterly_earnings(stock):
    """Fetch the quarterly earnings for a ticker symbol.

    :param stock: ticker symbol of the company to look up
    :return: quarterly earnings data as provided by yfinance
    """
    ticker = yf.Ticker(str(stock))
    return ticker.quarterly_earnings
# show sustainability
def sustainability(stock):
    """Fetch the sustainability data for a ticker symbol.

    :param stock: ticker symbol of the company to look up
    :return: sustainability data as provided by yfinance
    """
    ticker = yf.Ticker(str(stock))
    return ticker.sustainability
# show analysts recommendations
def analyst_recommendation(stock):
    """Fetch analyst recommendations for a ticker symbol.

    :param stock: ticker symbol of the company to look up
    :return: analyst recommendation history as provided by yfinance
    """
    ticker = yf.Ticker(str(stock))
    return ticker.recommendations
#### Calculating good news vs. Bad News ####
def news(stock):
"""analyzes analyst recommendations using keywords and assigns values to them
:param stock: stock that will be analyzed
:return recommendations value"""
stock = yf.Ticker(str(stock))
reco = str(stock.recommendations) # Stands for recomend
reco = reco.split()
reco.reverse()
del reco[15 :-1]
#### KEY WORDS ###
buy = reco.count("Buy") #Means price is going up = Good
sell = reco.count("Sell") #Means price is going down = Bad
hold = reco.count("Hold") #Means price is going to increase = Good
neutral = reco.count("Neutral") #Means price is not going to drastically change = Neutral
overweight = reco.count("Overweight") #Means stock is better value for money than others = Good
equalweight = reco.count("Equal-Weight") #Means stock is about the same value compared to others = Neutral
underweight = reco.count("Underweight") #Means stock is worse value than what it is assesed to be = Bad
perform = reco.count("Perform") #Means stock performance is on par with the industry average = Neutral
outperform = reco.count("Outperform") #Means stock performance will be slightly better than industry = Good
underperform = reco.count("Underperform") #Means stock performance will be slightly worse than industry = Bad
| |
import datetime
import itertools
import textwrap
import sys
from typing import List
import click
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy import not_, or_
from pyproj import CRS
from pyproj.exceptions import CRSError
import fire.cli
from fire.cli.utils import klargør_ident_til_søgning
from fire.api.model import (
Punkt,
PunktInformation,
PunktInformationType,
Koordinat,
Observation,
Boolean,
Srid,
)
# Click command group: subcommands below provide information about objects
# in FIRE. The Danish docstring doubles as the CLI help text shown to
# users, so it is left untranslated.
@click.group()
def info():
    """
    Information om objekter i FIRE
    """
    pass
def observation_linje(obs: Observation) -> str:
    """
    Format a single levelling observation as a fixed-width text line.

    Returns the empty string for deleted observations and for observation
    types other than 1 (geometric levelling) and 2 (trigonometric
    levelling).
    """
    if obs.observationstypeid > 2:
        return ""
    if obs.slettet:
        return ""

    # Common fields for both supported observation types.
    fra = obs.opstillingspunkt.ident  # setup point
    til = obs.sigtepunkt.ident  # sighting point
    dH = obs.value1  # height difference
    L = obs.value2  # distance
    N = int(obs.value3)  # number of setups
    tid = obs.observationstidspunkt.strftime("%Y-%m-%d %H:%M")
    grp = obs.gruppe
    oid = obs.objektid

    # Geometric levelling
    if obs.observationstypeid == 1:
        præs = int(obs.value7)
        eta_1 = obs.value4
        fejlfaktor = obs.value5
        centrering = obs.value6
        return f"G {præs} {tid} {dH:+09.6f} {L:05.1f} {N:2} {fra:12} {til:12} {fejlfaktor:3.1f} {centrering:4.2f} {eta_1:+07.2f} {grp:6} {oid:6}"

    # Trigonometric levelling
    if obs.observationstypeid == 2:
        fejlfaktor = obs.value4
        centrering = obs.value5
        return f"T 0 {tid} {dH:+09.6f} {L:05.1f} {N:2} {fra:12} {til:12} {fejlfaktor:3.1f} {centrering:4.2f} 0.00 {grp:6} {oid:6}"
def koordinat_linje(koord: Koordinat) -> str:
    """
    Construct coordinate output in accordance with the coordinate's
    dimensionality, unit and provenance.
    """
    native_or_transformed = "t"
    if koord.transformeret == Boolean.FALSE:
        native_or_transformed = "n"

    meta = f"{koord.t.strftime('%Y-%m-%d %H:%M')} {koord.srid.name:<15.15} {native_or_transformed} "

    # Check proj.db: is the coordinate system linear or angular?
    try:
        grader = False
        if CRS(koord.srid.name).axis_info[0].unit_name in ("degree", "radian"):
            grader = True
    except CRSError:
        # Ignore pyproj.exceptions.CRSError: assume unknown coordinate
        # systems have linear units, except for the special case NAD83G.
        if koord.srid.name == "GL:NAD83G":
            grader = True

    dimensioner = 0
    if koord.x is not None and koord.y is not None:
        dimensioner = 2
    if koord.z is not None:
        if dimensioner == 2:
            dimensioner = 3
        else:
            dimensioner = 1

    # Bug fix: with no coordinate components at all (dimensioner == 0) the
    # original left `linje` unbound and raised UnboundLocalError on return.
    # Fall back to the metadata prefix alone in that degenerate case.
    linje = meta

    if dimensioner == 1:
        linje = meta + f"{koord.z:.5f} ({koord.sz:.0f})"

    if dimensioner == 2:
        if grader:
            linje = (
                meta
                + f"{koord.x:.10f}, {koord.y:.10f} ({koord.sx:.0f}, {koord.sy:.0f})"
            )
        else:
            linje = (
                meta + f"{koord.x:.4f}, {koord.y:.4f} ({koord.sx:.0f}, {koord.sy:.0f})"
            )

    if dimensioner == 3:
        linje = meta + f"{koord.x:.10f}, {koord.y:.10f}, {koord.z:.5f}"
        if koord.sx is not None and koord.sy is not None and koord.sz is not None:
            linje += f" ({koord.sx:.0f}, {koord.sy:.0f}, {koord.sz:.0f})"

    return linje
def punktinforapport(punktinformationer: List[PunktInformation]) -> None:
    """
    Helper for 'punkt_fuld_rapport': print a formatted list of point
    information attributes.
    """
    # Continuation lines of multi-line texts are indented to line up with
    # the attribute-text column.
    indent = "\n" + " " * 30
    for info in punktinformationer:
        tekst = (info.tekst or "").replace("\n", indent).replace("\r", "").rstrip(" \n")
        tal = info.tal or ""
        if info.registreringtil:
            # closed (historic) point info: red text with a leading minus
            fire.cli.print(f" -{info.infotype.name:27} {tekst}{tal}", fg="red")
        else:
            fire.cli.print(f" {info.infotype.name:27} {tekst}{tal}")
def koordinatrapport(koordinater: List[Koordinat], options: str) -> None:
    """
    Helper for 'punkt_fuld_rapport': print a formatted coordinate list.

    options is a comma-separated string; 'ts' includes time-series
    coordinates, 'alle' includes superseded (historic) coordinates.
    """
    koordinater.sort(
        key=lambda x: (x.srid.name, x.t.strftime("%Y-%m-%dT%H:%M")), reverse=True
    )
    # Idiom fix: `True if <bool> else False` is redundant, and the options
    # string only needs to be split once.
    opts = options.split(",")
    ts = "ts" in opts
    alle = "alle" in opts
    for koord in koordinater:
        tskoord = koord.srid.name.startswith("TS:")
        # time-series coordinates are only shown when explicitly requested
        if tskoord and not ts:
            continue
        if koord.registreringtil is not None:
            # superseded coordinate: shown only with 'alle' (or 'ts' for
            # time-series coordinates), marked red
            if alle or (ts and tskoord):
                fire.cli.print(". " + koordinat_linje(koord), fg="red")
        else:
            # current coordinate, marked green
            fire.cli.print("* " + koordinat_linje(koord), fg="green")
    fire.cli.print("")
def observationsrapport(
    observationer_til: List[Observation],
    observationer_fra: List[Observation],
    options: str,
    opt_detaljeret: bool,
) -> None:
    """
    Helper for 'punkt_fuld_rapport': print a formatted observation list.
    """
    # currently only levelling observations are supported
    if options not in ["niv", "alle"]:
        return
    n_obs_til = len(observationer_til)
    n_obs_fra = len(observationer_fra)
    if n_obs_til + n_obs_fra == 0:
        return
    # Determine the id of the point being reported on, from whichever
    # observation list is non-empty.
    if n_obs_til > 0:
        punktid = observationer_til[0].sigtepunktid
    else:
        punktid = observationer_fra[0].opstillingspunktid
    # Keep only observation types 1 (geometric) and 2 (trigonometric).
    observationer = [
        obs
        for obs in observationer_fra + observationer_til
        if obs.observationstypeid in [1, 2]
    ]
    # The "gruppe" element is meaningful for classical direction
    # measurements but only of limited relevance for levelling
    # observations, where it has historically been populated with journal
    # page information. In those cases it is a useful extra parameter for
    # the relevance sorting below. Where "gruppe" is unset we set it to 0,
    # which avoids comparing incompatible data types in the sort.
    for obs in observationer:
        if obs.gruppe is None:
            obs.gruppe = 0
    # Best-effort attempt at sorting the selected observations so they
    # make the most sense to the user: precision levelling first, then the
    # rest, and within each group reverse-chronologically with out/back
    # pairs grouped together as far as possible. It is not easy!
    observationer.sort(
        key=lambda x: (
            (x.value7 if x.observationstypeid == 1 else 0),
            (x.observationstidspunkt.year),
            (x.gruppe),
            (x.sigtepunktid if x.sigtepunktid != punktid else x.opstillingspunktid),
            (x.observationstidspunkt),
        ),
        reverse=True,
    )
    n_vist = len(observationer)
    if n_vist == 0:
        return
    # Column header and separator for the observation table.
    fire.cli.print(
        " [Trig/Geom][Præs][T] dH L N Fra Til ne d eta grp id"
    )
    fire.cli.print(" " + 110 * "-")
    for obs in observationer:
        linje = observation_linje(obs)
        if linje != "" and linje is not None:
            fire.cli.print(" " + observation_linje(obs))
    fire.cli.print(" " + 110 * "-")
    if not opt_detaljeret:
        return
    fire.cli.print(f" Observationer ialt: {n_obs_til + n_obs_fra}")
    fire.cli.print(f" Observationer vist: {n_vist}")
    # Find the oldest and newest observation
    min_obs = datetime.datetime(9999, 12, 31, 0, 0, 0)
    max_obs = datetime.datetime(1, 1, 1, 0, 0, 0)
    for obs in itertools.chain(observationer_fra, observationer_til):
        if obs.observationstidspunkt < min_obs:
            min_obs = obs.observationstidspunkt
        if obs.observationstidspunkt > max_obs:
            max_obs = obs.observationstidspunkt
    fire.cli.print(f" Ældste observation: {min_obs}")
    fire.cli.print(f" Nyeste observation: {max_obs}")
    fire.cli.print(" " + 110 * "-")
def punkt_fuld_rapport(
    punkt: Punkt,
    ident: str,
    i: int,
    n: int,
    opt_obs: str,
    opt_koord: str,
    opt_detaljeret: bool,
) -> None:
    """
    Report generator for the 'punkt' command below.

    Prints a header, location/attribute overview, and optionally detailed
    identifiers, coordinates and observations, depending on the opt_*
    flags.
    """
    # Header — include an (i/n) counter when several points were found.
    fire.cli.print("")
    fire.cli.print("-" * 80)
    if n > 1:
        fire.cli.print(f" PUNKT {punkt.ident} ({i}/{n})", bold=True)
    else:
        fire.cli.print(f" PUNKT {punkt.ident}", bold=True)
    fire.cli.print("-" * 80)

    # Geometry, fire-id, creation date and PunktInformation are handled
    # together, since that gives a better introductory overview.
    try:
        for geometriobjekt in punkt.geometriobjekter:
            # mark closed geometry objects with red text and a leading minus
            if geometriobjekt.registreringtil:
                fire.cli.print(
                    f" -Lokation {geometriobjekt.geometri}", fg="red"
                )
            else:
                fire.cli.print(
                    f" Lokation {geometriobjekt.geometri}"
                )
    except Exception:
        pass
    fire.cli.print(f" Oprettelsesdato {punkt.registreringfra}")
    punktinforapport(punkt.punktinformationer)

    # Rarely used identifiers, shown only on request.
    if opt_detaljeret:
        fire.cli.print(f" uuid {punkt.id}")
        fire.cli.print(f" objekt-id {punkt.objektid}")
        fire.cli.print(f" sagsid {punkt.sagsevent.sagsid}")
        fire.cli.print(f" sagsevent-fra {punkt.sagseventfraid}")
        if punkt.sagseventtilid is not None:
            fire.cli.print(f" sagsevent-til {punkt.sagseventtilid}")

    # Coordinates and observations are handled by specialized helpers.
    if "ingen" not in opt_koord.split(","):
        fire.cli.print("")
        fire.cli.print("--- KOORDINATER ---", bold=True)
        koordinatrapport(punkt.koordinater, opt_koord)

    if opt_obs != "":
        fire.cli.print("")
        fire.cli.print("--- OBSERVATIONER ---", bold=True)
        observationsrapport(
            punkt.observationer_til, punkt.observationer_fra, opt_obs, opt_detaljeret
        )
    fire.cli.print("")
@info.command()
@click.option(
"-K",
"--koord",
default="",
help="ts: Udskriv også tidsserier; alle: Udskriv også historiske koordinater; ingen: Udelad alle",
)
@click.option(
"-O",
"--obs",
is_flag=False,
default="",
help="niv/alle: Udskriv observationer",
)
@click.option(
"-D",
"--detaljeret",
is_flag=True,
default=False,
help="Udskriv også sjældent anvendte elementer",
)
@click.option(
"-n",
"--antal",
is_flag=False,
default=20,
help="Begræns antallet af punkter der udskrives",
)
@fire.cli.default_options()
@click.argument("ident")
def punkt(
ident: str, obs: str, koord: str, detaljeret: bool, antal: int, **kwargs
) -> None:
"""
Vis al tilgængelig information om et fikspunkt
IDENT kan være enhver form for navn et punkt er kendt som, blandt andet
GNSS stationsnummer, G.I./G.M.-nummer, refnr, landsnummer, uuid osv.
Søgningen er delvist versalfølsom, men tager højde for minuskler, udeladte
punktummer og manglende foranstillede nuller, i ofte forekommende, let
genkendelige tilfælde (GNSS-id, GI/GM-numre, lands- og købstadsnumre).
Hvis der indgår procenttegn i det søgte punktnavn opfattes disse som
jokertegn, og søgningen returnerer alle punkter der matcher mønstret.
Punkt-klassen er omfattende og består af følgende elementer:
Punkt = Punkt(\n
'geometriobjekter', -- lokationskoordinat\n
'id', -- uuid: intern databaseidentifikation\n
'koordinater', -- alle tilgængelige koordinater\n
'metadata', -- øh\n
'objektid', -- databaserækkenummer\n
'observationer_fra', -- alle observationer udført fra punkt\n
'observationer_til', -- alle observationer udført til punkt\n
'punktinformationer', -- attributter og punktbeskrivelser\n
'registreringfra', -- oprettelsesdato/registreringsdato\n
'registreringtil', -- invalideringstidspunkt\n
'sagsevent', -- ?? seneste sagsevent??\n
'sagseventfraid', -- sagsevent for punktoprettelsen\n
'sagseventtilid', -- sagsevent for punktinvalideringen\n
'slettet' -- øh\n
)
Anfører man ikke specifikke tilvalg vises kun basale dele: Attributter og
punktbeskrivelser + gældende koordinater.
Tilvalg `--detaljer/-D` udvider med sjældnere brugte informationer
Tilvalg `--koord/-K` kan sættes til ts, alle, ingen - eller kombinationer:
fx ts,alle. `alle` tilvælger historiske koordinater, `ts` tilvælger
tidsseriekoordinater, `ingen`fravælger alle koordinatoplysninger.
Tilvalg `--obs/-O` kan sættes til alle eller niv. Begge tilvælger visning
af observationer til/fra det søgte punkt. P.t. understøttes kun visning af
nivellementsobservationer.
"""
ident = klargør_ident_til_søgning(ident)
try:
punkter = fire.cli.firedb.hent_punkter(ident)
except NoResultFound:
fire.cli.print(f"Fejl: Kunne ikke finde {ident}.", fg="red", err=True)
sys.exit(1)
# Succesfuld søgning - vis hvad der blev fundet
n = len(punkter)
for i, punkt in enumerate(punkter):
if i == antal:
break
punkt_fuld_rapport(punkt, punkt.ident, i + 1, n, | |
<filename>src/pylines/io.py
import random
import os
from . import line_count, create_idx_key, get_idx_key, get_write_fn, get_read_fn, _io_type
from . import _env, parser, json, glob, Timer
if _env['tqdm']:
from tqdm.auto import tqdm, trange
if _env['tf']:
from .tflow import setup_tf_serialization_features, serialize_tf_example, SerializeTFWorker, TFRWriter
from .tflow import TFDatasetFromTensors, TFRDataset
if _env['torch']:
from .torch import serialize_torch_example, SerializeTorchWorker, setup_torch_serialization_features
from .torch import TorchWriter, DynamicCollate, PylinesDataset, PylinesIterableFunctionDataset, PylinesDatasetFromIterator
#if _env['ray']:
# import ray.util.multiprocessing as mp
#else:
import math
from .logger import get_logger
import multiprocessing as mp
import hashlib
import gc
logger = get_logger()
_tokenize_fn = None
# https://stackoverflow.com/questions/620367/how-to-jump-to-a-particular-line-in-a-huge-text-file
class LineSeekableFile:
    """Random access to lines of a seekable text file by line index.

    Builds a map from line number to file offset on construction, so
    __getitem__ can seek directly to any line.
    """

    def __init__(self, seekable):
        self.fin = seekable
        # Offsets of line starts; offset 0 is always a line start, and one
        # extra entry is appended for the end-of-file position.
        offsets = [0]
        while seekable.readline():
            offsets.append(seekable.tell())
        self.line_map = offsets

    def index(self):
        """Return the raw line-offset table."""
        return self.line_map

    def __len__(self):
        return len(self.line_map)

    def __getitem__(self, index):
        # Not intended for sequential reads — just iterate the file for that.
        self.fin.seek(self.line_map[index])
        return self.fin.readline()
class LazyLoadFile:
    """Lazily read a JSON-lines file, with random access via LineSeekableFile.

    When skip_broken is true, unparsable lines are counted in badidx
    instead of raising.
    """

    def __init__(self, filename, skip_broken=True):
        self.filename = filename
        self.reader = get_read_fn(filename)
        self._skip = skip_broken
        self.fseek = LineSeekableFile(self.reader)
        # Fix: counters are always initialized, so stats()/resetstats()
        # work even when skip_broken is False (the original only set them
        # in the skip_broken branch, making stats() raise AttributeError).
        self.lineidx = 0
        self.badidx = 0

    def random_iter(self, num_lines=None):
        """Yield up to num_lines parsed lines in random index order."""
        num_lines = num_lines if num_lines else len(self.fseek)
        total_idx = list(range(num_lines))
        random.shuffle(total_idx)
        for idx in total_idx:
            if self._skip:
                try:
                    yield self.loads(self.fseek[idx])
                    self.lineidx += 1
                # Fix: narrowed from a bare except, which also swallowed
                # KeyboardInterrupt/SystemExit (and GeneratorExit on close).
                except Exception:
                    self.badidx += 1
            else:
                yield self.loads(self.fseek[idx])

    def quick_iter(self, num_lines=None):
        """Yield up to num_lines parsed lines sequentially from the reader."""
        num_lines = num_lines if num_lines else len(self.fseek)
        for x, line in enumerate(self.reader):
            if self._skip:
                try:
                    yield self.loads(line)
                    self.lineidx += 1
                except Exception:
                    self.badidx += 1
            else:
                yield self.loads(line)
            if x >= num_lines:
                break

    def iter(self):
        """Yield all parsed lines sequentially from the reader."""
        for line in self.reader:
            if self._skip:
                try:
                    yield self.loads(line)
                    self.lineidx += 1
                except Exception:
                    self.badidx += 1
            else:
                yield self.loads(line)

    def loads(self, v):
        """Parse one raw line into a dict."""
        return parser.parse(v).as_dict()

    def __getitem__(self, idx):
        return self.loads(self.fseek[idx])

    def __len__(self):
        return len(self.fseek)

    def stats(self):
        """Return counts of successfully loaded and skipped lines."""
        return {'loaded': self.lineidx, 'missed': self.badidx}

    def resetstats(self):
        """Zero the line counters and return the (zeroed) stats."""
        self.lineidx = 0
        self.badidx = 0
        return {'loaded': self.lineidx, 'missed': self.badidx}
def setup_tokenize_fn(tokenizer_fn):
    """Install tokenizer_fn as the module-level tokenize function.

    Stored in a global so the module-level worker functions can reach it.
    """
    assert _env['transformers'], 'Transformers must be installed to use tokenize function'
    global _tokenize_fn
    _tokenize_fn = tokenizer_fn
def TokenizerWorker(ex):
    """Run one example through the module-level tokenize function.

    Returns None when tokenization fails, so failed examples can be
    dropped by the caller.
    """
    try:
        return _tokenize_fn(ex)
    # Fix: narrowed from a bare except, which also swallowed
    # KeyboardInterrupt/SystemExit — those should propagate.
    except Exception:
        return None
def setup_iter_fn(iter_fn):
    """Install iter_fn as the module-level per-example processing function."""
    global _iter_func
    _iter_func = iter_fn
def IterWorker(ex):
    """Run one example through the module-level processing function.

    Returns None when processing fails, so failed examples can be dropped
    by the caller.
    """
    try:
        return _iter_func(ex)
    # Fix: narrowed from a bare except, which also swallowed
    # KeyboardInterrupt/SystemExit — those should propagate.
    except Exception:
        return None
def setup_filter_fns(filter_fns):
    """Install filter_fns as the module-level per-key filter mapping.

    Expected shape (see comment above as_filter): a dict mapping example
    keys to filter callables, plus a 'bypass' list of keys copied through
    unfiltered.
    """
    global _filter_func
    _filter_func = filter_fns
def FilterWorker(ex):
    """Apply the module-level per-key filter functions to one example.

    Keys listed in _filter_func['bypass'] are copied through unchanged;
    other keys present in _filter_func are replaced by their filter's
    result (and dropped when the filter returns a falsy value).

    Returns the filtered example, or None when nothing survives filtering.
    """
    result = {}
    for key in ex:
        if key not in _filter_func['bypass'] and key in _filter_func:
            res = _filter_func[key](ex[key])
            if res:
                result[key] = res
        elif key in _filter_func['bypass']:
            result[key] = ex[key]
    # Bug fix: the original condition was inverted — it returned None when
    # result was NON-empty and an empty dict otherwise, discarding every
    # successfully filtered example.
    if not result:
        return None
    return result
def FileIterator(filename):
    """Yield parsed JSON objects, one per line of filename."""
    with get_read_fn(filename) as f:
        for line in f:
            yield parser.parse(line).as_dict()
    # Bug fix: the original ended with `raise StopIteration`, which inside
    # a generator is converted to RuntimeError since PEP 479 (Python 3.7).
    # Simply returning ends iteration correctly.
def make_hashes(inputs):
    """Return the hex SHA-256 digest of the given string."""
    return hashlib.sha256(inputs.encode()).hexdigest()


def check_hashes(inputs, hashed_text):
    """Return hashed_text if it matches the SHA-256 of inputs, else False."""
    return hashed_text if make_hashes(inputs) == hashed_text else False
class Pylines:
def __init__(self, input_fns=None, output_fn=None, skip_broken=True, overwrite_output=False, use_lazy=False, use_mp=True, use_idx=False, total_lines=0):
    """Create a Pylines processor over JSON-lines input files.

    :param input_fns: input filename(s) passed through to _io
    :param output_fn: optional output filename passed through to _io
    :param skip_broken: skip unparsable lines instead of raising
    :param overwrite_output: overwrite an existing output file
    :param use_lazy: enable lazy file loading
    :param use_mp: enable multiprocessing in iteration helpers
    :param use_idx: enable index files
    :param total_lines: known total line count (0 = unknown)
    """
    # Behavior flags, packed into private attributes.
    self._skip, self._lazy, self._mp, self._idx, self._overwrite = skip_broken, use_lazy, use_mp, use_idx, overwrite_output
    self.total_lines = total_lines
    self.writer, self.reader = None, None
    self.input_fns, self.output_fn = None, None
    self.stats = {}
    self.timer = Timer()
    self.stored_items = list()
    # Resolve input/output targets (implemented elsewhere in the class).
    self._io(input_fns, output_fn)
def as_tokenizer(self, tokenizer_fn=None, input_fns=None, use_mp=True):
    """Yield tokenized examples from the input files.

    When tokenizer_fn is given it is installed as the module-level
    tokenize function first; otherwise a previously installed function is
    required.
    """
    if tokenizer_fn:
        setup_tokenize_fn(tokenizer_fn)
    assert _tokenize_fn, 'tokenizer_fn must first be set before being able to run'
    self._io(input_fns, output_fn=None)
    for result in self._as_iter(_tokenize_fn, TokenizerWorker, use_mp, desc='Tokenization'):
        yield result
    logger.info(f'{self.timer.stop()} for Tokenizing {self.total_lines} Items')
def run_tokenizer(self, tokenizer_fn=None, input_fns=None, output_fn=None, use_mp=True):
    """Tokenize all input files and write the results to output_fn."""
    self._io(input_fns, output_fn)
    for result in self.as_tokenizer(tokenizer_fn=tokenizer_fn, use_mp=use_mp):
        self.write(result)
    self.flush()
def as_processor(self, iter_func=None, input_fns=None, use_mp=True):
    """Yield examples mapped through the per-example processing function.

    When iter_func is given it is installed as the module-level processing
    function first; otherwise a previously installed function is required.
    """
    if iter_func:
        setup_iter_fn(iter_func)
    assert _iter_func, 'iter_func must first be set before running'
    self._io(input_fns, output_fn=None)
    for result in self._as_iter(_iter_func, IterWorker, use_mp, desc='Iterator Function'):
        yield result
    logger.info(f'{self.timer.stop()} for {self.total_lines} Items')
def run_processor(self, iter_func=None, input_fns=None, output_fn=None, use_mp=True):
    """Process all input files and write the results to output_fn."""
    self._io(input_fns, output_fn)
    for result in self.as_processor(iter_func=iter_func, use_mp=use_mp):
        self.write(result)
    self.flush()
# filter_funcs = {'text': filter_fuc, 'target': filter_func, 'idx': filter_func, 'bypass': ['key_1', 'key_2']}
def as_filter(self, filter_funcs=None, input_fns=None, use_mp=True):
    """Yield filtered examples from the input files.

    filter_funcs maps example keys to filter callables plus a 'bypass'
    list of keys copied through unfiltered (see FilterWorker and the
    example shape in the comment above this method).
    """
    if filter_funcs:
        setup_filter_fns(filter_funcs)
    assert _filter_func, 'filter_funcs must first be set before running'
    self._io(input_fns, output_fn=None)
    # FilterWorker serves as both the inline function and the
    # multiprocessing worker; both paths execute the same code.
    for result in self._as_iter(FilterWorker, FilterWorker, use_mp, desc='Filtering Items'):
        yield result
    logger.info(f'{self.timer.stop()} for Filtering {self.total_lines} Items')
def run_filter(self, filter_funcs=None, input_fns=None, output_fn=None, use_mp=True):
    """Filter all input files and write the surviving examples to output_fn."""
    self._io(input_fns, output_fn)
    for result in self.as_filter(filter_funcs=filter_funcs, use_mp=use_mp):
        self.write(result)
    self.flush()
def _tftensordict(self, all_examples, dataset_features=None):
_features = list()
_tensor_examples = dict()
for axis in dataset_features:
_features += dataset_features[axis]['names']
for feats in _features:
_tensor_examples[feats] = list()
for ex in all_examples:
for key, v in ex.items():
if key in _features:
_tensor_examples[key].extend(v)
return _tensor_examples
def _tfencoder(self, all_examples, dataset_features=None, slices=True, use_mp=True):
    """Serialize examples for TF: tensor-slice dict or serialized tf.Examples.

    NOTE(review): because this function contains `yield`, it is ALWAYS a
    generator; the `return _tensor_ds` in the slices branch does not hand
    the dict to the caller — it ends the generator (the value only appears
    on StopIteration). Callers that pass slices=True and iterate receive
    nothing. Also note callers in this class pass use_mp as the third
    positional argument, which lands on `slices` — verify intended usage.
    """
    if dataset_features:
        for axis in dataset_features:
            assert 'names' in dataset_features[axis], 'names is a required key for dataset features.'
        setup_tf_serialization_features(dataset_features)
    if slices:
        _tensor_ds = self._tftensordict(all_examples, dataset_features)
        return _tensor_ds
    else:
        for serialized_ex in self._as_iter_items(all_examples, serialize_tf_example, SerializeTFWorker, use_mp=use_mp, desc=f'Serializing to TFRecords'):
            yield serialized_ex
def _tfwriter(self, all_examples, output_dir, dataset_features=None, start_idx=1, split_key='split', split='train', write_string='{}_shard_{}.tfrecords', shard_size=50000, overwrite=False, use_tempdir=False, use_mp=True):
    """Serialize examples and write them to sharded TFRecord files.

    Returns (tfrecord_files, total_items) as reported by the writer.
    """
    # Total for the writer: count only examples matching the requested
    # split when a split_key is given.
    _total_match = self.count_matching(split_key, split) if split_key else self.total_lines
    with TFRWriter(output_dir, _total_match, start_idx, split, write_string, shard_size, overwrite, use_tempdir) as writer:
        for serialized_ex in self._tfencoder(all_examples, dataset_features, slices=False, use_mp=use_mp):
            writer.write(serialized_ex)
        tfrecord_files, total_items = writer.close()
    return tfrecord_files, total_items
def _torchencoder(self, all_examples, dataset_features=None, use_mp=True):
    """Yield examples serialized for torch via SerializeTorchWorker."""
    if dataset_features:
        setup_torch_serialization_features(dataset_features)
    for serialized_ex in self._as_iter_items(all_examples, serialize_torch_example, SerializeTorchWorker, use_mp=use_mp, desc=f'Serializing to Torch'):
        yield serialized_ex
def _torchwriter(self, all_examples, output_dir, dataset_features=None, start_idx=1, split_key='split', split='train', write_string='{}_shard_{}.pkl', shard_size=50000, overwrite=False, use_tempdir=False, use_mp=True, compression=True):
    """Serialize examples and write them to sharded torch pickle files.

    Returns (torch_files, total_items) as reported by the writer.
    """
    # Count only examples matching the requested split when split_key is set.
    _total_match = self.count_matching(split_key, split) if split_key else self.total_lines
    with TorchWriter(output_dir, _total_match, start_idx, split, write_string, shard_size, overwrite, use_tempdir, compression) as writer:
        for serialized_ex in self._torchencoder(all_examples, dataset_features, use_mp):
            writer.write(serialized_ex)
        torch_files, total_items = writer.close()
    return torch_files, total_items
def _tokenize_examples(self, tokenizer_fn, use_mp=True):
    """Materialize all examples, tokenizing them when tokenizer_fn is given.

    Without a tokenizer the input files are assumed to be pre-tokenized
    and are loaded as-is.
    """
    all_results = list()
    if tokenizer_fn:
        for result in self.as_tokenizer(tokenizer_fn, use_mp=use_mp):
            all_results.append(result)
    else:
        logger.warning(f'No Tokenizer Function Provided. Assuming Input Files are Pretokenized.')
        for result in self.as_iterator():
            all_results.append(result)
    # NOTE(review): raises IndexError when no examples were loaded — the
    # log line below indexes all_results[0].
    logger.info(f'Loaded {len(all_results)} Examples. Keys: {list(i for i in all_results[0])}')
    return all_results
def as_encoder(self, dataset_features=None, tokenizer_fn=None, serialization='tf', input_fns=None, use_mp=True):
    """Yield serialized examples for the chosen framework ('tf' or 'torch')."""
    _methods = ['tf', 'torch']
    assert serialization in _methods, f'Currently only {_methods} are supported'
    assert _env[serialization], f'{serialization} library is required to run Serialization'
    self._io(input_fns, output_fn=None)
    all_examples = self._tokenize_examples(tokenizer_fn, use_mp)
    # NOTE(review): the third positional argument here lands on
    # _tfencoder's `slices` parameter — verify intended call signature.
    if serialization == 'tf':
        for serialized_ex in self._tfencoder(all_examples, dataset_features, use_mp):
            yield serialized_ex
    elif serialization == 'torch':
        for serialized_ex in self._torchencoder(all_examples, dataset_features, use_mp):
            yield serialized_ex
    logger.info(f'{self.timer.stop()} for Serializing [{serialization}] {len(all_examples)} Examples')
def run_encoder(self, output_dir, dataset_features=None, tokenizer_fn=None, serialization='tf', input_fns=None, start_idx=1, split_key='split', split='train', write_string='{}_shard_{}.tfrecords', shard_size=50000, overwrite=False, use_tempdir=False, use_mp=True, compression=True):
    """Tokenize inputs and write sharded serialized output files.

    Dispatches to the TFRecord or torch writer depending on
    `serialization`; returns (files, total_items) from the writer.
    """
    self._io(input_fns, output_fn=None)
    all_examples = self._tokenize_examples(tokenizer_fn, use_mp)
    if serialization == 'tf':
        tfrecord_files, total_items = self._tfwriter(all_examples, output_dir, dataset_features, start_idx, split_key, split, write_string, shard_size, overwrite, use_tempdir, use_mp)
        return tfrecord_files, total_items
    elif serialization == 'torch':
        torch_files, total_items = self._torchwriter(all_examples, output_dir, dataset_features, start_idx, split_key, split, write_string, shard_size, overwrite, use_tempdir, use_mp, compression)
        return torch_files, total_items
def as_dataset(self, batch_sizes, dataset_features=None, tokenizer_fn=None, framework='tf', input_fns=None, split_key='split', splits=['train', 'validation', 'test'], use_mp=True):
    """Tokenize inputs and build an in-memory dataset for tf or torch.

    batch_sizes is a per-split mapping when splits are used, otherwise a
    single value. (The mutable default for `splits` is safe here: it is
    only rebound, never mutated.)
    """
    self._io(input_fns, output_fn=None)
    all_examples = self._tokenize_examples(tokenizer_fn, use_mp)
    _dataset = dict()
    # Only the tf path runs an encoder; torch uses examples as-is.
    # NOTE(review): the third positional argument to _tfencoder below
    # lands on its `slices` parameter (not use_mp), and `return` inside
    # that generator yields nothing to this loop — verify intended
    # behavior of the encoded path.
    _encoder_fn = self._tfencoder if framework == 'tf' else None
    if splits:
        _splitdataset = self._dataset_splits(all_examples, split_key, splits)
        for split in splits:
            if _encoder_fn:
                _encoded_examples = list()
                for example in _encoder_fn(_splitdataset[split], dataset_features, use_mp):
                    _encoded_examples.append(example)
            else:
                _encoded_examples = _splitdataset[split]
            _dataset[split] = {'examples': _encoded_examples, 'batch_size': batch_sizes[split]}
        # Free the intermediate per-split lists before building the dataset.
        _splitdataset = None
        gc.collect()
    else:
        if _encoder_fn:
            _encoded_examples = list()
            for example in _encoder_fn(all_examples, dataset_features, use_mp):
                _encoded_examples.append(example)
        else:
            _encoded_examples = all_examples
        _dataset['train'] = {'examples': _encoded_examples, 'batch_size': batch_sizes}
        splits = ['train']
    if framework == 'tf':
        _tfdataset = TFDatasetFromTensors(_dataset, dataset_features)
        return _tfdataset
    elif framework == 'torch':
        _torchdataset = dict()
        for split in splits:
            _torchdataset[split] = PylinesDataset(num_examples=len(_dataset[split]['examples']), examples=_dataset[split]['examples'])
        logger.info('Torch Dataset should be used with DynamicCollate function with the DataLoader for Optimal Performance')
        return _torchdataset
def _dataset_splits(self, all_examples, split_key, splits):
split_results = dict()
for split in splits:
split_results[split] = list()
for example in all_examples:
ex_split = example[split_key]
split_results[ex_split].append(example)
return split_results
def _as_iter(self, IterFunc, Worker, use_mp, desc):
    """Iterate all input files through IterFunc/Worker, yielding truthy results.

    Uses a multiprocessing pool when use_mp is truthy (an int selects the
    pool size); otherwise IterFunc runs inline. A tqdm bar is shown when
    available.
    """
    pbar = trange(self.total_lines, desc=desc) if _env['tqdm'] else None
    self.timer.start(desc)
    if use_mp:
        # An integer use_mp selects the number of worker processes.
        if isinstance(use_mp, int):
            pool = mp.Pool(use_mp)
        else:
            pool = mp.Pool()
        # NOTE(review): the pool is never closed/joined — potential
        # resource leak; confirm whether that is intentional.
        for fn in self.input_fns:
            for result in pool.imap_unordered(Worker, FileIterator(fn)):
                if result:
                    yield result
                if pbar:
                    pbar.update()
    else:
        for fn in self.input_fns:
            for result in self._file_iter(fn):
                ex = IterFunc(result)
                if ex:
                    yield ex
                if pbar:
                    pbar.update()
    if pbar:
        pbar.close()
def _as_iter_items(self, items, IterFunc, Worker, use_mp, desc):
    """Iterate an in-memory item list through IterFunc/Worker, yielding truthy results.

    Same contract as _as_iter but over `items` instead of the input files.
    """
    pbar = trange(len(items), desc=desc) if _env['tqdm'] else None
    self.timer.start(desc)
    if use_mp:
        # An integer use_mp selects the number of worker processes.
        if isinstance(use_mp, int):
            pool = mp.Pool(use_mp)
        else:
            pool = mp.Pool()
        # NOTE(review): the pool is never closed/joined — potential
        # resource leak; confirm whether that is intentional.
        for result in pool.imap_unordered(Worker, items):
            if result:
                yield result
            if pbar:
                pbar.update()
    else:
        for item in items:
            ex = IterFunc(item)
            if ex:
                yield ex
            if pbar:
                pbar.update()
    if pbar:
        pbar.close()
def deduplicate(self, keys, input_fns=None, output_fn=None, write=True):
self._io(input_fns, output_fn)
_sets = {}
results = list()
| |
#!/usr/bin/env python3
try:
import math
import sqlite3
import subprocess
import time
import itertools
import gc
import os
import sys
from metaMLST_functions import *
except ImportError as e:
print ("Error while importing python modules! Remember that this script requires: sys,os,subprocess,sqlite3,argparse,re:\n"+str(e))
sys.exit(1)
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Align.Applications import MuscleCommandline
except ImportError as e:
metamlst_print("Failed in importing Biopython. Please check Biopython is installed properly on your system!",'FAIL',bcolors.FAIL)
sys.exit(1)
# Command-line interface definition.
# NOTE(review): argparse is not in the visible import list at the top of
# this script — presumably re-exported by `from metaMLST_functions import *`;
# confirm.
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
    description='Detects the MLST profiles from a collection of intermediate files from MetaMLST.py')

parser.add_argument("folder", help="Path to the folder containing .nfo MetaMLST.py files",nargs='?')
parser.add_argument("-d",'--database', metavar="DB PATH", help="Specify a different MetaMLST-Database. If unset, use the default Database. You can create a custom DB with metaMLST-index.py)")
parser.add_argument("--filter", metavar="species1,species2...", help="Filter for specific set of organisms only (METAMLST-KEYs, comma separated. Use metaMLST-index.py --listspecies to get MLST keys)")
parser.add_argument("-z", metavar="ED", help="Maximum Edit Distance from the closest reference to call a new MLST allele. Default: 5", default=5, type=int)
parser.add_argument("--meta", metavar="METADATA_PATH", help="Metadata file (CSV)")
parser.add_argument("--idField", help="Field number pointing to the 'sampleID' value in the metadata file", default=0, type=int)
parser.add_argument("--outseqformat", choices=['A', 'A+', 'B', 'B+', 'C','C+'], help="A : Concatenated Fasta (Only Detected STs)\r\n\
A+ : Concatenated Fasta (All STs)\r\n\
B : Single loci (Only New Loci)\r\n\
B+ : Single loci (All loci)\r\n\
C : CSV STs Table [default]")
parser.add_argument("-j", metavar="subjectID,diet,age...", help="Embed a LIST of metadata in the the output sequences (A or A+ outseqformat modes). Requires a comma separated list of field names from the metadata file specified with --meta")
parser.add_argument("--jgroup", help="Group the output sequences (A or A+ outseqformat modes) by ST, rather than by sample. Requires -j", action="store_true")
parser.add_argument("--version", help="Prints version informations", action='store_true')
args=parser.parse_args()
# Print version and exit when requested.
if args.version:
    print_version()
    sys.exit(0)

try:
    # download the database if a non existing (but default-named) DB file is passed
    if not args.database:
        dbPath=check_install()
    else:
        dbPath=args.database

    metaMLSTDB = metaMLST_db(dbPath)
    conn = metaMLSTDB.conn
    cursor = metaMLSTDB.cursor
except IOError:
    metamlst_print("Failed to connect to the database: please check your database file!",'FAIL',bcolors.FAIL)
    sys.exit(1)

# cel maps organism key -> list of (locus dict, sample name) tuples.
cel = {}

# Without a folder argument there is nothing to do: show help and exit.
if args.folder is None:
    parser.print_help()
    sys.exit(0)

# Ensure the output subfolder exists.
try:
    if not os.path.isdir(args.folder+'/merged'): os.makedirs(args.folder+'/merged')
except IOError:
    print ("IOError: unable to access "+args.folder+"!")
#print defineProfile(conn,['ecoli_adk_10','ecoli_fumC_11','ecoli_gyrB_4','ecoli_icd_8','ecoli_mdh_8','ecoli_purA_8','ecoli_recA_2'])
#print defineProfile(conn,['ecoli_adk_21','ecoli_fumC_35','ecoli_gyrB_27','ecoli_icd_6','ecoli_mdh_5','ecoli_purA_5','ecoli_recA_4'])
#sys.exit(0)
for file in os.listdir(args.folder):
if file.split('.')[-1] != 'nfo': continue
for line in open(args.folder+'/'+file,'r'):
organism = line.split()[0]
sampleName = line.split()[1]
genes =line.split()[2::]
#apply the species filter
if args.filter and organism not in args.filter: continue
if organism not in cel: cel[organism] = []
cel[organism].append((dict((x.split('::')[0],(x.split('::')[1].upper(),x.split('::')[2],x.split('::')[3])) for x in genes),sampleName))
print (bcolors.OKBLUE+'MetaMLST Database file: '+bcolors.ENDC+os.path.basename(dbPath)+'\n')
for bacterium,bactRecord in cel.items(): #For each bacterium:
print (bcolors.OKBLUE+'+'+('-'*78)+'+'+bcolors.ENDC )
print (bcolors.OKBLUE+'|'+bcolors.ENDC+str(db_getOrganisms(metaMLSTDB.conn,bacterium)).center(78)+bcolors.OKBLUE+'|'+bcolors.ENDC)
print (bcolors.OKBLUE+'+'+('-'*78)+'+'+bcolors.ENDC )
profil = open(args.folder+'/merged/'+bacterium+'_ST.txt','w')
stringBase = {}
oldProfiles = {}
genesBase={}
profilesBase={}
encounteredProfiles={}
isolates=[]
newSequences = {} #contains the new seqs (reconstructed) for further alignment
mainGeneStructure = {} #key:label,value:sequence.
metadataJoinField = 'sampleID'
cursor.execute("SELECT profileCode FROM profiles WHERE bacterium = ? ORDER BY profileCode DESC LIMIT 1",(bacterium,))
lastProfile = 100000
# lastGenes = dict((row['gene'],row['maxGene']) for row in cursor.execute("SELECT gene, MAX(alleleVariant) as maxGene FROM alleles WHERE bacterium = ? GROUP BY gene",(bacterium,)))
lastGenes = dict((row['gene'],100000) for row in cursor.execute("SELECT gene, MAX(alleleVariant) as maxGene FROM alleles WHERE bacterium = ? GROUP BY gene",(bacterium,)))
#Get all KNOWN profiles for that bacterium
for row in cursor.execute("SELECT profileCode,gene,alleleVariant FROM profiles,alleles WHERE alleleCode = alleles.recID AND alleles.bacterium = ?",(bacterium,)):
if row['profileCode'] not in oldProfiles: oldProfiles[row['profileCode']] = [0,{}]
oldProfiles[row['profileCode']][1][row['gene']] = row['alleleVariant']
for bacteriumLine,sampleRecord in bactRecord: #for each entry of that bacterium:
#bacteriumLine is a dict. Key: genes, Values: sequences. No sequence = no news.
#>> {gene_allele: (sequence,accuracy) , ...}
profileLine = {}
newAlleles = []
flagRecurrent = False
sum_of_accuracies = 0.0
for geneLabel,(geneSeq,geneAccur,percent_snps) in bacteriumLine.items(): #for each gene of the entry
#TODO: change seq_recog
geneOrganism,geneName,geneAllele = geneLabel.split('_')
sum_of_accuracies += float(geneAccur)
if geneSeq == '' or sequenceExists(metaMLSTDB.conn,bacterium,geneSeq):
# WE HAVE A DATABASE SEQUENCE
#print "WE HAVE A DATABASE SEQUENCE"
#geneSeq = cursor.execute("SELECT sequence")
if geneSeq != '': geneAllele = sequenceLocate(metaMLSTDB.conn,bacterium,geneSeq)
profileLine[geneName] = (geneAllele,0)
elif geneSeq in genesBase:
profileLine[geneName] = (genesBase[geneSeq].split('_')[2],2)
flagRecurrent = True #a new allele is recurring
elif geneSeq not in genesBase:
## WE HAVE A NEW SEQUENCE
geneCategoryCode = 1 #default: blue (new allele, accepted)
if args.z != None:
geneCategoryCode = 3 #default becomes now not accepted
for refCode,refSeq in sequencesGetAll(metaMLSTDB.conn,bacterium,geneName).items():
if stringDiff(geneSeq,refSeq) <= args.z:
#print geneName,refCode,stringDiff(geneSeq,refSeq)
geneCategoryCode = 1 # if match allele_max_snps: accept
break
geneNewAlleleNumber = str(lastGenes[geneName]+1)
lastGenes[geneName]+=1
geneNewLabel = geneOrganism+'_'+geneName+'_'+geneNewAlleleNumber
genesBase[geneSeq] = geneNewLabel
#print "\t New Sequence for "+geneName+" -> "+geneNewLabel
profileLine[geneName] = (geneNewAlleleNumber,geneCategoryCode) # 1: blue (new allele, accepted)
# 3: red (new allele, not accepted)
newAlleles.append(geneName)
if geneName not in newSequences: newSequences[geneName] = []
newSequences[geneName].append(SeqRecord(Seq(geneSeq),id=geneNewLabel, description = ''))
meanAccuracy = sum_of_accuracies / float(len(bacteriumLine))
if len(newAlleles) == 0:
## Existent MLST profile -> Look for it (--> ISOLATES)
#Tries to define an existing MLST profile with the alleles
if not flagRecurrent:
tryDefine = defineProfile(metaMLSTDB.conn,[bacterium+'_'+k+'_'+v[0] for k,v in profileLine.items()])
#lopo
if tryDefine and tryDefine[0][1] == 100:
#encounteredProfiles[tryDefine[0][0]] = [profileLine,1,0]
oldProfiles[tryDefine[0][0]][0]+=1
isolates.append((tryDefine[0][0],meanAccuracy,sampleRecord))
continue
foundExistant = 0
for key,(element,abundance,isNewProfile) in encounteredProfiles.items():
if [k+str(v[0]) for k,v in sorted(profileLine.items())] == [k+str(v[0]) for k,v in sorted(element.items())]:
foundExistant = key
if foundExistant:
encounteredProfiles[foundExistant][1] += 1
isolates.append((foundExistant,meanAccuracy,sampleRecord))
else:
lastProfile+=1
encounteredProfiles[lastProfile] = [profileLine,1,2]
isolates.append((lastProfile,meanAccuracy,sampleRecord))
else:
# THIS IS A NEW PROFILE
lastProfile+=1
profileCategoryCode = 1
if args.z != None:
for k,(v,cat) in profileLine.items():
if cat == 3: #if rejectable allele
profileCategoryCode = 3 #rejectable profile
break
encounteredProfiles[lastProfile] = [profileLine,1,profileCategoryCode]
if profileCategoryCode != 3: isolates.append((lastProfile,meanAccuracy,sampleRecord))
# PROFILE LINE: dictionary['gene']: allele, hits, color
# -- ExistingCode | Effect --
# 1 | GREEN -> New, with new alleles
# 2 | YELLOW -> New, with old allelese
# 3 | RED -> New, with new alleles some of which rejectable
# |
#Old profiles
profil.write('ST\t'+'\t'.join([x for x in sorted(lastGenes.keys())] )+'\r\n')
print ('KNOWN MLST profiles found:\nST\t'+'\t'.join([x for x in sorted(lastGenes.keys())])+'\tHits')
# OLD PROFILES
for profileCode,(hits,profile) in oldProfiles.items():
profil.write(str(profileCode)+'\t'+'\t'.join([str(v) for k,v in sorted(profile.items())])+'\r\n')
if hits > 0:
print (bcolors.FAIL+str(profileCode)+bcolors.ENDC + '\t' + '\t'.join([str(v) for k,v in sorted(profile.items())])+'\t'+str(hits))
sys.stdout.flush()
#NEW PROFILES (ACCEPTED)
print ('\n\nNEW MLST profiles found:\nST\t'+'\t'.join([x for x in sorted(lastGenes.keys())])+'\tHits')
for profileID,(profile,hits,profileCategoryCode) in encounteredProfiles.items():
if profileCategoryCode not in [1,2]: continue
if profileCategoryCode == 1: profileNumber = bcolors.WARNING+str(profileID)+bcolors.ENDC
elif profileCategoryCode == 2: profileNumber = bcolors.OKGREEN+str(profileID)+bcolors.ENDC
print (profileNumber + '\t' + '\t'.join([bcolors.OKBLUE+str(v[0])+bcolors.ENDC if v[1] == 1 else bcolors.OKGREEN+str(v[0])+bcolors.ENDC if v[1] == 2 else bcolors.FAIL+str(v[0])+bcolors.ENDC if v[1] == 3 else str(v[0]) for k,v in sorted(profile.items())]) + '\t' + str(hits))
sys.stdout.flush()
profil.write(str(profileID)+'\t'+'\t'.join([str(v[0]) for k,v in sorted(profile.items())])+'\n')
#NEW PROFILES (REJECTED)
print ('\n\nREJECTED NEW MLST profiles, as they have > SNPs than max-threshold (-z '+str(args.z)+')\nST\t'+'\t'.join([x for x in sorted(lastGenes.keys())])+'\tHits')
for profileID,(profile,hits,profileCategoryCode) in encounteredProfiles.items():
if profileCategoryCode not in [3]: continue
profileNumber = bcolors.OKBLUE+str(profileID)+bcolors.ENDC
print (str(profileID) + '\t' + '\t'.join([bcolors.OKBLUE+str(v[0])+bcolors.ENDC if v[1] == 1 else bcolors.OKGREEN+str(v[0])+bcolors.ENDC if v[1] == 2 else bcolors.FAIL+str(v[0])+bcolors.ENDC if v[1] == 3 else str(v[0]) for k,v in sorted(profile.items())]) + '\t' + str(hits))
sys.stdout.flush()
#profil.write(str(profileID)+'\t'+'\t'.join([str(v[0]) for k,v in sorted(profile.items())])+'\n')
profil.close()
print ("")
metamlst_print("Outputing results",'...',bcolors.ENDC)
#ISOLATES FILE OUTPUT
isolafil = open(args.folder+'/merged/'+bacterium+'_report.txt','w')
identifiers = {}
p1line = False
keys=[]
if args.meta:
for line in open(args.meta):
if line == '': continue
if not p1line:
p1line=True
keys = [str(x).strip() for x in line.split('\t')]
metadataJoinField = keys[args.idField]
else:
l = line.strip().split('\t')
if len(l) == len(keys): identifiers[l[args.idField]] = dict((keys[i],l[i]) for i in range(0,len(keys)))
else: metamlst_print("Warning: some metadata fields are empty, please provide a suitable metadata file ",'!',bcolors.WARNING)
isolafil.write('ST\tConfidence\t'+'\t'.join(keys)+'\n')
STmapper={} #used to keep track of STs to output in form of concatenated sequences
for profileST,meanAccur,sampleName in isolates:
if profileST not in STmapper: STmapper[profileST] = []
if sampleName.endswith('.fna'): sampleName = sampleName.split('.')[0]
if sampleName in identifiers:
strl=[]
for ky in keys:
strl.append(identifiers[sampleName][ky])
isolafil.write(str(profileST)+'\t'+str(round(meanAccur,2))+'\t'+'\t'.join(strl)+'\n')
STmapper[profileST].append(identifiers[sampleName])
else:
if args.meta: metamlst_print("Warning: "+sampleName+' is not in metadata file','!',bcolors.WARNING)
isolafil.write(str(profileST)+'\t'+str(round(meanAccur,2))+'\t'+str(sampleName)+'\n')
#if args.j: STmapper[profileST] = dict((virtKey,'-') for virtKey in args.j)
STmapper[profileST].append({'sampleID':sampleName})
isolafil.close()
#SEQUENCES OUTPUT
if args.outseqformat:
if args.outseqformat == 'B':
SeqIO.write(sorted( list(itertools.chain(*newSequences.values())) ,key=lambda x: x.id),args.folder+'/merged/'+bacterium+'_sequences.fna', "fasta")
seqTable={}
preaLignTable={}
for row in cursor.execute("SELECT gene,alleleVariant,sequence FROM alleles WHERE bacterium = ? ORDER BY bacterium,gene,alleleVariant",(bacterium,)):
label = bacterium+'_'+row['gene']+'_'+str(row['alleleVariant'])
if row['gene'] not in preaLignTable: preaLignTable[row['gene']] = []
preaLignTable[row['gene']].append(SeqRecord(Seq(row['sequence']), id = label, description=''))
for seqGene,seqList in newSequences.items():
if seqGene not in preaLignTable: preaLignTable[seqGene] = []
for seqElement in seqList:
preaLignTable[seqGene].append(seqElement)
if args.outseqformat == 'B+':
SeqIO.write(sorted( list(itertools.chain(*preaLignTable.values())) ,key=lambda x: x.id),args.folder+'/merged/'+bacterium+'_sequences.fna', "fasta")
if args.outseqformat == 'C':
seqfile = open(args.folder+'/merged/'+bacterium+'_sequences.txt','w')
nalign_Table= dict((k.id,k.seq) for k in list(itertools.chain(*preaLignTable.values())))
seqfile.write('ST\t'+'\t'.join([str(x) for x in sorted(lastGenes.keys())] )+'\r\n')
for profileCode,(hits,profile) in oldProfiles.items(): #for each old profile
if hits>0 or args.outseqformat == 'C+':
seqfile.write(str(profileCode)+'\t'+'\t'.join([str(nalign_Table[bacterium+'_'+gen+'_'+str(alle)]) for gen,alle in sorted(profile.items())] )+'\r\n')
for profileCode,(profile,hits,isNewProfile) in encounteredProfiles.items(): #for each encountered profile
if isNewProfile == 3: continue #rejected profiles
seqfile.write(str(profileCode)+'\t'+'\t'.join([str(nalign_Table[bacterium+'_'+gen+'_'+str(alle[0])]) for gen,alle in sorted(profile.items())] )+'\r\n')
seqfile.close()
if args.outseqformat in ['A','A+']: #sequences, merged
for gene,seqs in preaLignTable.items(): #for each gene
cS = StringIO()
tld=[]
for seq in seqs:
if len(seq) not in tld: tld.append(len(seq))
if len(tld) > 1: #more than one length: need to align!
#print(('\tAligning Sequences ['+gene+']:').ljust(50)),
metamlst_print('Sequences of ['+gene+'] need to be aligned','...',bcolors.ENDC)
sys.stdout.flush()
SeqIO.write(seqs,cS, "fasta")
muscle_cline = MuscleCommandline('muscle')
stdout, stderr = muscle_cline(stdin=cS.getvalue())
for sequence in SeqIO.parse(StringIO(stdout), "fasta"):
seqTable[sequence.id] = str(sequence.seq)
metamlst_print('Sequences of ['+gene+'] need to be aligned','DONE',bcolors.OKGREEN)
else:
#print(('\tAligned Sequences ['+gene+'] ('+str(len(tld))+'):').ljust(50)),
metamlst_print('Sequences of ['+gene+'] are aligned','OK',bcolors.OKGREEN)
for seq in seqs:
seqTable[seq.id] = str(seq.seq)
#print(bcolors.OKGREEN+'[ - Done - ]'+bcolors.ENDC)
metamlst_print('Sequences Alignment Completed','DONE',bcolors.OKGREEN)
sys.stdout.flush()
phyloSeq = []
#OLD
for profileCode,(hits,profile) in oldProfiles.items(): #for each old profile
stSeq = ''
if hits>0: #old, detected, present in the samples
for gen,all in sorted(profile.items()):
#print gen,all
stSeq+=str(seqTable[bacterium+'_'+gen+'_'+str(all)])
if args.j:
listofkeys = dict((k,[]) for k in args.j.split(','))
if profileCode in STmapper:
prog = 0
for i in | |
<gh_stars>0
import numpy as np
import time
from numba import njit, jit
# written by qth,2021/04/22
from scipy.optimize import linear_sum_assignment
from scipy.cluster.vq import kmeans2
from sklearn.cluster import DBSCAN
# Physical/external base state shared by all entities.
class EntityState(object):
    """Physical state of an entity: position, heading angle, and velocity."""

    def __init__(self):
        # 2-D physical position (assigned by the scenario on reset).
        self.p_pos = None
        # Heading angle: an entity can shoot only along its heading, but may
        # move along any direction.
        self.p_ang = None
        # Physical velocity.
        self.p_vel = None
# State of agents (physical state plus communication utterance).
class AgentState(EntityState):
    """Extends EntityState with the agent's communication utterance."""

    def __init__(self):
        super(AgentState, self).__init__()
        # Communication utterance emitted this step (None when unused).
        self.c = None
# Action chosen by an agent on a single step.
class Action(object):
    """Container for an agent's physical, shooting, and communication action."""

    def __init__(self):
        # Physical action: first two components are x/y, third is rotation.
        self.u = None
        # Whether to fire this step (original note: a value greater than 0
        # means shoot; the default is boolean False).
        self.shoot = False
        # Communication action (None when unused).
        self.c = None
# Properties and state of a physical world entity.
class Entity(object):
    """Base class for every physical object in the world (agents, bullets,
    landmarks): geometry, collision/motion flags, color, limits, and state."""

    def __init__(self, size=0.05, color=None):
        # Display / identification name.
        self.name = ''
        # Entity radius.
        self.size = size
        # Whether the entity can move or be pushed.
        self.movable = False
        # Whether the entity collides with others.
        self.collide = True
        # Material density (affects mass).
        self.density = 25.0
        # RGB color (or None until assigned).
        self.color = color
        # Motion limits; None means unconstrained.
        self.max_speed = None
        self.accel = None
        # Physical state (position / heading / velocity).
        self.state = EntityState()
        # Base mass before any modifiers.
        self.initial_mass = 1.0

    @property
    def mass(self):
        """Effective mass; currently identical to the initial mass."""
        return self.initial_mass
## A bullet is a small, movable entity fired by an agent.
class Bullet(Entity):
    """Projectile entity; colored green for guards, red for attackers."""

    def __init__(self, bulletType):
        super(Bullet, self).__init__(size=0.01)
        self.name = 'bullet'
        self.movable = True
        # Side that fired the bullet: 'attacker' or 'guard'.
        self.type = bulletType
        if self.type == 'guard':
            self.color = np.array([0, 0.85, 0])
        else:
            self.color = np.array([0.85, 0.35, 0.35])
def distance_matrix_AB(A, B):
    """Pairwise Euclidean distances between the 2-D points of A and B.

    A has shape (..., Na, 2) and B has shape (..., Nb, 2); leading batch
    dimensions (if any) must match. The result has shape (..., Na, Nb) with
    entry [i, j] equal to the distance between A[..., i, :] and B[..., j, :].
    """
    assert A.shape[-1] == 2  # assert 2D situation
    assert B.shape[-1] == 2  # assert 2D situation
    # Broadcast A against B and reduce over the coordinate axis:
    # (..., Na, 1, 2) - (..., 1, Nb, 2) => (..., Na, Nb, 2)
    diff = np.expand_dims(A, -2) - np.expand_dims(B, -3)
    return np.linalg.norm(diff, axis=-1)
# Properties of landmark entities (static, non-agent objects).
class Landmark(Entity):
    """A landmark; keeps all Entity defaults (immovable, collidable)."""

    def __init__(self):
        super().__init__()
# Properties of agent entities.
class Agent(Entity):
    """A movable entity with an action, sensing flags, shooting parameters,
    and per-episode combat bookkeeping."""

    def __init__(self, iden=None):
        super(Agent, self).__init__()
        # Agents are movable by default.
        self.movable = True
        # Capability flags: cannot send communication signals / cannot
        # observe the world.
        self.silent = False
        self.blind = False
        # Noise amounts for motor and communication channels (None = none).
        self.u_noise = None
        self.c_noise = None
        # Control range.
        self.u_range = 1.0
        # Agent state and pending action.
        self.state = AgentState()
        self.action = Action()
        # Optional scripted behaviors to execute (train / test variants).
        self.action_callback = None
        self.action_callback_test = None
        ## Combat bookkeeping: overall totals for bullets hit / taken ...
        self.numHit = 0
        self.numWasHit = 0
        # ... and flags for the most recent timestep only.
        self.hit = False
        self.wasHit = False
        ## Shooting cone radius and angular width (radians); defaults shared
        ## by guards and attackers, may be changed by the environment.
        self.shootRad = 0.4
        self.shootWin = np.pi/4
        # Liveness: justDied helps compute the reward on the step an agent
        # dies.
        self.alive = True
        self.justDied = False
        self.prevDist = None
        # The identifier attribute only exists when supplied by the caller.
        if iden is not None:
            self.iden = iden
# multi-agent world
class World(object):
def __init__(self):
## lists of agents, entities and bullets (can change at execution-time!)
self.agents = []
self.landmarks = []
self.bullets = []
# communication channel dimensionality
self.dim_c = 0
# position dimensionality
self.dim_p = 3 ## x, y, angle
# color dimensionality
self.dim_color = 3
# simulation timestep
self.dt = 0.1
# physical damping
self.damping = 0.25
# contact response parameters
self.contact_force = 1e+2
self.contact_margin = 1e-10 # 1e-3
## wall positions
# self.wall_pos = [-1,1,-0.8,0.8] # (xmin, xmax) vertical and (ymin,ymax) horizontal walls
self.wall_pos = [-1, 1, -1, 1] # (xmin, xmax) vertical and (ymin,ymax) horizontal walls
# written by qth, 2021/04/20,用于判断是否第一次初始化
self.start_flag = True
self.target_index = 0
# self.leader_id = 4
self.tar_pos = np.full((4, 2), 0, "float")
# red
# self.tar_pos[0][0] = 0
# self.tar_pos[0][1] = -0.5
# self.tar_pos[1][0] = 0.5
# self.tar_pos[1][1] = 0
# self.tar_pos[2][0] = -0.5
# self.tar_pos[2][1] = 0
# self.tar_pos[3][0] = 0
# self.tar_pos[3][1] = 1
self.teams_result_step1 = None
self.team_centroid_step1 = None
from .fortattack_parallel_run import ScenarioConfig
self.s_cfg = ScenarioConfig
    # return all alive agents
    @property
    def alive_agents(self):
        """All agents whose ``alive`` flag is True."""
        return [agent for agent in self.agents if agent.alive]
    # return all alive guards (agents that are not attackers)
    @property
    def alive_guards(self):
        """All living agents whose ``attacker`` flag is False."""
        return [agent for agent in self.agents if (agent.alive and not agent.attacker)]
    # return all guards, dead or alive
    @property
    def guards(self):
        """All agents whose ``attacker`` flag is False, regardless of liveness."""
        return [agent for agent in self.agents if not agent.attacker]
    # return all alive adversarial (attacker) agents
    @property
    def alive_attackers(self):
        """All living agents whose ``attacker`` flag is True."""
        return [agent for agent in self.agents if (agent.alive and agent.attacker)]
    # return all adversarial (attacker) agents, dead or alive
    @property
    def attackers(self):
        """All agents whose ``attacker`` flag is True, regardless of liveness."""
        return [agent for agent in self.agents if agent.attacker]
    # return all active entities in the world
    @property
    def active_entities(self):
        """Living agents plus all landmarks and bullets (bullets are entities)."""
        return [agent for agent in self.agents if agent.alive] + self.landmarks + self.bullets ## now bullets are also entities
    # return all entities in the world
    @property
    def entities(self):
        """Every agent (alive or dead) plus all landmarks and bullets."""
        return [agent for agent in self.agents] + self.landmarks + self.bullets ## now bullets are also entities
    # return alive agents controllable by external policies
    @property
    def alive_policy_agents(self):
        """Living agents with no scripted ``action_callback``."""
        return [agent for agent in self.agents if (agent.alive and agent.action_callback is None)]
    # return all agents controllable by external policies
    @property
    def policy_agents(self):
        """Agents (alive or dead) with no scripted ``action_callback``."""
        return [agent for agent in self.agents if agent.action_callback is None]
    # return alive agents controlled by world scripts
    @property
    def active_scripted_agents(self):
        """Living agents that carry a scripted ``action_callback``."""
        return [agent for agent in self.agents if (agent.alive and agent.action_callback is not None)]
    # return all agents controlled by world scripts
    @property
    def scripted_agents(self):
        """Agents (alive or dead) that carry a scripted ``action_callback``."""
        return [agent for agent in self.agents if agent.action_callback is not None]
    # return all agents with a test-time scripted callback
    @property
    def scripted_agents_test(self):
        """Agents that carry an ``action_callback_test`` (test-time script)."""
        return [agent for agent in self.agents if agent.action_callback_test is not None]
    # update state of the world
    def step(self):
        """Advance the world by one timestep.

        Order matters: scripted attacker policies act first, then laser
        effects are applied, then forces are accumulated and integrated,
        and finally communication state is refreshed for living agents.
        """
        # If any scripted agents exist (and living guards remain as their
        # opponents), drive them with the heuristic attacker policy.
        # (translated from Chinese; written by qth, 2021/04/16)
        if len(self.scripted_agents) != 0:
            self.attackers_policy_1(self.scripted_agents, self.alive_guards)
        ## The following is where actions are actually applied for learning agents.
        ## -------- apply effects of laser ------------- ##
        # Called an "effect" as it might apply force, kill, etc.
        self.apply_laser_effect()
        # ------- Calculate total physical force (p_force) on each entity ------- #
        p_force = [None] * len(self.active_entities)
        # apply agent physical controls
        p_force = self.apply_action_force(p_force)
        # apply environment forces
        p_force = self.apply_environment_force(p_force)
        ## apply wall collision forces
        p_force = self.apply_wall_collision_force(p_force)
        # Integrate physical state: calculates new state based on the forces.
        self.integrate_state(p_force)
        ## Communication bookkeeping ------------------- ##
        # update agent communication state for every living agent
        for agent in self.alive_agents:
            self.update_agent_state(agent)
def attackers_policy_1(self, attackers_agent, guards_agent):
'''
大规模集群协同策略
Args:
attackers_agent: 攻击方智能体
guards_agent: 防守方智能体
Returns:
written by qth, 2021/04/13
'''
# 初始化
for i, agent in enumerate(attackers_agent):
agent.action.u = np.zeros(self.dim_p) ## We'll use this now for Graph NN
agent.action.shoot = False
# 设定我方智能体的感知范围
obversation_radius = (self.wall_pos[1] - self.wall_pos[0]) #/3 # 即整个地图x轴方向的1/3
joint_forces = [0, 0]
############## 初始化参数 ###############
eps = 0.5 # 态势评估时,以该值作为半径,对所有对手分布进行聚类成簇
MinPoints = 2 # 表示每簇中最少的对手个数
num_members = 5 # 我方agent每组成员个数
min_members =3 # 我方agent每组成员最少个数,一旦小于该值,则重新分配组队。
################ 态势评估 ###############
# 通过聚类算法,对感知到的对手分布情况进行聚类,评估对手的整体分布情况
# 聚类的个数为我方agent的分组数
# 下面先从全局感知角度,利用聚类方法,计算对手分布情况。
guards_agent_position = np.zeros((len(guards_agent), 2))
for i_g, guard_agent in enumerate(guards_agent):
guards_agent_position[i_g] = guard_agent.state.p_pos
# print(guards_agent_position)
result = DBSCAN(eps, min_samples = MinPoints).fit(guards_agent_position)
# labels_表示聚类后的类别,通常为0,1,2..,最大数表示聚类后的簇的个数,-1表示属于噪音,
# 不在要求的范围内的或者个数小于2个的类别都会标识为-1
# label = result.labels_
# print(label)
# cluster_results用于存储落在各个不同簇的元素,其中键值即为簇的类别(0,1,2...)
cluster_results = {}
for cluster_index, cluster_class in enumerate(result.labels_):
if cluster_class not in cluster_results.keys():
cluster_results[cluster_class] = [guards_agent_position[cluster_index]]
# print(guards_agent_position[cluster_index])
else:
cluster_results[cluster_class].append(guards_agent_position[cluster_index])
# 对手分布中各簇的中心位置列表,作为我方agent的打击目标
cluster_centroid = []
cluster_radius = []
for key in cluster_results.keys():
cluster_index_position = np.array(cluster_results[key])
# print("cluster_class:%d" % key)
# print(cluster_index_position)
# 对各个簇的各元素再次聚类,得到该簇的中心点,用于引导我方agent
# 其中,centroid表示该簇的中心,label表示k-means2方法聚类得到的标签类别,由于聚类k为1
# 因此,此处的label都是同一类的簇
# 其中,key=-1的簇不能作为中心,因为所有噪点都标记为-1类,该中心不具有实际意义。
if key != -1:
for i in range(5):
try:
centroid, label = kmeans2(cluster_index_position, 1, iter=20, minit='++',seed=np.random.randint(100), missing='raise')
break
except:
pass
if i >= 4:
team_centroid, team_labels = kmeans2(cluster_index_position, 1, iter=20, minit='++',seed=np.random.randint(100), missing = 'warn')
print('处理空聚类')
break
# assert False
cluster_centroid.append(centroid)
else:
# | |
thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]).
If None, then the threshold maximizing the metric will be used.
:returns: The F1 for the given set of thresholds.
"""
return self.metric("f1", thresholds=thresholds)
    def F2(self, thresholds=None):
        """
        :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]).
            If None, then the threshold maximizing the metric will be used.
        :returns: The F2 for this set of metrics and thresholds
            (delegates to :meth:`metric` with ``"f2"``).
        """
        return self.metric("f2", thresholds=thresholds)
    def F0point5(self, thresholds=None):
        """
        :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]).
            If None, then the threshold maximizing the metric will be used.
        :returns: The F0.5 for this set of metrics and thresholds
            (delegates to :meth:`metric` with ``"f0point5"``).
        """
        return self.metric("f0point5", thresholds=thresholds)
    def accuracy(self, thresholds=None):
        """
        :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]).
            If None, then the threshold maximizing the metric will be used.
        :returns: The accuracy for this set of metrics and thresholds
            (delegates to :meth:`metric` with ``"accuracy"``).
        """
        return self.metric("accuracy", thresholds=thresholds)
    def error(self, thresholds=None):
        """
        :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]).
            If None, then the threshold minimizing the error will be used.
        :returns: The error (1 - accuracy) for this set of metrics and
            thresholds, via :meth:`_accuracy_to_error`.
        """
        return H2OBinomialModelMetrics._accuracy_to_error(self.metric("accuracy", thresholds=thresholds))
    def precision(self, thresholds=None):
        """
        :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]).
            If None, then the threshold maximizing the metric will be used.
        :returns: The precision for this set of metrics and thresholds
            (delegates to :meth:`metric` with ``"precision"``).
        """
        return self.metric("precision", thresholds=thresholds)
    def tpr(self, thresholds=None):
        """
        :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]).
            If None, then the threshold maximizing the metric will be used.
        :returns: The True Positive Rate.
        """
        return self.metric("tpr", thresholds=thresholds)
    def tnr(self, thresholds=None):
        """
        :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]).
            If None, then the threshold maximizing the metric will be used.
        :returns: The True Negative Rate.
        """
        return self.metric("tnr", thresholds=thresholds)
    def fnr(self, thresholds=None):
        """
        :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]).
            If None, then the threshold maximizing the metric will be used.
        :returns: The False Negative Rate.
        """
        return self.metric("fnr", thresholds=thresholds)
    def fpr(self, thresholds=None):
        """
        :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]).
            If None, then the threshold maximizing the metric will be used.
        :returns: The False Positive Rate.
        """
        return self.metric("fpr", thresholds=thresholds)
    def recall(self, thresholds=None):
        """
        :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]).
            If None, then the threshold maximizing the metric will be used.
        :returns: Recall for this set of metrics and thresholds
            (alias of the True Positive Rate; see :const:`metrics_aliases`).
        """
        return self.metric("recall", thresholds=thresholds)
    def sensitivity(self, thresholds=None):
        """
        :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]).
            If None, then the threshold maximizing the metric will be used.
        :returns: Sensitivity or True Positive Rate for this set of metrics and thresholds
            (alias of ``tpr``; see :const:`metrics_aliases`).
        """
        return self.metric("sensitivity", thresholds=thresholds)
    def fallout(self, thresholds=None):
        """
        :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]).
            If None, then the threshold maximizing the metric will be used.
        :returns: The fallout (alias of the False Positive Rate; see
            :const:`metrics_aliases`) for this set of metrics and thresholds.
        """
        return self.metric("fallout", thresholds=thresholds)
    def missrate(self, thresholds=None):
        """
        :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]).
            If None, then the threshold maximizing the metric will be used.
        :returns: The miss rate (alias of the False Negative Rate; see
            :const:`metrics_aliases`).
        """
        return self.metric("missrate", thresholds=thresholds)
    def specificity(self, thresholds=None):
        """
        :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]).
            If None, then the threshold maximizing the metric will be used.
        :returns: The specificity (alias of the True Negative Rate; see
            :const:`metrics_aliases`).
        """
        return self.metric("specificity", thresholds=thresholds)
    def mcc(self, thresholds=None):
        """
        :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]).
            If None, then the threshold maximizing the metric will be used.
        :returns: The absolute MCC (a value between 0 and 1, 0 being totally dissimilar,
            1 being identical); delegates to :meth:`metric` with ``"absolute_mcc"``.
        """
        return self.metric("absolute_mcc", thresholds=thresholds)
    def max_per_class_error(self, thresholds=None):
        """
        :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]).
            If None, then the threshold minimizing the error will be used.
        :returns: 1 - min(per class accuracy), computed by complementing the
            ``min_per_class_accuracy`` metric via :meth:`_accuracy_to_error`.
        """
        return H2OBinomialModelMetrics._accuracy_to_error(self.metric("min_per_class_accuracy", thresholds=thresholds))
    def mean_per_class_error(self, thresholds=None):
        """
        :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]).
            If None, then the threshold minimizing the error will be used.
        :returns: The mean per-class error, computed by complementing the
            ``mean_per_class_accuracy`` metric via :meth:`_accuracy_to_error`.
        """
        return H2OBinomialModelMetrics._accuracy_to_error(self.metric("mean_per_class_accuracy", thresholds=thresholds))
@staticmethod
def _accuracy_to_error(accuracies):
errors = List()
errors.extend([acc[0], 1 - acc[1]] for acc in accuracies)
setattr(errors, 'value',
[1 - v for v in accuracies.value] if isinstance(accuracies.value, list)
else 1 - accuracies.value
)
return errors
    def metric(self, metric, thresholds=None):
        """
        :param str metric: A metric among :const:`maximizing_metrics`.
        :param thresholds: thresholds parameter must be a number or a list (i.e. [0.01, 0.5, 0.99]).
            If None, then the threshold maximizing the metric will be used.
            If 'all', then all stored thresholds are used and returned with the matching metric.
        :returns: The set of metrics for the list of thresholds.
            The returned list has a 'value' property holding only
            the metric value (if no threshold provided or if provided as a number),
            or all the metric values (if thresholds provided as a list)
        """
        assert_is_type(thresholds, None, 'all', numeric, [numeric])
        if metric not in H2OBinomialModelMetrics.maximizing_metrics:
            raise ValueError("The only allowable metrics are {}".format(', '.join(H2OBinomialModelMetrics.maximizing_metrics)))
        # Translate user-facing aliases (recall, fallout, ...) into the column
        # names stored in the thresholds table.
        h2o_metric = (H2OBinomialModelMetrics.metrics_aliases[metric] if metric in H2OBinomialModelMetrics.metrics_aliases
                      else metric)
        # A scalar 'value' is exposed only when the caller asked for a single
        # threshold (None or a single number); lists keep all values.
        value_is_scalar = is_type(metric, str) and (thresholds is None or is_type(thresholds, numeric))
        # Normalize 'thresholds' to a list of numbers, or None meaning 'all'.
        if thresholds is None:
            thresholds = [self.find_threshold_by_max_metric(h2o_metric)]
        elif thresholds == 'all':
            thresholds = None
        elif is_type(thresholds, numeric):
            thresholds = [thresholds]
        metrics = List()
        thresh2d = self._metric_json['thresholds_and_metric_scores']
        if thresholds is None:  # fast path to return all thresholds: skipping find_idx logic
            metrics.extend(list(t) for t in zip(thresh2d['threshold'], thresh2d[h2o_metric]))
        else:
            for t in thresholds:
                idx = self.find_idx_by_threshold(t)
                metrics.append([t, thresh2d[h2o_metric][idx]])
        # Attach the convenience 'value' attribute described in the docstring.
        setattr(metrics, 'value',
                metrics[0][1] if value_is_scalar
                else list(r[1] for r in metrics)
        )
        return metrics
def plot(self, type="roc", server=False):
"""
Produce the desired metric plot.
:param type: the type of metric plot (currently, only ROC supported).
:param server: if True, generate plot inline using matplotlib's "Agg" backend.
:returns: None
"""
# TODO: add more types (i.e. cutoffs)
assert_is_type(type, "roc")
# check for matplotlib. exit if absent.
try:
imp.find_module('matplotlib')
import matplotlib
if server: matplotlib.use('Agg', warn=False)
import matplotlib.pyplot as plt
except ImportError:
print("matplotlib is required for this function!")
return
if type == "roc":
plt.xlabel('False Positive Rate (FPR)')
plt.ylabel('True Positive Rate (TPR)')
plt.title('ROC Curve')
plt.text(0.5, 0.5, r'AUC={0:.4f}'.format(self._metric_json["AUC"]))
plt.plot(self.fprs, self.tprs, 'b--')
plt.axis([0, 1, 0, 1])
if not server: plt.show()
    @property
    def fprs(self):
        """
        Return all false positive rates for all threshold values
        (read from the 'thresholds_and_metric_scores' table of the metric JSON).

        :returns: a list of false positive rates.
        """
        return self._metric_json["thresholds_and_metric_scores"]["fpr"]
    @property
    def tprs(self):
        """
        Return all true positive rates for all threshold values
        (read from the 'thresholds_and_metric_scores' table of the metric JSON).

        :returns: a list of true positive rates.
        """
        return self._metric_json["thresholds_and_metric_scores"]["tpr"]
    def roc(self):
        """
        Return the coordinates of the ROC curve as a tuple containing the false positive rates
        as a list and true positive rates as a list.

        :returns: The ROC values, as the tuple ``(fprs, tprs)``.
        """
        return self.fprs, self.tprs
metrics_aliases = dict(
fallout='fpr',
missrate='fnr',
recall='tpr',
sensitivity='tpr',
specificity='tnr'
)
#: metrics names allowed for confusion matrix
maximizing_metrics = ('absolute_mcc', 'accuracy', 'precision',
'f0point5', 'f1', 'f2',
'mean_per_class_accuracy', 'min_per_class_accuracy',
'tns', 'fns', 'fps', 'tps',
'tnr', 'fnr', 'fpr', 'tpr') + tuple(metrics_aliases.keys())
def confusion_matrix(self, metrics=None, thresholds=None):
"""
Get the confusion matrix for the specified metric
:param metrics: A string (or list of strings) among metrics listed in :const:`maximizing_metrics`. Defaults to 'f1'.
:param thresholds: A value (or list of values) between 0 and 1.
If None, then the thresholds maximizing each provided metric will be used.
:returns: a list of ConfusionMatrix objects (if there are more than one to return), or a single ConfusionMatrix
(if there is only one).
"""
# make lists out of metrics and thresholds arguments
if metrics is None and thresholds is None:
metrics = ['f1']
if isinstance(metrics, list):
metrics_list = metrics
elif metrics is None:
metrics_list = []
else:
| |
# Renishaw wdf Raman spectroscopy file reader
# Code inspired by Henderson, Alex DOI:10.5281/zenodo.495477
from __future__ import print_function
import struct
import numpy
import io
from .types import LenType, DataType, MeasurementType
from .types import ScanType, UnitType, DataType
from .types import Offsets, ExifTags
from .utils import convert_wl, convert_attr_name
from sys import stderr
try:
import PIL
from PIL import Image
from PIL.TiffImagePlugin import IFDRational
except ImportError:
PIL = None
class WDFReader(object):
"""Reader for Renishaw(TM) WiRE Raman spectroscopy files (.wdf format)
The wdf file format is separated into several DataBlocks, with starting 4-char
strings such as (incomplete list):
`WDF1`: File header for information
`DATA`: Spectra data
`XLST`: Data for X-axis of data, usually the Raman shift or wavelength
`YLST`: Data for Y-axis of data, possibly not important
`WMAP`: Information for mapping, e.g. StreamLine or StreamLineHR mapping
`MAP `: Mapping information(?)
`ORGN`: Data for stage origin
`TEXT`: Annotation text etc
`WXDA`: ? TODO
`WXDM`: ? TODO
`ZLDC`: ? TODO
`BKXL`: ? TODO
`WXCS`: ? TODO
`WXIS`: ? TODO
    `WHTL`: White light image
Following the block name, there are two indicators:
Block uid: int32
Block size: int64
Args:
file_name (file) : File object for the wdf file
Attributes:
title (str) : Title of measurement
username (str) : Username
application_name (str) : Default WiRE
application_version (int,) * 4 : Version number, e.g. [4, 4, 0, 6602]
measurement_type (int) : Type of measurement
0=unknown, 1=single, 2=multi, 3=mapping
scan_type (int) : Scan of type, see values in scan_types
laser_wavenumber (float32) : Wavenumber in cm^-1
count (int) : Numbers of experiments (same type), can be smaller than capacity
spectral_units (int) : Unit of spectra, see unit_types
xlist_type (int) : See unit_types
xlist_unit (int) : See unit_types
xlist_length (int): Size for the xlist
xdata (numpy.array): x-axis data
ylist_type (int): Same as xlist_type
ylist_unit (int): Same as xlist_unit
ylist_length (int): Same as xlist_length
ydata (numpy.array): y-data, possibly not used
point_per_spectrum (int): Should be identical to xlist_length
data_origin_count (int) : Number of rows in data origin list
capacity (int) : Max number of spectra
accumulation_count (int) : Single or multiple measurements
block_info (dict) : Info block at least with following keys
DATA, XLST, YLST, ORGN
# TODO types?
"""
def __init__(self, file_name, debug=False):
try:
self.file_obj = open(str(file_name), "rb")
except IOError:
raise IOError("File {0} does noe exist!".format(file_name))
# Initialize the properties for the wdfReader class
self.title = ""
self.username = ""
self.measurement_type = None
self.scan_type = None
self.laser_length = None
self.count = None
self.spectral_unit = None
self.xlist_type = None
self.xlist_unit = None
self.ylist_type = None
self.ylist_unit = None
self.point_per_spectrum = None
self.data_origin_count = None
self.capacity = None
self.application_name = ""
self.application_version = [None]*4
self.xlist_length = 0
self.ylist_length = 0
self.accumulation_count = None
self.block_info = {} # each key has value (uid, offset, size)
self.is_completed = False
self.debug = debug
# Parse the header section in the wdf file
self.__locate_all_blocks()
# Parse individual blocks
self.__treat_block_data("WDF1")
self.__treat_block_data("DATA")
self.__treat_block_data("XLST")
self.__treat_block_data("YLST")
self.__treat_block_data("ORGN")
self.__treat_block_data("WMAP")
self.__treat_block_data("WHTL")
# Reshape spectra after reading mapping information
self.__reshape_spectra()
# self._parse_wmap()
# Finally print the information
if self.debug:
print(("File Metadata").center(80, "="),
file=stderr)
self.print_info(file=stderr)
print("=" * 80, file=stderr)
def close(self):
    """Close the underlying file handle and, if a white-light image was
    loaded from a WHTL block, close that image as well."""
    self.file_obj.close()
    # `img` only exists after a WHTL block has been parsed
    if hasattr(self, "img"):
        self.img.close()
def __get_type_string(self, attr, data_type):
"""Get the enumerated-data_type as string
"""
val = getattr(self, attr) # No error checking
if data_type is None:
return val
else:
return data_type(val).name
def __read_type(self, type, size=1):
    """Read and unpack one value of `type` from the current file position.

    Numeric types support only size=1; "utf8" reads `size` bytes and strips
    NUL padding from the decoded string.
    """
    numeric_types = ("int16", "int32", "int64", "float", "double")
    if type in numeric_types:
        if size > 1:
            raise NotImplementedError(
                "Does not support read number type with size >1")
        # unpack into unsigned values
        fmt_out = LenType["s_" + type].value
        fmt_in = LenType["l_" + type].value
        raw = self.file_obj.read(fmt_in * size)
        return struct.unpack(fmt_out, raw)[0]
    if type == "utf8":
        # Fixed-size utf8 field; embedded NUL padding is removed
        return self.file_obj.read(size).decode("utf8").replace("\x00", "")
    raise ValueError("Unknown data length format!")
def __locate_single_block(self, pos):
    """Read the (name, uid, size) header of the data block starting at `pos`.

    :raises EOFError: when fewer than 4 name bytes remain (end of file).
    """
    self.file_obj.seek(pos)
    name = self.file_obj.read(0x4).decode("ascii")
    if len(name) < 4:
        raise EOFError
    uid = self.__read_type("int32")
    size = self.__read_type("int64")
    return name, uid, size
def __locate_all_blocks(self):
"""Get information for all data blocks and store them inside self.block_info
"""
curpos = 0
finished = False
while not finished:
try:
block_name, block_uid, block_size = self.__locate_single_block(
curpos)
self.block_info[block_name] = (block_uid, curpos, block_size)
curpos += block_size
except (EOFError, UnicodeDecodeError):
finished = True
def __treat_block_data(self, block_name):
"""Get data according to specific block name
"""
if block_name not in self.block_info.keys():
if self.debug:
print("Block name {0} not present in current measurement".
format(block_name), file=stderr)
return
# parse individual blocks with names
actions = {
"WDF1": ("_parse_header", ()),
"DATA": ("_parse_spectra", ()),
"XLST": ("_parse_xylist", ("X")),
"YLST": ("_parse_xylist", ("Y")),
"ORGN": ("_parse_orgin_list", ()),
"WMAP": ("_parse_wmap", ()),
"WHTL": ("_parse_img", ()),
}
func_name, val = actions[block_name]
getattr(self, func_name)(*val)
# The method for reading the info in the file header
def _parse_header(self):
    """Parse the WDF1 header block: measurement counts, units, application
    info, username and title.

    Reads at the fixed offsets defined in ``Offsets``.
    :raises ValueError: when the leading block is not a valid WDF1 header.
    """
    self.file_obj.seek(0)   # return to the head
    # Must make the conversion under python3
    block_ID = self.file_obj.read(Offsets.block_id).decode("ascii")
    block_UID = self.__read_type("int32")
    block_len = self.__read_type("int64")
    # First block must be "WDF1"
    if (block_ID != "WDF1") \
       or (block_UID != 0 and block_UID != 1) \
       or (block_len != Offsets.data_block):
        raise ValueError("The wdf file format is incorrect!")
    # TODO what are the digits in between?

    # The keys from the header
    self.file_obj.seek(Offsets.measurement_info)  # space
    self.point_per_spectrum = self.__read_type("int32")
    self.capacity = self.__read_type("int64")
    self.count = self.__read_type("int64")
    # If count < capacity, this measurement is not completed
    self.is_completed = (self.count == self.capacity)
    self.accumulation_count = self.__read_type("int32")
    self.ylist_length = self.__read_type("int32")
    self.xlist_length = self.__read_type("int32")
    self.data_origin_count = self.__read_type("int32")
    self.application_name = self.__read_type("utf8", 24)  # Must be "WiRE"
    # Four int16 fields, e.g. [4, 4, 0, 6602]
    for i in range(4):
        self.application_version[i] = self.__read_type("int16")
    self.scan_type = ScanType(self.__read_type("int32"))
    self.measurement_type = MeasurementType(self.__read_type("int32"))
    # For the units
    self.file_obj.seek(Offsets.spectral_info)
    self.spectral_unit = UnitType(self.__read_type("int32"))
    self.laser_length = convert_wl(self.__read_type("float"))  # in nm
    # Username and title occupy the fixed-width fields between file_info,
    # usr_name and data_block offsets
    self.file_obj.seek(Offsets.file_info)
    self.username = self.__read_type("utf8",
                                     Offsets.usr_name -
                                     Offsets.file_info)
    self.title = self.__read_type("utf8",
                                  Offsets.data_block -
                                  Offsets.usr_name)
def _parse_xylist(self, dir):
    """Read the XLST or YLST block, populating ``{x,y}list_type``,
    ``{x,y}list_unit`` and ``{x,y}data``.

    :param dir: "X" or "Y" (case-insensitive).
    :raises ValueError: on a bad direction or an uninitialized list.
    """
    axis = dir.upper()
    if axis not in ("X", "Y"):
        raise ValueError("Direction argument `dir` must be X or Y!")
    uid, pos, _block_size = self.block_info[axis + "LST"]
    self.file_obj.seek(pos + Offsets.block_data)
    prefix = axis.lower()
    setattr(self, prefix + "list_type",
            DataType(self.__read_type("int32")))
    setattr(self, prefix + "list_unit",
            UnitType(self.__read_type("int32")))
    length = getattr(self, prefix + "list_length")
    if length == 0:     # Possibly not started
        raise ValueError("{0}-List possibly not initialized!".
                         format(axis))
    axis_data = numpy.fromfile(self.file_obj, dtype="float32", count=length)
    setattr(self, prefix + "data", axis_data)
def _parse_spectra(self, start=0, end=-1):
    """Read spectra ``start..end`` (inclusive) from the DATA block into
    ``self.spectra`` as a flat float32 array.

    :param start: index of the first spectrum to read.
    :param end: index of the last spectrum; -1 means the last available.
    :raises ValueError: on out-of-range or inverted indices.
    """
    if end == -1:   # take all spectra
        end = self.count - 1
    valid = range(self.count)
    if start not in valid or end not in valid:
        raise ValueError("Wrong start and end indices of spectra!")
    if start > end:
        raise ValueError("Start cannot be larger than end!")
    # Seek to the first requested spectrum inside the DATA block
    uid, pos, size = self.block_info["DATA"]
    byte_offset = (pos + Offsets.block_data
                   + LenType["l_float"].value * start * self.point_per_spectrum)
    self.file_obj.seek(byte_offset)
    n_row = end - start + 1
    # The spectra stay as a 1D array; reshaping happens later
    self.spectra = numpy.fromfile(
        self.file_obj, dtype="float32",
        count=n_row * self.point_per_spectrum)
def _parse_orgin_list(self):
"""Get information from OriginList
Set the following attributes:
`self.origin_list_header`: 2D-array
`self.origin_list`: origin list
"""
# First confirm origin list type
uid, pos, size = self.block_info["ORGN"]
self.origin_list_header = [[None, ] * 5
for i in range(self.data_origin_count)]
# All possible to have x y and z positions!
self.xpos = numpy.zeros(self.count)
self.ypos = numpy.zeros(self.count)
self.zpos = numpy.zeros(self.count)
list_increment = Offsets.origin_increment + \
LenType.l_double.value * self.capacity
curpos = pos + Offsets.origin_info
for i in range(self.data_origin_count):
self.file_obj.seek(curpos)
p1 = self.__read_type("int32")
p2 = self.__read_type("int32")
s = self.__read_type("utf8", 0x10)
# First index: is the list x, or y pos?
self.origin_list_header[i][0] = (p1 >> 31 & 0b1) == 1
# Second: Data type of the row
self.origin_list_header[i][1] = DataType(p1 & ~(0b1 << 31))
# Third: Unit
self.origin_list_header[i][2] = UnitType(p2)
# Fourth: annotation
self.origin_list_header[i][3] = s
# Last: the actual data
# array = numpy.empty(self.count)
# Time appears to be recorded as int64 in 100 nanosecond intervals
# Possibly using the .NET DateTime epoch
# Reference does not appear to be Unix Epoch time
# Set | |
def import_csv_data(self, provenance, file, **kwargs):  # noqa: E501
    """Import a CSV file for the given provenanceURI.

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead of the response data.

    :param str provenance: Provenance URI (required)
    :param file file: File (required)
    :param str authorization: Authentication token (required)
    :param str accept_language: Request accepted language
    :return: DataCSVValidationDTO, or the request thread when async.
    """
    kwargs['_return_http_data_only'] = True
    # The delegate already returns either the unwrapped data (sync) or the
    # request thread (async), so a single delegation covers both branches.
    return self.import_csv_data_with_http_info(provenance, file, **kwargs)  # noqa: E501
def import_csv_data_with_http_info(self, provenance, file, **kwargs):  # noqa: E501
    """Import a CSV file for the given provenanceURI  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.import_csv_data_with_http_info(provenance, file, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str provenance: Provenance URI (required)
    :param file file: File (required)
    :param str authorization: Authentication token (required)
    :param str accept_language: Request accepted language
    :return: DataCSVValidationDTO
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE(review): this method appears swagger-codegen generated — keep
    # edits minimal so regeneration stays diffable.
    all_params = ['provenance', 'file', ]  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() snapshot holds self/provenance/file/kwargs; unknown keyword
    # arguments are rejected, known ones are flattened into `params`.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method import_csv_data" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'provenance' is set
    if ('provenance' not in params or
            params['provenance'] is None):
        raise ValueError("Missing the required parameter `provenance` when calling `import_csv_data`")  # noqa: E501
    # verify the required parameter 'file' is set
    if ('file' not in params or
            params['file'] is None):
        raise ValueError("Missing the required parameter `file` when calling `import_csv_data`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []
    if 'provenance' in params:
        query_params.append(('provenance', params['provenance']))  # noqa: E501

    header_params = {}
    # NOTE(review): the auth headers below are deliberately commented out —
    # presumably authentication is handled by the api_client configuration;
    # confirm before re-enabling.
    #if 'authorization' in params:
    #    header_params['Authorization'] = params['authorization']  # noqa: E501
    #if 'accept_language' in params:
    #    header_params['Accept-Language'] = params['accept_language']  # noqa: E501

    form_params = []
    local_var_files = {}
    if 'file' in params:
        local_var_files['file'] = params['file']  # noqa: E501

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['multipart/form-data'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/core/data/import', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='DataCSVValidationDTO',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def post_data_file(self, description, file, **kwargs):  # noqa: E501
    """Add a data file.

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead of the response data.

    :param str description: File description with metadata (required)
    :param file file: Data file (required)
    :param str authorization: Authentication token (required)
    :param str accept_language: Request accepted language
    :return: ObjectUriResponse, or the request thread when async.
    """
    kwargs['_return_http_data_only'] = True
    # The delegate already returns either the unwrapped data (sync) or the
    # request thread (async), so a single delegation covers both branches.
    return self.post_data_file_with_http_info(description, file, **kwargs)  # noqa: E501
def post_data_file_with_http_info(self, description, file, **kwargs):  # noqa: E501
    """Add a data file  # noqa: E501

    {\"rdf_type\":\"http://www.opensilex.org/vocabulary/oeso#Image\", \"date\":\"2020-08-21T00:00:00+01:00\", \"target\":\"http://plot01\", \"provenance\": { \"uri\":\"http://opensilex.dev/provenance/1598001689415\" }, \"metadata\":{ \"LabelView\" : \"side90\", \"paramA\" : \"90\"}}  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.post_data_file_with_http_info(description, file, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str description: File description with metadata (required)
    :param file file: Data file (required)
    :param str authorization: Authentication token (required)
    :param str accept_language: Request accepted language
    :return: ObjectUriResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE(review): swagger-codegen generated; keep edits minimal.
    all_params = ['description', 'file', ]  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() snapshot holds self/description/file/kwargs; unknown keyword
    # arguments are rejected, known ones are flattened into `params`.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_data_file" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'description' is set
    if ('description' not in params or
            params['description'] is None):
        raise ValueError("Missing the required parameter `description` when calling `post_data_file`")  # noqa: E501
    # verify the required parameter 'file' is set
    if ('file' not in params or
            params['file'] is None):
        raise ValueError("Missing the required parameter `file` when calling `post_data_file`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}
    # NOTE(review): auth headers deliberately commented out — presumably
    # handled by the api_client configuration; confirm before re-enabling.
    #if 'authorization' in params:
    #    header_params['Authorization'] = params['authorization']  # noqa: E501
    #if 'accept_language' in params:
    #    header_params['Accept-Language'] = params['accept_language']  # noqa: E501

    # The description travels as a multipart form field, the file as an upload
    form_params = []
    local_var_files = {}
    if 'description' in params:
        form_params.append(('description', params['description']))  # noqa: E501
    if 'file' in params:
        local_var_files['file'] = params['file']  # noqa: E501

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['multipart/form-data'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/core/datafiles', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ObjectUriResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def post_data_file_paths(self, body, **kwargs):  # noqa: E501
    """Describe datafiles and give their relative paths in the configured
    storage system (for already-stored datafiles).

    Synchronous by default; pass ``async_req=True`` to receive the request
    thread instead of the response data.

    :param list[DataFilePathCreationDTO] body: Metadata of the file (required)
    :param str authorization: Authentication token (required)
    :param str accept_language: Request accepted language
    :return: ObjectUriResponse, or the request thread when async.
    """
    kwargs['_return_http_data_only'] = True
    # The delegate already returns either the unwrapped data (sync) or the
    # request thread (async), so a single delegation covers both branches.
    return self.post_data_file_paths_with_http_info(body, **kwargs)  # noqa: E501
def post_data_file_paths_with_http_info(self, body, **kwargs):  # noqa: E501
    """Describe datafiles and give their relative paths in the configured storage system. In the case of already stored datafiles.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.post_data_file_paths_with_http_info(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param list[DataFilePathCreationDTO] body: Metadata of the file (required)
    :param str authorization: Authentication token (required)
    :param str accept_language: Request accepted language
    :return: ObjectUriResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # NOTE(review): swagger-codegen generated; keep edits minimal.
    all_params = ['body', ]  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # locals() snapshot holds self/body/kwargs; unknown keyword arguments
    # are rejected, known ones are flattened into `params`.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_data_file_paths" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params or
            params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `post_data_file_paths`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}
    # NOTE(review): auth headers deliberately commented out — presumably
    # handled by the api_client configuration; confirm before re-enabling.
    #if 'authorization' in params:
    #    header_params['Authorization'] = params['authorization']  # noqa: E501
    #if 'accept_language' in params:
    #    header_params['Accept-Language'] = params['accept_language']  # noqa: E501

    form_params = []
    local_var_files = {}

    # Unlike the multipart endpoints, this one sends the DTO list as JSON body
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/core/datafiles/description', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ObjectUriResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def search_data_list(self, **kwargs): # noqa: E501
"""Search data # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. | |
# <reponame>sdpython/mlprodic
# -*- coding: utf-8 -*-
"""
@brief test log(time=3s)
"""
import unittest
from typing import Any
import numpy
from pyquickhelper.pycode import ExtTestCase, ignore_warnings
from mlprodict.npy import onnxnumpy, onnxnumpy_default, onnxnumpy_np
import mlprodict.npy.numpy_onnx_impl as nxnp
from mlprodict.npy import (
OnnxNumpyCompiler as ONC, NDArray, NDArraySameTypeSameShape)
@ignore_warnings(DeprecationWarning)
def get_bool(unused):
    """Return ``numpy.bool_`` when the alias exists, else the builtin ``bool``."""
    return getattr(numpy, "bool_", bool)


# Boolean dtype used by the NDArray annotations below.
numpy_bool = get_bool(None)
# --- element-wise arithmetic / comparison ONNX-numpy functions ------------
# Each function below is compiled into an ONNX graph by its decorator; the
# NDArray annotations fix the input/output dtypes and must not be altered.
@onnxnumpy_default
def test_abs(x: NDArray[Any, numpy.float32],
             ) -> NDArray[Any, numpy.float32]:
    "onnx numpy abs"
    return nxnp.abs(x)


@onnxnumpy_default
def test_abs_abs(x: NDArray[Any, numpy.float32],
                 ) -> NDArray[Any, numpy.float32]:
    "onnx numpy abs abs"
    return nxnp.abs(nxnp.abs(x))


@onnxnumpy_default
def test_abs_add(x: NDArray[Any, numpy.float32],
                 ) -> NDArray[Any, numpy.float32]:
    "onnx numpy addition"
    return nxnp.abs(x) + x


@onnxnumpy_default
def test_abs_add4(x: NDArray[Any, numpy.float32],
                  ) -> NDArray[Any, numpy.float32]:
    "onnx numpy addition"
    x2 = x + x
    return x2 + x2


@onnxnumpy_default
def test_abs_addm(x1: NDArray[Any, numpy.float32],
                  x2: NDArray[Any, numpy.float32]
                  ) -> NDArray[Any, numpy.float32]:
    "onnx numpy addition"
    return nxnp.abs(x1) + x2


@onnxnumpy_default
def test_abs_add2(x: NDArray[Any, numpy.float32],
                  ) -> NDArray[Any, numpy.float32]:
    "onnx numpy addition"
    return nxnp.abs(x) + numpy.float32(2)


@onnxnumpy_default
def test_abs_sub(x: NDArray[Any, numpy.float32],
                 ) -> NDArray[Any, numpy.float32]:
    "onnx numpy subtraction"
    return nxnp.abs(x) - x


@onnxnumpy_default
def test_abs_mul(x: NDArray[Any, numpy.float32],
                 ) -> NDArray[Any, numpy.float32]:
    "onnx numpy multiplication"
    return nxnp.abs(x) * x


@onnxnumpy_default
def test_abs_pow(x: NDArray[Any, numpy.float32],
                 ) -> NDArray[Any, numpy.float32]:
    "onnx numpy power"
    return nxnp.abs(x) ** numpy.float32(2)


@onnxnumpy_default
def test_abs_mod(x: NDArray[Any, numpy.float32],
                 ) -> NDArray[Any, numpy.float32]:
    "onnx numpy modulo"
    return nxnp.abs(x) % numpy.float32(2)


@onnxnumpy_default
def test_abs_matmul(x: NDArray[Any, numpy.float32],
                    ) -> NDArray[Any, numpy.float32]:
    "onnx numpy matrix multiplication"
    return nxnp.abs(x) @ x


@onnxnumpy_default
def test_abs_matmul2(x: NDArray[Any, numpy.float32],
                     ) -> NDArray[Any, numpy.float32]:
    "onnx numpy matrix multiplication"
    return nxnp.matmul(nxnp.abs(x), x)


@onnxnumpy_default
def test_abs_div(x: NDArray[Any, numpy.float32],
                 ) -> NDArray[Any, numpy.float32]:
    "onnx numpy division"
    return nxnp.abs(x) / x


@onnxnumpy_default
def test_abs_idiv(x: NDArray[Any, numpy.float32],
                  ) -> NDArray[Any, numpy.int64]:
    "onnx numpy int division"
    return nxnp.abs(x).astype(numpy.int64) // x.astype(numpy.int64)


@onnxnumpy_default
def test_abs_equal(x: NDArray[Any, numpy.float32],
                   ) -> NDArray[Any, numpy_bool]:
    "onnx numpy equality"
    return nxnp.abs(x) == x


@onnxnumpy_default
def test_abs_not_equal(x: NDArray[Any, numpy.float32],
                       ) -> NDArray[Any, numpy_bool]:
    "onnx numpy inequality"
    return nxnp.abs(x) != x


@onnxnumpy_default
def test_abs_greater(x: NDArray[Any, numpy.float32],
                     ) -> NDArray[Any, numpy_bool]:
    "onnx numpy greater"
    return nxnp.abs(x) > x


@onnxnumpy_default
def test_abs_greater_or_equal(x: NDArray[Any, numpy.float32],
                              ) -> NDArray[Any, numpy_bool]:
    "onnx numpy greater or equal"
    return nxnp.abs(x) >= x


@onnxnumpy_default
def test_abs_less(x: NDArray[Any, numpy.float32],
                  ) -> NDArray[Any, numpy_bool]:
    "onnx numpy less"
    return nxnp.abs(x) < x


@onnxnumpy_default
def test_abs_less_or_equal(x: NDArray[Any, numpy.float32],
                           ) -> NDArray[Any, numpy_bool]:
    "onnx numpy less or equal"
    return nxnp.abs(x) <= x
# --- logical ops, reductions, reshaping and indexing ----------------------
# NOTE(review): `test_abs_and`/`test_abs_or` use the Python `and`/`or`
# keywords (not the element-wise `&`/`|` used right below them) — presumably
# intentional to exercise that code path; confirm against the test-suite.
@onnxnumpy_default
def test_abs_and(x: NDArray[Any, numpy.float32],
                 ) -> NDArray[Any, numpy_bool]:
    "onnx numpy and"
    return (nxnp.abs(x) < x) and (nxnp.abs(x) < numpy.float32(0))


@onnxnumpy_default
def test_abs_and2(x: NDArray[Any, numpy.float32],
                  ) -> NDArray[Any, numpy_bool]:
    "onnx numpy and"
    return (nxnp.abs(x) < x) & (nxnp.abs(x) < numpy.float32(0))


@onnxnumpy_default
def test_abs_or(x: NDArray[Any, numpy.float32],
                ) -> NDArray[Any, numpy_bool]:
    "onnx numpy or"
    return (nxnp.abs(x) < x) or (nxnp.abs(x) < numpy.float32(0))


@onnxnumpy_default
def test_abs_or2(x: NDArray[Any, numpy.float32],
                 ) -> NDArray[Any, numpy_bool]:
    "onnx numpy or"
    return (nxnp.abs(x) < x) | (nxnp.abs(x) < numpy.float32(0))


@onnxnumpy_default
def test_abs_sum1(x: NDArray[Any, numpy.float32],
                  ) -> NDArray[Any, numpy.float32]:
    "onnx numpy sum"
    return nxnp.sum(nxnp.abs(x), axis=0)


@onnxnumpy_default
def test_abs_sum2(x: NDArray[Any, numpy.float32],
                  ) -> NDArray[Any, numpy.float32]:
    "onnx numpy sum"
    return nxnp.sum(nxnp.abs(x), axis=1, keepdims=1)


@onnxnumpy_default
def test_abs_transpose_t(x: NDArray[Any, numpy.float32],
                         ) -> NDArray[Any, numpy.float32]:
    "onnx numpy transpose T"
    return nxnp.abs(x).T


@onnxnumpy_default
def test_abs_cast(x: NDArray[Any, numpy.float32],
                  ) -> NDArray[Any, numpy.int64]:
    "onnx numpy cast"
    return nxnp.abs(x).astype(numpy.int64)


@onnxnumpy_default
def test_abs_reshape(x: NDArray[Any, numpy.float32],
                     ) -> NDArray[Any, numpy.float32]:
    "onnx numpy reshape"
    return nxnp.abs(x).reshape((-1, 1))


# Same graph but pinned to ONNX opset 11.
@onnxnumpy(op_version=11)
def test_abs_reshape_11(x: NDArray[Any, numpy.float32],
                        ) -> NDArray[Any, numpy.float32]:
    "onnx numpy reshape with opset 11"
    return nxnp.abs(x).reshape((-1, 1))


@onnxnumpy_default
def test_abs_slice(x: NDArray[Any, numpy.float32],
                   ) -> NDArray[Any, numpy.float32]:
    "onnx numpy slice 1"
    return nxnp.abs(x)[:, 1]


@onnxnumpy_default
def test_abs_slice2(x: NDArray[Any, numpy.float32],
                    ) -> NDArray[Any, numpy.float32]:
    "onnx numpy slice 2"
    return nxnp.abs(x)[:1, 1]


@onnxnumpy_default
def test_abs_slice23(x: NDArray[Any, numpy.float32],
                     ) -> NDArray[Any, numpy.float32]:
    "onnx numpy slice 23"
    return nxnp.abs(x)[::2, ::3]


@onnxnumpy_default
def test_abs_slice_end(x: NDArray[Any, numpy.float32],
                       ) -> NDArray[Any, numpy.float32]:
    "onnx numpy slice end"
    return nxnp.abs(x)[1:, :3]


@onnxnumpy_default
def test_abs_gather(x: NDArray[Any, numpy.float32],
                    ) -> NDArray[Any, numpy.float32]:
    "onnx numpy gather"
    return nxnp.abs(x)[1]


@onnxnumpy_default
def test_abs_gather2(x: NDArray[Any, numpy.float32],
                     ) -> NDArray[Any, numpy.float32]:
    "onnx numpy gather"
    return nxnp.abs(x)[:, 1]


@onnxnumpy_default
def test_abs_neg(x: NDArray[Any, numpy.float32],
                 ) -> NDArray[Any, numpy.float32]:
    "onnx numpy neg"
    return - nxnp.abs(x)


@onnxnumpy_default
def test_abs_not(x: NDArray[Any, numpy.float32],
                 ) -> NDArray[Any, numpy.bool_]:
    "onnx numpy not"
    temp = nxnp.abs(x) > numpy.float32(0)
    return temp.not_()


@onnxnumpy_default
def test_abs_filter(x: NDArray[Any, numpy.float32],
                    ) -> NDArray[Any, numpy.float32]:
    "onnx numpy filter"
    return nxnp.abs(x)[x[:, 0] > numpy.float32(15)]
@onnxnumpy_default
def test_log(x: NDArray[Any, numpy.float32],
) -> NDArray[Any, numpy.float32]:
"onnx numpy log"
return nxnp.log(x)
@onnxnumpy_np(signature=NDArraySameTypeSameShape("floats"))
def test_abs_log_multi(x):
"onnx numpy log multiple type"
return nxnp.log(nxnp.abs(x))
@onnxnumpy_np(signature=NDArraySameTypeSameShape("floats"))
def test_abs_log_multi_dtype(x):
"onnx numpy log multiple type"
return nxnp.log(nxnp.abs(x) + x.dtype(1))
@onnxnumpy_default
def test_abs_shape(x: NDArray[Any, numpy.float32],
) -> NDArray[Any, numpy.int64]:
"onnx numpy shape"
return nxnp.abs(x).shape
@onnxnumpy_default
def test_abs_size(x: NDArray[Any, numpy.float32],
) -> NDArray[Any, numpy.int64]:
"onnx numpy size"
return nxnp.abs(x).size
@onnxnumpy_default
def test_abs_flatten(x: NDArray[Any, numpy.float32],
) -> NDArray[Any, numpy.int64]:
"onnx numpy flatten"
return nxnp.abs(x).flatten()
@onnxnumpy_default
def test_abs_flatten2(x: NDArray[Any, numpy.float32],
) -> NDArray[Any, numpy.int64]:
"onnx numpy flatten"
return nxnp.abs(x).flatten(axis=1)
@onnxnumpy_default
def test_abs_set1a(x: NDArray[Any, numpy.float32],
) -> NDArray[Any, numpy.float32]:
"onnx numpy set"
temp = nxnp.abs(x).copy()
temp[2] = numpy.float32(-1.5)
return temp
@onnxnumpy_default
def test_abs_set1b(x: NDArray[Any, numpy.float32],
) -> NDArray[Any, numpy.float32]:
"onnx numpy set"
temp = nxnp.abs(x).copy()
temp[:4] = numpy.float32(-1.5)
return temp
@onnxnumpy_default
def test_abs_set1c(x: NDArray[Any, numpy.float32],
) -> NDArray[Any, numpy.float32]:
"onnx numpy set"
temp = nxnp.abs(x).copy()
temp[:4:2] = numpy.float32(-1.5)
return temp
@onnxnumpy_default
def test_abs_set1d(x: NDArray[Any, numpy.float32],
) -> NDArray[Any, numpy.float32]:
"onnx numpy set"
temp = nxnp.abs(x).copy()
temp[:4:2] = numpy.array([-1.5, -1.6], dtype=numpy.float32)
return temp
@onnxnumpy_default
def test_abs_set1e(x: NDArray[Any, numpy.float32],
                   ) -> NDArray[Any, numpy.float32]:
    "onnx numpy set"
    # Scalar assignment into the open-ended slice [2:].
    temp = nxnp.abs(x).copy()
    temp[2:] = numpy.float32(-1.5)
    return temp
@onnxnumpy_default
def test_abs_set1f(x: NDArray[Any, numpy.float32],
                   ) -> NDArray[Any, numpy.float32]:
    "onnx numpy set"
    # Scalar assignment into the bounded slice [3:5].
    temp = nxnp.abs(x).copy()
    temp[3:5] = numpy.float32(-1.5)
    return temp
@onnxnumpy_default
def test_abs_set1g(x: NDArray[Any, numpy.float32],
                   ) -> NDArray[Any, numpy.float32]:
    "onnx numpy set"
    # Open-ended slice [3:] assigned from a four-element array.
    temp = nxnp.abs(x).copy()
    temp[3:] = numpy.array([-1.5] * 4, dtype=numpy.float32)
    return temp
@onnxnumpy_default
def test_abs_set1h(x: NDArray[Any, numpy.float32],
                   ) -> NDArray[Any, numpy.float32]:
    "onnx numpy set"
    # Boolean-mask assignment (x < 0) from a one-element array.
    cp = x.copy()
    cp[x < numpy.float32(0)] = numpy.array([-1], dtype=numpy.float32)
    return cp
@onnxnumpy_default
def test_abs_set1i(x: NDArray[Any, numpy.float32],
                   ) -> NDArray[Any, numpy.float32]:
    "onnx numpy set"
    # Boolean-mask assignment from another traced tensor expression (-x).
    cp = x.copy()
    z = x < numpy.float32(0)
    cp[z] = -x
    return cp
@onnxnumpy_default
def onnx_log_1(x: NDArray[Any, numpy.float32]) -> NDArray[Any, numpy.float32]:
    # log(1 + x), with the constant wrapped explicitly through nxnp.cst.
    return nxnp.log(nxnp.cst(numpy.float32(1)) + x)
@onnxnumpy_default
def onnx_log_1r(x: NDArray[Any, numpy.float32]) -> NDArray[Any, numpy.float32]:
    # log(1 + x) using the reflected operator (numpy scalar on the left).
    return nxnp.log(numpy.float32(1) + x)
@onnxnumpy_default
def onnx_log_11(x: NDArray[Any, numpy.float32]) -> NDArray[Any, numpy.float32]:
    # log(1 + x) with a plain Python float constant (1. is a double;
    # presumably cast to the graph dtype by nxnp.cst — confirm).
    return nxnp.log(nxnp.cst(1.) + x)
@onnxnumpy_default
def onnx_exp_1r_sub(x: NDArray[Any, numpy.float32]) -> NDArray[Any, numpy.float32]:
    # exp(1 - x): reflected subtraction with a scalar left operand.
    return nxnp.exp(numpy.float32(1) - x)
@onnxnumpy_default
def onnx_log_1r_div(x: NDArray[Any, numpy.float32]) -> NDArray[Any, numpy.float32]:
    # log(2 / x): reflected division with a scalar left operand.
    return nxnp.log(numpy.float32(2) / x)
@onnxnumpy_default
def onnx_log_1r_mul3(x: NDArray[Any, numpy.float32]) -> NDArray[Any, numpy.float32]:
    # log(2 * x) with the constant given as a one-element array via nxnp.cst.
    return nxnp.log(nxnp.cst(numpy.array([2], dtype=numpy.float32)) * x)
@onnxnumpy_default
def onnx_log_1r_mul(x: NDArray[Any, numpy.float32]) -> NDArray[Any, numpy.float32]:
    # log(2 * x): reflected multiplication with a scalar left operand.
    return nxnp.log(numpy.float32(2) * x)
class TestOnnxVariable(ExtTestCase):
def test_py_abs(self):
x = numpy.array([[6.1, -5], [3.5, -7.8]], dtype=numpy.float32)
y = test_abs(x)
self.assertEqualArray(y, numpy.abs(x))
self.assertEqual(test_abs.__doc__, "onnx numpy abs")
self.assertTrue(hasattr(test_abs, 'compiled'))
self.assertIsInstance(test_abs.compiled, ONC)
rep = repr(test_abs.compiled)
self.assertStartsWith("OnnxNumpyCompiler(", rep)
def test_py_abs_add(self):
x = numpy.array([[6.1, -5], [3.5, -7.8]], dtype=numpy.float32)
y = test_abs_add(x)
self.assertEqualArray(y, numpy.abs(x) + x)
def test_py_abs_addm(self):
x = numpy.array([[6.1, -5], [3.5, -7.8]], dtype=numpy.float32)
y = test_abs_addm(x, x)
self.assertEqualArray(y, numpy.abs(x) + x)
def test_py_abs_add_cst(self):
x = numpy.array([[6.1, -5], [3.5, -7.8]], dtype=numpy.float32)
y = test_abs_add2(x)
self.assertEqualArray(y, numpy.abs(x) + 2)
def test_py_abs_add4(self):
x = numpy.array([[6.1, -5], [3.5, -7.8]], dtype=numpy.float32)
y = test_abs_add4(x)
text = str(test_abs_add4.compiled.onnx_).split('op_type: "Add"')
self.assertEqual(len(text), 3)
self.assertEqualArray(y, (x + x) + (x + x))
def test_py_abs_sub(self):
x = numpy.array([[6.1, -5], [3.5, -7.8]], dtype=numpy.float32)
y = test_abs_sub(x)
self.assertEqualArray(y, numpy.abs(x) - x)
def test_py_abs_mul(self):
x = numpy.array([[6.1, -5], [3.5, -7.8]], dtype=numpy.float32)
y = test_abs_mul(x)
self.assertEqualArray(y, numpy.abs(x) * x)
def test_py_abs_mod(self):
x = numpy.array([[6.1, -5], [3.5, -7.8]], dtype=numpy.float32)
y = test_abs_mod(x)
self.assertEqualArray(y, numpy.abs(x) % 2)
def test_py_abs_pox(self):
x = numpy.array([[6.1, -5], [3.5, -7.8]], dtype=numpy.float32)
y = test_abs_pow(x)
self.assertEqualArray(y, numpy.abs(x) ** 2)
def test_py_abs_matmul(self):
x = numpy.array([[6.1, -5], [3.5, -7.8]], dtype=numpy.float32)
y = test_abs_matmul(x)
self.assertEqualArray(y, numpy.abs(x) @ x)
def test_py_abs_matmul2(self):
x = numpy.array([[6.1, -5], [3.5, -7.8]], dtype=numpy.float32)
y = test_abs_matmul2(x)
self.assertEqualArray(y, numpy.abs(x) @ x)
def test_py_abs_div(self):
x = numpy.array([[6.1, -5], [3.5, -7.8]], dtype=numpy.float32)
y = test_abs_div(x)
self.assertEqualArray(y, numpy.abs(x) / x)
x = numpy.array([[6.1, -5], [3.5, -7.8]], dtype=numpy.int64)
y = test_abs_div(x)
self.assertEqualArray(y, numpy.abs(x) / x)
def test_py_abs_idiv(self):
x = numpy.array([[6.1, -5], [3.5, -7.8]], dtype=numpy.float32)
y = test_abs_idiv(x)
self.assertEqualArray(y, numpy.abs(x) // x)
x = numpy.array([[6.1, -5], [3.5, -7.8]], dtype=numpy.int64)
y = test_abs_idiv(x)
self.assertEqualArray(y, numpy.abs(x) // x)
@ignore_warnings(DeprecationWarning)
def test_py_abs_equal(self):
x = numpy.array([[6.1, -5], [3.5, -7.8]], dtype=numpy.float32)
y = test_abs_equal(x)
self.assertEqualArray(y, numpy.abs(x) == x)
@ignore_warnings(DeprecationWarning)
def test_py_abs_not_equal(self):
x = numpy.array([[6.1, -5], [3.5, -7.8]], dtype=numpy.float32)
y = test_abs_not_equal(x)
self.assertEqualArray(y, numpy.abs(x) != x)
@ignore_warnings(DeprecationWarning)
def test_py_abs_greater(self):
x = numpy.array([[6.1, -5], [3.5, -7.8]], dtype=numpy.float32)
y = test_abs_greater(x)
self.assertEqualArray(y, numpy.abs(x) > x)
@ignore_warnings(DeprecationWarning)
def test_py_abs_greater_or_equal(self):
x = numpy.array([[6.1, -5], [3.5, -7.8]], dtype=numpy.float32)
y = test_abs_greater_or_equal(x)
self.assertEqualArray(y, numpy.abs(x) >= x)
@ignore_warnings(DeprecationWarning)
def test_py_abs_less(self):
x = numpy.array([[6.1, -5], [3.5, -7.8]], dtype=numpy.float32)
y = test_abs_less(x)
self.assertEqualArray(y, numpy.abs(x) < x)
@ignore_warnings(DeprecationWarning)
def | |
#!/usr/bin/env python3
import os
import sys
import copy
from pycrate_asn1c.utils import *
from pycrate_asn1c.glob import *
from pycrate_asn1c.setobj import *
from pycrate_asn1c.refobj import *
from pycrate_asn1c.asnobj import get_asnobj, ASN1Obj, INT, OID
from pycrate_asn1c.asnproc import compile_text
#
# License
#
LICENSE_MSG = '#\n\
# Copyright 2019 <NAME> <<EMAIL>>\n\
#\n\
# Licensed under the Apache License, Version 2.0 (the "License");\n\
# you may not use this file except in compliance with the License.\n\
# You may obtain a copy of the License at\n\
#\n\
# http://www.apache.org/licenses/LICENSE-2.0\n\
#\n\
# Unless required by applicable law or agreed to in writing, software\n\
# distributed under the License is distributed on an "AS IS" BASIS,\n\
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\
# See the License for the specific language governing permissions and\n\
# limitations under the License.\n\
\n\
# This file is generated by libv2x, ros2gen.py\n\
\n\
# CAUTION: This file can be under the license of input ASN.1 file.\n\
# The use of this file is your own responsibility.\n\
\n'
LICENSE_IMP = '/*\n\
* Copyright 2019 <NAME> <<EMAIL>>\n\
*\n\
* Licensed under the Apache License, Version 2.0 (the "License");\n\
* you may not use this file except in compliance with the License.\n\
* You may obtain a copy of the License at\n\
*\n\
* http://www.apache.org/licenses/LICENSE-2.0\n\
*\n\
* Unless required by applicable law or agreed to in writing, software\n\
* distributed under the License is distributed on an "AS IS" BASIS,\n\
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\
* See the License for the specific language governing permissions and\n\
* limitations under the License.\n\
*/\n\
\n\
/* This file is generated by libv2x, ros2gen.py */\n\
\n\
/*\n\
* CAUTION: This file can be under the license of input ASN.1 file.\n\
* The use of this file is your own responsibility.\n\
*/\n\
\n'
#
def change_name(name):
    """Convert an ASN.1 identifier to a ROS 2 snake_case field name.

    Dashes become underscores; an upper-case letter following a lower-case
    run gets an underscore inserted before it; everything is lower-cased.
    The C/C++ keywords ``long`` and ``class`` are remapped to ``lon`` and
    ``cls`` so the generated code stays compilable.
    """
    parts = []
    prev_kind = 0  # 0 = lower/other, 1 = underscore, 2 = upper
    for idx, ch in enumerate(name):
        if ch == '-':
            parts.append('_')
            prev_kind = 1
        elif ch.isupper():
            # Word boundary only after a lower-case character.
            if idx > 0 and prev_kind == 0:
                parts.append('_')
            parts.append(ch.lower())
            prev_kind = 2
        else:
            parts.append(ch.lower())
            prev_kind = 0
    result = ''.join(parts)
    if result == 'long':
        return 'lon'
    if result == 'class':
        return 'cls'
    return result
#
def int_range_to_type_str(Obj):
    """Map an ASN.1 INTEGER value-range constraint to a ROS 2 integer type.

    Reads ``Obj._const[0]['text']`` (e.g. ``'-128..127'`` or a single value
    such as ``'5'``) and returns the name of the smallest fixed-width ROS 2
    integer type able to represent every value in the range.
    """
    mm = [int(x) for x in Obj._const[0]['text'].split('..')]
    if len(mm) == 1:
        # Fix: a single-value constraint ('5') used to raise IndexError on
        # mm[1]; treat it as the degenerate range 5..5.
        mm = [mm[0], mm[0]]
    if mm[0] < 0:
        # Signed: both bounds must fit.
        if mm[0] > -129 and mm[1] < 128: return 'int8'
        elif mm[0] > -32769 and mm[1] < 32768: return 'int16'
        elif mm[0] > -2147483649 and mm[1] < 2147483648: return 'int32'
        else: return 'int64'
    else:
        # Unsigned: only the upper bound matters.
        if mm[1] < 256: return 'uint8'
        elif mm[1] < 65536: return 'uint16'
        elif mm[1] < 4294967296: return 'uint32'
        else: return 'uint64'
#
def enum_range_to_type_str(Obj):
    """Pick the smallest unsigned ROS 2 type holding the last enum value.

    NOTE(review): uses the *last* entry of ``Obj._cont``, which assumes the
    enumeration values are stored in ascending order — confirm upstream.
    """
    top = list(Obj._cont.values())[-1]
    for limit, type_name in ((256, 'uint8'), (65536, 'uint16'), (4294967296, 'uint32')):
        if top < limit:
            return type_name
    return 'uint64'
#
def bit_str_range_to_type(Obj):
    """Choose the container type for an ASN.1 BIT STRING's named bits.

    Returns ``(ros_type, hex_digits)`` where ``hex_digits`` is the width used
    when printing bit masks. Aborts the run for bit positions >= 64.
    NOTE(review): uses the *last* entry of ``Obj._cont`` — assumes ascending
    bit positions; confirm upstream.
    """
    top = list(Obj._cont.values())[-1]
    if top < 8:
        return ('uint8', 2)
    if top < 16:
        return ('uint16', 4)
    if top < 32:
        return ('uint32', 8)
    if top < 64:
        return ('uint64', 16)
    print('BIT_STR NOT FOUND', Obj._name)
    sys.exit(1)
#
def gen_msg_bool(Obj, fd, vname):
    """Emit a ROS msg field for an ASN.1 BOOLEAN (encoded as uint8).

    Constrained booleans are not supported: report and abort the run.
    """
    if Obj._const:
        print('BOOL NOT FOUND', Obj._name)
        sys.exit(1)
    fd.write('uint8 ' + vname + '\n')
#
def gen_msg_int(Obj, fd, vname):
    # Emit a ROS msg field for an ASN.1 INTEGER.
    # Referenced type: reuse the referenced message type's name, dashes stripped.
    if Obj._typeref and isinstance(Obj._typeref, ASN1RefType):
        if isinstance(Obj._typeref.called, tuple):
            fd.write(Obj._typeref.called[1].replace('-', ''))
        else:
            fd.write(Obj._typeref.called.replace('-', ''))
        fd.write(' ' + vname + '\n')
    elif Obj._const[0]['type'] == 'VAL':
        # Value-range constraint: map onto the smallest fixed-width integer
        # type and keep the original ASN.1 range text as a trailing comment.
        fd.write(int_range_to_type_str(Obj) + ' ' + vname)
        fd.write(' # ' + Obj._const[0]['text'] + '\n')
    else:
        # Anything else is unsupported: report and abort the generation run.
        print('INT NOT FOUND', Obj._name)
        # print(Obj.get_internals())
        sys.exit(1)
#
def gen_msg_enum(Obj, fd, vname):
    # Emit a ROS msg field for an ASN.1 ENUMERATED: one value field plus one
    # UPPER_SNAKE_CASE constant per enumeration item (original name in comment).
    fd.write(enum_range_to_type_str(Obj) + ' ' + vname + '\n')
    for k in Obj._cont:
        fd.write(enum_range_to_type_str(Obj))
        fd.write(' ' + change_name(k).upper() + '=' + str(Obj._cont[k]))
        fd.write(' # ' + k + '\n')
#
def gen_msg_bit_str(Obj, fd, vname):
    # Emit a ROS msg field for an ASN.1 BIT STRING: an unsigned integer value
    # plus one constant per named bit; the comment shows the mask 1 << position.
    bt = bit_str_range_to_type(Obj)
    fd.write(bt[0] + ' ' + vname + '\n')
    for k in Obj._cont:
        fd.write(bt[0] + ' ' + change_name(k).upper() + '=' + str(Obj._cont[k]))
        fd.write(' # ' + k + '=0x%0*X' % (bt[1], (1 << Obj._cont[k])) + '\n')
#
def gen_msg_oct_str(Obj, fd, vname):
    """Emit a ROS msg field for an ASN.1 OCTET STRING as a uint8 array.

    A SIZE constraint becomes a fixed-size '[n]' or bounded '[<=n]' array;
    any other constraint aborts the run. The ASN.1 text is kept as a comment.
    """
    const = Obj._const[0]
    if const['type'] != 'SIZE':
        print('OCT_STR NOT FOUND', Obj._name)
        sys.exit(1)
    bounds = const['text'].replace('SIZE', '').replace('(', '').replace(')', '').strip().split('..')
    if len(bounds) < 2:
        fd.write('uint8[' + bounds[0] + '] ' + vname)
    else:
        fd.write('uint8[<=' + bounds[1] + '] ' + vname)
    fd.write(' # ' + const['text'] + '\n')
#
def gen_msg_str_ia5(Obj, fd, vname):
    """Emit a ROS msg field for an ASN.1 IA5String as an int8 array.

    A SIZE constraint becomes a fixed-size '[n]' or bounded '[<=n]' array;
    any other constraint aborts the run. The ASN.1 text is kept as a comment.
    """
    const = Obj._const[0]
    if const['type'] != 'SIZE':
        print('STR_IA5 NOT FOUND', Obj._name)
        sys.exit(1)
    bounds = const['text'].replace('SIZE', '').replace('(', '').replace(')', '').strip().split('..')
    if len(bounds) < 2:
        fd.write('int8[' + bounds[0] + '] ' + vname)
    else:
        fd.write('int8[<=' + bounds[1] + '] ' + vname)
    fd.write(' # ' + const['text'] + '\n')
#
def gen_msg_choice(Obj, fd, vname):
    # Emit a ROS msg for an ASN.1 CHOICE: every alternative becomes a bounded
    # array of at most one element ('[<=1]'), so exactly one is populated.
    for k in Obj._cont:
        tr = Obj._cont[k]._typeref;
        if isinstance(tr, ASN1RefType):
            # Alternative referencing another type: reuse its message name.
            if isinstance(tr.called, tuple): fd.write(tr.called[1].replace('-', ''))
            else: fd.write(tr.called.replace('-', ''))
            fd.write('[<=1] ' + change_name(k) + '\n')
        else:
            obj = Obj._cont[k]
            if obj._type == TYPE_SEQ_OF:
                # Inline SEQUENCE OF alternative (opt=False).
                gen_msg_seq_of(obj, fd, k, False)
            else:
                # Unsupported alternative kind: report and abort the run.
                print('CHOICE NOT FOUND', Obj._name, obj._name, obj._type)
                # print(Obj.get_internals())
                sys.exit(1)
#
def gen_msg_seq_of(Obj, fd, vname, opt):
    # Emit a ROS msg field for an ASN.1 SEQUENCE OF whose element type is a
    # reference to another message type.
    #   opt -- True when the field is OPTIONAL in the enclosing SEQUENCE; with
    #          no SIZE bound it is then rendered as a bounded '[<=1]' array.
    tr = Obj._cont._typeref;
    if isinstance(tr, ASN1RefType):
        if isinstance(tr.called, tuple): fd.write(tr.called[1].replace('-', ''))
        else: fd.write(tr.called.replace('-', ''))
        if Obj._const and Obj._const[0]['type'] == 'SIZE':
            # SIZE constraint: fixed-size '[n]' or bounded '[<=n]' array,
            # with the original ASN.1 text kept as a trailing comment.
            mm = Obj._const[0]['text'].replace('SIZE', '')
            mm = mm.replace('(', '').replace(')', '').strip().split('..')
            if len(mm) < 2: fd.write('[' + mm[0] + '] ' + vname)
            else: fd.write('[<=' + mm[1] + '] ' + vname)
            fd.write(' # ' + Obj._const[0]['text'] + '\n')
        else:
            # No SIZE bound: unbounded '[]' array, or '[<=1]' when optional.
            if opt: fd.write('[<=1] ' + vname + '\n')
            else: fd.write('[] ' + vname + '\n')
    else:
        # Element type is not a plain type reference: report and abort.
        print('SEQ OF NOT FOUND', Obj._name)
        # print(Obj.get_internals())
        sys.exit(1)
#
def gen_msg_seq(Obj, fd, vname, asnfile, mod_path, mod_name):
#
pcnt = 0
pidx = 0
for k in Obj._cont:
tr = Obj._cont[k]._typeref;
if isinstance(tr, ASN1RefType): pass
elif isinstance(tr, ASN1RefClassField): pass
elif Obj._cont[k]._type in (TYPE_BOOL, TYPE_INT, TYPE_ENUM, TYPE_BIT_STR, TYPE_OCT_STR, TYPE_STR_IA5):
pcnt += 1
#
for k in Obj._cont:
tr = Obj._cont[k]._typeref;
if isinstance(tr, ASN1RefType):
if isinstance(tr.called, tuple): fd.write(tr.called[1].replace('-', ''))
else: fd.write(tr.called.replace('-', ''))
if k in Obj._root_opt: fd.write('[<=1]')
fd.write(' ' + change_name(k) + '\n')
elif isinstance(tr, ASN1RefClassField):
obj = Obj._cont[k]
if obj._type == TYPE_INT:
if isinstance(obj._typeref.called, tuple):
cObj = GLOBAL.MOD[mod_name][obj._typeref.called[1]]
else:
cObj = GLOBAL.MOD[mod_name][obj._typeref.called]
if isinstance(cObj._cont['id']._typeref, ASN1RefType):
if isinstance(cObj._cont['id']._typeref.called, tuple):
fd.write(cObj._cont['id']._typeref.called[1].replace('-', ''))
else:
fd.write(cObj._cont['id']._typeref.called.replace('-', ''))
fd.write(' ' + change_name(k) + '\n')
else:
print('SEQ INT NOT FOUND', Obj._name, obj._name, obj._type)
# print(Obj.get_internals())
sys.exit(1)
elif obj._type == TYPE_OPEN:
for o in obj._const[0]['tab']._val['root']:
oi = o['id']
ot = o['Type']
if isinstance(ot._typeref, ASN1RefType):
if isinstance(ot._typeref.called, tuple):
called = ot._typeref.called[1].replace('-', '')
else:
called = ot._typeref.called.replace('-', '')
fd.write(called + '[<=1] ' + change_name(called))
fd.write(' # id=' + str(oi) + '\n')
else:
print('SEQ OPEN NOT FOUND', Obj._name, obj._name, obj._type)
# print(Obj.get_internals())
sys.exit(1)
else:
print('SEQ CLASS NOT FOUND', Obj._name, obj._name, obj._type)
# print(Obj.get_internals())
sys.exit(1)
else:
obj = Obj._cont[k]
#
if pcnt > 1: pstr = str(pidx)
else: pstr = ''
if obj._type in (TYPE_BOOL, TYPE_INT, TYPE_ENUM, TYPE_BIT_STR, TYPE_OCT_STR, TYPE_STR_IA5):
pidx += 1
#
if obj._type == TYPE_BOOL : gen_msg_bool(obj, fd, vname + pstr)
elif obj._type == TYPE_INT : gen_msg_int(obj, fd, vname + pstr)
elif obj._type == TYPE_ENUM : gen_msg_enum(obj, fd, vname + pstr)
elif obj._type == TYPE_BIT_STR: gen_msg_bit_str(obj, fd, vname + pstr)
elif obj._type == TYPE_OCT_STR: gen_msg_oct_str(obj, fd, vname + pstr)
elif obj._type == TYPE_STR_IA5: gen_msg_str_ia5(obj, fd, vname + pstr)
elif obj._type == TYPE_CHOICE:
mname = (Obj._name + obj._name[0].upper() + obj._name[1:]).replace('-', '')
path = os.path.join(mod_path, mname + '.msg')
ofd = open(path, 'w')
ofd.write(LICENSE_MSG)
ofd.write('# From ' + asnfile + ', ' + mod_name)
ofd.write(', ' + Obj._name + '.' + obj._name)
ofd.write(', ' + obj._type + '\n\n')
gen_msg_choice(obj, ofd, vname)
ofd.close()
#
fd.write(mname)
if k in Obj._root_opt: fd.write('[<=1]')
fd.write(' ' + change_name(k) + ' # anonymous type\n')
elif obj._type == TYPE_SEQ_OF :
gen_msg_seq_of(obj, fd, k, k in Obj._root_opt)
elif obj._type == TYPE_SEQ:
mname = (Obj._name + obj._name[0].upper() + obj._name[1:]).replace('-', '')
path = os.path.join(mod_path, mname + '.msg')
ofd = open(path, 'w')
ofd.write(LICENSE_MSG)
ofd.write('# From ' + asnfile + ', ' + mod_name)
ofd.write(', ' + Obj._name + '.' + obj._name)
ofd.write(', ' + obj._type + '\n\n')
gen_msg_seq(obj, ofd, vname, asnfile, mod_path, mod_name)
ofd.close()
#
fd.write(mname)
if k in Obj._root_opt: fd.write('[<=1]')
fd.write(' ' + change_name(k) + ' # anonymous type\n')
else:
print('SEQ NOT | |
870049, 870059, 870083, 870097, 870109,
870127, 870131, 870137, 870151, 870161, 870169, 870173, 870197,
870211, 870223, 870229, 870239, 870241, 870253, 870271, 870283,
870301, 870323, 870329, 870341, 870367, 870391, 870403, 870407,
870413, 870431, 870433, 870437, 870461, 870479, 870491, 870497,
870517, 870533, 870547, 870577, 870589, 870593, 870601, 870613,
870629, 870641, 870643, 870679, 870691, 870703, 870731, 870739,
870743, 870773, 870787, 870809, 870811, 870823, 870833, 870847,
870853, 870871, 870889, 870901, 870907, 870911, 870917, 870929,
870931, 870953, 870967, 870977, 870983, 870997, 871001, 871021,
871027, 871037, 871061, 871103, 871147, 871159, 871163, 871177,
871181, 871229, 871231, 871249, 871259, 871271, 871289, 871303,
871337, 871349, 871393, 871439, 871459, 871463, 871477, 871513,
871517, 871531, 871553, 871571, 871589, 871597, 871613, 871621,
871639, 871643, 871649, 871657, 871679, 871681, 871687, 871727,
871763, 871771, 871789, 871817, 871823, 871837, 871867, 871883,
871901, 871919, 871931, 871957, 871963, 871973, 871987, 871993,
872017, 872023, 872033, 872041, 872057, 872071, 872077, 872089,
872099, 872107, 872129, 872141, 872143, 872149, 872159, 872161,
872173, 872177, 872189, 872203, 872227, 872231, 872237, 872243,
872251, 872257, 872269, 872281, 872317, 872323, 872351, 872353,
872369, 872381, 872383, 872387, 872393, 872411, 872419, 872429,
872437, 872441, 872453, 872471, 872477, 872479, 872533, 872549,
872561, 872563, 872567, 872587, 872609, 872611, 872621, 872623,
872647, 872657, 872659, 872671, 872687, 872731, 872737, 872747,
872749, 872761, 872789, 872791, 872843, 872863, 872923, 872947,
872951, 872953, 872959, 872999, 873017, 873043, 873049, 873073,
873079, 873083, 873091, 873109, 873113, 873121, 873133, 873139,
873157, 873209, 873247, 873251, 873263, 873293, 873317, 873319,
873331, 873343, 873349, 873359, 873403, 873407, 873419, 873421,
873427, 873437, 873461, 873463, 873469, 873497, 873527, 873529,
873539, 873541, 873553, 873569, 873571, 873617, 873619, 873641,
873643, 873659, 873667, 873671, 873689, 873707, 873709, 873721,
873727, 873739, 873767, 873773, 873781, 873787, 873863, 873877,
873913, 873959, 873979, 873989, 873991, 874001, 874009, 874037,
874063, 874087, 874091, 874099, 874103, 874109, 874117, 874121,
874127, 874151, 874193, 874213, 874217, 874229, 874249, 874267,
874271, 874277, 874301, 874303, 874331, 874337, 874343, 874351,
874373, 874387, 874397, 874403, 874409, 874427, 874457, 874459,
874477, 874487, 874537, 874543, 874547, 874567, 874583, 874597,
874619, 874637, 874639, 874651, 874661, 874673, 874681, 874693,
874697, 874711, 874721, 874723, 874729, 874739, 874763, 874771,
874777, 874799, 874807, 874813, 874823, 874831, 874847, 874859,
874873, 874879, 874889, 874891, 874919, 874957, 874967, 874987,
875011, 875027, 875033, 875089, 875107, 875113, 875117, 875129,
875141, 875183, 875201, 875209, 875213, 875233, 875239, 875243,
875261, 875263, 875267, 875269, 875297, 875299, 875317, 875323,
875327, 875333, 875339, 875341, 875363, 875377, 875389, 875393,
875417, 875419, 875429, 875443, 875447, 875477, 875491, 875503,
875509, 875513, 875519, 875521, 875543, 875579, 875591, 875593,
875617, 875621, 875627, 875629, 875647, 875659, 875663, 875681,
875683, 875689, 875701, 875711, 875717, 875731, 875741, 875759,
875761, 875773, 875779, 875783, 875803, 875821, 875837, 875851,
875893, 875923, 875929, 875933, 875947, 875969, 875981, 875983,
876011, 876013, 876017, 876019, 876023, 876041, 876067, 876077,
876079, 876097, 876103, 876107, 876121, 876131, 876137, 876149,
876181, 876191, 876193, 876199, 876203, 876229, 876233, 876257,
876263, 876287, 876301, 876307, 876311, 876329, 876331, 876341,
876349, 876371, 876373, 876431, 876433, 876443, 876479, 876481,
876497, 876523, 876529, 876569, 876581, 876593, 876607, 876611,
876619, 876643, 876647, 876653, 876661, 876677, 876719, 876721,
876731, 876749, 876751, 876761, 876769, 876787, 876791, 876797,
876817, 876823, 876833, 876851, 876853, 876871, 876893, 876913,
876929, 876947, 876971, 877003, 877027, 877043, 877057, 877073,
877091, 877109, 877111, 877117, 877133, 877169, 877181, 877187,
877199, 877213, 877223, 877237, 877267, 877291, 877297, 877301,
877313, 877321, 877333, 877343, 877351, 877361, 877367, 877379,
877397, 877399, 877403, 877411, 877423, 877463, 877469, 877531,
877543, 877567, 877573, 877577, 877601, 877609, 877619, 877621,
877651, 877661, 877699, 877739, 877771, 877783, 877817, 877823,
877837, 877843, 877853, 877867, 877871, 877873, 877879, 877883,
877907, 877909, 877937, 877939, 877949, 877997, 878011, 878021,
878023, 878039, 878041, 878077, 878083, 878089, 878099, 878107,
878113, 878131, 878147, 878153, 878159, 878167, 878173, 878183,
878191, 878197, 878201, 878221, 878239, 878279, 878287, 878291,
878299, 878309, 878359, 878377, 878387, 878411, 878413, 878419,
878443, 878453, 878467, 878489, 878513, 878539, 878551, 878567,
878573, 878593, 878597, 878609, 878621, 878629, 878641, 878651,
878659, 878663, 878677, 878681, 878699, 878719, 878737, 878743,
878749, 878777, 878783, 878789, 878797, 878821, 878831, 878833,
878837, 878851, 878863, 878869, 878873, 878893, 878929, 878939,
878953, 878957, 878987, 878989, 879001, 879007, 879023, 879031,
879061, 879089, 879097, 879103, 879113, 879119, 879133, 879143,
879167, 879169, 879181, 879199, 879227, 879239, 879247, 879259,
879269, 879271, 879283, 879287, 879299, 879331, 879341, 879343,
879353, 879371, 879391, 879401, 879413, 879449, 879457, 879493,
879523, 879533, 879539, 879553, 879581, 879583, 879607, 879617,
879623, 879629, 879649, 879653, 879661, 879667, 879673, 879679,
879689, 879691, 879701, 879707, 879709, 879713, 879721, 879743,
879797, 879799, 879817, 879821, 879839, 879859, 879863, 879881,
879917, 879919, 879941, 879953, 879961, 879973, 879979, 880001,
880007, 880021, 880027, 880031, 880043, 880057, 880067, 880069,
880091, 880097, 880109, 880127, 880133, 880151, 880153, 880199,
880211, 880219, 880223, 880247, 880249, 880259, 880283, 880301,
880303, 880331, 880337, 880343, 880349, 880361, 880367, 880409,
880421, 880423, 880427, 880483, 880487, 880513, 880519, 880531,
880541, 880543, 880553, 880559, 880571, 880573, 880589, 880603,
880661, 880667, 880673, 880681, 880687, 880699, 880703, 880709,
880723, 880727, 880729, 880751, 880793, 880799, 880801, 880813,
880819, 880823, 880853, 880861, 880871, 880883, 880903, 880907,
880909, 880939, 880949, 880951, 880961, 880981, 880993, 881003,
881009, 881017, 881029, 881057, 881071, 881077, 881099, 881119,
881141, 881143, 881147, 881159, 881171, 881173, 881191, 881197,
881207, 881219, 881233, 881249, 881269, 881273, 881311, 881317,
881327, 881333, 881351, 881357, 881369, 881393, 881407, 881411,
881417, 881437, 881449, 881471, 881473, 881477, 881479, 881509,
881527, 881533, 881537, 881539, 881591, 881597, 881611, 881641,
881663, 881669, 881681, 881707, 881711, 881729, 881743, 881779,
881813, 881833, 881849, 881897, 881899, 881911, 881917, 881939,
881953, 881963, 881983, 881987, 882017, 882019, 882029, 882031,
882047, 882061, 882067, 882071, 882083, 882103, 882139, 882157,
882169, 882173, 882179, 882187, 882199, 882239, 882241, 882247,
882251, 882253, 882263, 882289, 882313, 882359, 882367, 882377,
882389, 882391, 882433, 882439, 882449, 882451, 882461, 882481,
882491, 882517, 882529, 882551, 882571, 882577, 882587, 882593,
882599, 882617, 882631, 882653, 882659, 882697, 882701, 882703,
882719, 882727, 882733, 882751, 882773, 882779, 882823, 882851,
882863, 882877, 882881, 882883, 882907, 882913, 882923, 882943,
882953, 882961, 882967, 882979, 883013, 883049, 883061, 883073,
883087, 883093, 883109, 883111, 883117, 883121, 883163, 883187,
883193, 883213, 883217, 883229, 883231, 883237, 883241, 883247,
883249, 883273, 883279, 883307, 883327, 883331, 883339, 883343,
883357, 883391, 883397, 883409, 883411, 883423, 883429, 883433,
883451, 883471, 883483, 883489, 883517, 883537, 883549, 883577,
883579, 883613, 883621, 883627, 883639, 883661, 883667, 883691,
883697, 883699, 883703, 883721, 883733, 883739, 883763, 883777,
883781, 883783, 883807, 883871, 883877, 883889, 883921, 883933,
883963, 883969, 883973, 883979, 883991, 884003, 884011, 884029,
884057, 884069, 884077, 884087, 884111, 884129, 884131, 884159,
884167, 884171, 884183, 884201, 884227, 884231, 884243, 884251,
884267, 884269, 884287, 884293, 884309, 884311, 884321, 884341,
884353, 884363, 884369, 884371, 884417, 884423, 884437, 884441,
884453, 884483, 884489, 884491, 884497, 884501, 884537, 884573,
884579, 884591, 884593, 884617, 884651, 884669, 884693, 884699,
884717, 884743, 884789, 884791, 884803, 884813, 884827, 884831,
884857, 884881, 884899, 884921, 884951, 884959, 884977, 884981,
884987, 884999, 885023, 885041, 885061, 885083, 885091, 885097,
885103, 885107, 885127, 885133, 885161, 885163, 885169, 885187,
885217, 885223, 885233, 885239, 885251, 885257, 885263, 885289,
885301, 885307, 885331, 885359, 885371, 885383, 885389, 885397,
885403, 885421, 885427, 885449, 885473, 885487, 885497, 885503,
885509, 885517, 885529, 885551, 885553, 885589, 885607, 885611,
885623, 885679, 885713, 885721, 885727, 885733, 885737, 885769,
885791, 885793, 885803, 885811, 885821, 885823, 885839, 885869,
885881, 885883, 885889, 885893, 885919, 885923, 885931, 885943,
885947, 885959, 885961, 885967, 885971, 885977, 885991, 886007,
886013, 886019, 886021, 886031, 886043, 886069, 886097, 886117,
886129, 886163, 886177, 886181, 886183, 886189, 886199, 886241,
886243, 886247, 886271, 886283, 886307, 886313, 886337, 886339,
886349, 886367, 886381, 886387, 886421, 886427, 886429, 886433,
886453, 886463, 886469, 886471, 886493, 886511, 886517, 886519,
886537, 886541, 886547, 886549, 886583, 886591, 886607, 886609,
886619, 886643, 886651, 886663, 886667, 886741, 886747, 886751,
886759, 886777, 886793, 886799, 886807, 886819, 886859, 886867,
886891, 886909, 886913, 886967, 886969, 886973, 886979, 886981,
886987, 886993, 886999, 887017, 887057, 887059, 887069, 887093,
887101, 887113, 887141, 887143, 887153, 887171, 887177, 887191,
887203, 887233, 887261, 887267, 887269, 887291, 887311, 887323,
887333, 887377, 887387, 887399, 887401, 887423, 887441, 887449,
887459, 887479, 887483, 887503, 887533, 887543, 887567, 887569,
887573, 887581, 887599, | |
covariance
kf_params["mu_R2"] = mean
with open(save_file,"wb") as f:
pickle.dump(kf_params,f)
def filter_rollouts(loader,
kf_params,
localizer,
device,
n_iterations = 100,
ber = 2.0,
skew_ratio = 0,
PLOT = True,
speed_init = "none",
state_size = 8,
keep_nums = [1],
wer = 1.25,
cs = 112):
"""
Simulates tracking by performing several predict measure update steps
"""
for keep_num in keep_nums:
ap_errors = []
skewed_iou = [] # how far off each skewed measurement is during init
starting_iou = [] # after initialization, how far off are we
a_priori_iou = {} # after prediction step, how far off is the state
localizer_iou = {} # how far off is the localizer
a_posteriori_iou = {} # after updating, how far off is the state
for i in range(n_pre,n_pre+n_post):
a_priori_iou[i] = []
localizer_iou[i] = []
a_posteriori_iou[i] = []
model_errors = []
meas_errors = []
degradation = np.array([10,10,10,10]) *skew_ratio # should roughly equal localizer error covariance
for iteration in range(n_iterations):
batch,ims = next(iter(loader))
b = batch.shape[0]
# initialize tracker
tracker = Torch_KF("cpu",INIT = kf_params, ADD_MEAN_Q = True, ADD_MEAN_R = False)
obj_ids = [i for i in range(len(batch))]
if speed_init == "smooth":
tracker.add(batch[:,0,:state_size],obj_ids)
else:
tracker.add(batch[:,0,:4],obj_ids)
# initialize storage
ap_states = []
ap_covs = []
loc_meas = []
apst_states = []
apst_covs = []
gts = []
apst_states.append(tracker.X[0].clone())
apst_covs.append(torch.diag(tracker.P[0]).clone())
gts.append(batch[0,n_pre-1,:].clone())
# initialize tracker
for frame in range(1,n_pre):
tracker.predict()
# here, rather than updating with ground truth we degrade ground truth by some amount
measurement = batch[:,frame,:4]
skew = np.random.normal(0,degradation,(len(batch),4))
measurement_skewed = measurement + skew
skewed_iou.append(iou(measurement,measurement_skewed))
tracker.update2(measurement_skewed,obj_ids)
# cumulative error so far
objs = tracker.objs()
objs = [objs[key] for key in objs]
starting = torch.from_numpy(np.array(objs)).double()
starting_iou.append(iou(starting,batch[:,n_pre-1,:]))
for frame_idx in range(n_pre,n_pre + n_post):
gt = batch[:,frame_idx,:]
# get a priori error
tracker.predict()
objs = tracker.objs()
objs = [objs[key] for key in objs]
a_priori = torch.from_numpy(np.array(objs)).double()
a_priori_iou[frame_idx].append(iou(a_priori,gt))
if frame_idx in [1,2,3,4]:
ap_error = a_priori[:,:4] - gt[:,:4]
ap_errors.append(ap_error)
ap_states.append(tracker.X[0].clone())
ap_covs.append(torch.diag(tracker.P[0]).clone())
# at this point, we have gt, the correct bounding boxes for this frame,
# and the tracker states, the estimate of the state for this frame
# expand state estimate and get localizer prediction
# shift back into global coordinates
# ims are collated by frame,then batch index
relevant_ims = ims[frame_idx]
frames =[]
for item in relevant_ims:
with Image.open(item) as im:
im = F.to_tensor(im)
frame = F.normalize(im,mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# #correct smaller frames
# if frame.shape[1] < 540:
# new_frame = torch.zeros([3,375,frame.shape[2]])
# new_frame[:,:frame.shape[1],:] = frame
# frame = new_frame
# if frame.shape[2] < 1242:
# new_frame = torch.zeros([3,375,1242])
# new_frame[:,:,:frame.shape[2]] = frame
# frame = new_frame
MASK = False
if MASK:
other_objs = dataset.frame_objs[item]
# create copy of frame
frame_copy = frame.clone()
# mask each other object in frame
for obj in other_objs:
xmin = (obj[0] - obj[2] / 2.0).astype(int)
ymin = (obj[1] - obj[2]*obj[3] / 2.0).astype(int)
xmax = (obj[0] + obj[2] / 2.0).astype(int)
ymax = (obj[1] + obj[2]*obj[3] / 2.0).astype(int)
region = obj
shape = frame[:,ymin:ymax,xmin:xmax].shape
r = torch.normal(0.485,0.229,[shape[1],shape[2]])
g = torch.normal(0.456,0.224,[shape[1],shape[2]])
b = torch.normal(0.406,0.225,[shape[1],shape[2]])
rgb = torch.stack([r,g,b])
frame[:,ymin:ymax,xmin:xmax] = rgb
# restore gt_skew pixels
o = gt_skew[idx]
xmin = (o[0] - o[2] / 2.0).int()
ymin = (o[1] - o[2]*obj[3] / 2.0).int()
xmax = (o[0] + o[2] / 2.0).int()
ymax = (o[1] + o[2]*obj[3] / 2.0).int()
frame[:,ymin:ymax,xmin:xmax] = frame_copy[:,ymin:ymax,xmin:xmax]
#plt.imshow(frame.transpose(2,0).transpose(0,1))
#plt.pause(5)
frames.append(frame)
frames = torch.stack(frames).to(device)
# crop image
boxes = a_priori
#convert xyxy into xysr
temp = boxes.clone()
temp[:,0] = (boxes[:,0] + boxes[:,2])/2.0
temp[:,1] = (boxes[:,1] + boxes[:,3])/2.0
temp[:,2] = boxes[:,2] - boxes[:,0]
temp[:,3] = (boxes[:,3] - boxes[:,1])/temp[:,2]
boxes = temp
# convert xysr boxes into xmin xmax ymin ymax
# first row of zeros is batch index (batch is size 0) for ROI align
new_boxes = np.zeros([len(boxes),5])
# use either s or s x r for both dimensions, whichever is larger,so crop is square
#box_scales = np.max(np.stack((boxes[:,2],boxes[:,2]*boxes[:,3]),axis = 1),axis = 1)
box_scales = np.min(np.stack((boxes[:,2],boxes[:,2]*boxes[:,3]),axis = 1),axis = 1) #/2.0
#expand box slightly
#ber = 2.15
box_scales = box_scales * ber# box expansion ratio
new_boxes[:,1] = boxes[:,0] - box_scales/2
new_boxes[:,3] = boxes[:,0] + box_scales/2
new_boxes[:,2] = boxes[:,1] - box_scales/2
new_boxes[:,4] = boxes[:,1] + box_scales/2
for i in range(len(new_boxes)):
new_boxes[i,0] = i # set image index for each
torch_boxes = torch.from_numpy(new_boxes).float().to(device)
# crop using roi align
crops = roi_align(frames,torch_boxes,(cs,cs))
# _,reg_out = localizer(crops)
# torch.cuda.synchronize()
# detections = (reg_out* 224*wer - 224*(wer-1)/2)
# detections = detections.data.cpu()
if True:
reg_boxes, classes = localizer(crops,LOCALIZE = True)
torch.cuda.synchronize()
reg_boxes = reg_boxes.data.cpu()
#confs = confs.data.cpu()
classes = classes.data.cpu()
confs,_ = torch.max(classes, dim = 2)
# use original bboxes to weight best bboxes
n_anchors = reg_boxes.shape[1]
a_priori = a_priori[:,:4]
gt_skew = a_priori.clone() #gt.clone()
a_priori[:,0] = gt_skew[:,0] - new_boxes[:,1]
a_priori[:,1] = gt_skew[:,1] - new_boxes[:,2]
a_priori[:,2] = gt_skew[:,2] - new_boxes[:,1]
a_priori[:,3] = gt_skew[:,3] - new_boxes[:,2]
bs = torch.from_numpy(box_scales).unsqueeze(1).repeat(1,4)
a_priori = a_priori * cs/bs
a_priori = a_priori.unsqueeze(1).repeat(1,n_anchors,1)
#reg_boxes = reg_boxes.unsqueeze(0).repeat(b,1,1)
# evaluate each box based on xy similarity
# x_diff = torch.abs(a_priori[:,0] + a_priori[:,2] - (reg_boxes[:,0] + reg_boxes[:,2]) )
# y_diff = torch.abs(a_priori[:,1] + a_priori[:,3] - (reg_boxes[:,1] + reg_boxes[:,3]) )
# # evaluate each box on width and ratio similarity
# w_diff = torch.abs(a_priori[:,2] - a_priori[:,0] - (reg_boxes[:,2] - reg_boxes[:,0]) )
# h_diff = torch.abs(a_priori[:,3] - a_priori[:,1] - (reg_boxes[:,3] - reg_boxes[:,1]) )
# evaluate each box on conf
alpha = 0.8
beta = 0
gamma = 0
delta = 1
iou_score = md_iou(a_priori.double(),reg_boxes.double())
score = alpha*confs + delta * iou_score
# _,sorted_idxs = torch.sort(score,dim = 1)
# #keep_num = 4
# best5 = sorted_idxs[:,-keep_num:]
# det_list = []
# for k in range(b):
# det5 = reg_boxes[k,best5[k],:]
# avg = det5.mean(dim = 0)
# det_list.append(avg)
# detections = torch.stack(det_list)
best_scores ,keep = torch.max(score,dim = 1)
idx = torch.arange(reg_boxes.shape[0])
detections = reg_boxes[idx,keep,:]
score = iou_score[idx,keep]
confs = confs[idx,keep]
classes = classes[idx,keep]
# 5b. convert to global image coordinates
# these detections are relative to crops - convert to global image coords
# add in original box offsets and scale outputs by original box scales
detections[:,0] = detections[:,0]*box_scales/cs + new_boxes[:,1]
detections[:,2] = detections[:,2]*box_scales/cs + new_boxes[:,1]
detections[:,1] = detections[:,1]*box_scales/cs + new_boxes[:,2]
detections[:,3] = detections[:,3]*box_scales/cs + new_boxes[:,2]
# convert into xysr form
output = np.zeros([len(detections),4])
output[:,0] = (detections[:,0] + detections[:,2]) / 2.0
output[:,1] = (detections[:,1] + detections[:,3]) / 2.0
output[:,2] = (detections[:,2] - detections[:,0])
output[:,3] = (detections[:,3] - detections[:,1]) / output[:,2]
pred = detections
# evaluate localizer
localizer_iou[frame_idx].append(iou(pred.double(),gt.double()))
error = (gt[:,:4]-pred)
meas_errors.append(error)
loc_meas.append(pred[0].clone())
# evaluate a posteriori estimate
tracker.update(pred,obj_ids)
objs = tracker.objs()
objs = [objs[key] for key in objs]
a_posteriori = torch.from_numpy(np.array(objs)).double()
a_posteriori_iou[frame_idx].append(iou(a_posteriori,gt))
apst_states.append(tracker.X[0].clone())
apst_covs.append(torch.diag(tracker.P[0]).clone())
gts.append(gt[0].clone())
if PLOT and iteration < 10:
plot_states(ap_states,
ap_covs,
loc_meas,
apst_states,
apst_covs,
gts,
save_num = iteration)
#break
elif PLOT:
break
if iteration % 50 == 0:
print("Finished iteration {}".format(iteration))
errors = torch.stack(ap_errors)
errors = errors.view(-1,4)
mean = torch.mean(errors, dim = 0)
covariance = torch.zeros((4,4))
for vec in errors:
covariance += torch.mm((vec - mean).unsqueeze(1), (vec-mean).unsqueeze(1).transpose(0,1))
covariance = covariance / errors.shape[0]
print("------------------Results {}: --------------------".format(keep_num))
# print("Skewed initialization IOUs: {}".format(sum(skewed_iou)/len(skewed_iou)))
print("Starting state IOUs: {}".format(sum(starting_iou)/len(starting_iou)))
iou_score = 0
for key in a_priori_iou.keys():
print("Frame {}".format(key))
print("A priori state IOUs: {}".format(sum(a_priori_iou[key])/len(a_priori_iou[key])))
print("Localizer state IOUs: {}".format(sum(localizer_iou[key])/len(localizer_iou[key])))
print("A posteriori state IOUs: {}".format(sum(a_posteriori_iou[key])/len(a_posteriori_iou[key])))
iou_score += sum(a_posteriori_iou[key])/len(a_posteriori_iou[key])
print("A posteriori estimate error mean: {}".format(mean))
print("A posteriori estimate error covariance: \n{}".format(covariance))
iou_score /= key # last key = number of frames in rollout
return iou_score
#%% MAIN CODE BLOCK
if __name__ == "__main__":
# define parameters
b = | |
import femagtools.machine.sm
import pathlib
import pytest
import numpy as np
@pytest.fixture
def sm():
smpars = {"m": 3, "p": 3, "r1": 0.01, "r2": 40, "rotor_mass": 9.941, "kfric_b": 1,
"ldq": [
{"ex_current": 0.6, "i1": [0.0, 82.0, 164.0, 246.0, 328.0, 410.0],
"beta": [-180.0, -165.0, -150.0, -135.0, -120.0, -105.0, -90.0, -75.0, -60.0, -45.0, -30.0, -15.0, 0.0],
"psid": [[0.01742, 0.016110000000000003, 0.009824, 0.006978999999999999, 0.005565, 0.004614],
[0.01742, -0.001185, -0.01436, -0.02008, -
0.02327, -0.025310000000000003],
[0.01742, -0.01786, -0.03719, -
0.04475, -0.04863, -0.05114],
[0.01742, -0.03233, -0.057390000000000004, -
0.06627999999999999, -0.07052, -0.07322999999999999],
[0.01742, -0.043379999999999995, -
0.07287, -0.08267, -0.08713, -0.09005],
[0.01742, -0.05027, -0.08158, -0.09125, -
0.09601, -0.09913999999999999],
[0.01742, -0.0526, -0.08425, -0.09426000000000001, -
0.09928000000000001, -0.10250000000000001],
[0.01742, -0.05027, -0.08158, -0.09124, -
0.09602000000000001, -0.09913999999999999],
[0.01742, -0.043379999999999995, -
0.07287, -0.08267, -0.08713, -0.09005],
[0.01742, -0.03233, -0.057390000000000004, -
0.06631000000000001, -0.07053, -0.07322000000000001],
[0.01742, -0.01786, -0.0372, -
0.04476, -0.04864, -0.05115],
[0.01742, -0.001187, -0.014369999999999999, -
0.02009, -0.023289999999999998, -0.025339999999999998],
[0.01742, 0.016110000000000003, 0.009824, 0.006978999999999999, 0.005565, 0.004614]],
"psiq": [[-0.0, -0.054009999999999996, -0.07958, -0.08945, -0.09473, -0.09842],
[-0.0, -0.0535, -0.07868, -0.08764000000000001, -
0.09232, -0.09562000000000001],
[-0.0, -0.04835, -0.07146, -0.07928, -
0.08334, -0.08610999999999999],
[-0.0, -0.0395, -0.05795, -0.06421, -0.06738, -0.0697],
[-0.0, -0.0279, -0.04025, -0.04434, -
0.046490000000000004, -0.04808],
[-0.0, -0.01442, -0.02121, -
0.02393, -0.0252, -0.02606],
[-0.0, -2.436e-09, -3.6289999999999998e-09, -
4.147e-09, -4.377e-09, -4.526e-09],
[0.0, 0.01442, 0.02122, 0.02393, 0.02521, 0.02606],
[0.0, 0.0279, 0.04025, 0.04435,
0.046490000000000004, 0.04808],
[0.0, 0.0395, 0.05795,
0.06423000000000001, 0.06737, 0.06969],
[0.0, 0.04835, 0.07146,
0.07927000000000001, 0.08333, 0.08614],
[0.0, 0.05351, 0.07867, 0.08764000000000001,
0.09234, 0.09562000000000001],
[0.0, 0.054009999999999996, 0.07958, 0.08946, 0.09472, 0.09842]],
"losses": {"speed": 50.0, "ef": [1.45, 1.45], "hf": [1.0, 1.0],
"styoke_hyst": [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
"stteeth_hyst": [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
"styoke_eddy": [[8.197, 75.66, 156.9, 194.6, 215.5, 230.4],
[8.197, 69.25, 156.7,
196.2, 217.9, 233.2],
[8.197, 64.62, 159.3,
200.4, 223.0, 238.5],
[8.197, 62.86, 160.6,
203.5, 225.6, 241.0],
[8.197, 63.39, 166.5,
211.4, 233.0, 247.6],
[8.197, 64.48, 171.1,
214.8, 238.2, 254.1],
[8.197, 64.97, 171.2,
215.8, 239.8, 255.1],
[8.197, 64.48, 171.1,
214.8, 238.2, 254.1],
[8.197, 63.39, 166.5,
211.4, 233.0, 247.6],
[8.197, 62.86, 160.7,
203.7, 225.6, 240.8],
[8.197, 64.62, 159.3,
200.4, 222.9, 238.7],
[8.197, 69.26, 156.7,
196.2, 217.9, 233.3],
[8.197, 75.67, 156.8, 194.7, 215.4, 230.4]],
"stteeth_eddy": [[6.687, 54.45, 100.3, 120.1, 130.1, 137.6],
[6.687, 52.21, 101.1,
121.3, 132.6, 141.3],
[6.687, 48.9, 103.5,
126.2, 139.2, 148.4],
[6.687, 45.99, 100.8,
122.9, 135.0, 143.3],
[6.687, 44.42, 97.99,
120.7, 134.1, 144.6],
[6.687, 43.65, 99.45,
124.9, 141.3, 154.0],
[6.687, 43.46, 99.42,
126.1, 144.4, 157.6],
[6.687, 43.64, 99.46,
124.9, 141.3, 153.9],
[6.687, 44.41, 97.98,
120.7, 134.1, 144.6],
[6.687, 45.98, 100.8,
122.7, 135.0, 143.3],
[6.687, 48.9, 103.5,
126.2, 139.3, 148.5],
[6.687, 52.21, 101.0,
121.4, 132.7, 141.2],
[6.687, 54.43, 100.3, 120.1, 130.1, 137.7]],
"rotor_hyst": [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
"rotor_eddy": [[0.3769, 6.4, 14.11, 18.25, 20.99, 22.32],
[0.3769, 5.917, 13.88,
18.37, 20.87, 22.41],
[0.3769, 5.464, 14.42,
19.08, 21.71, 23.22],
[0.3769, 5.232, 15.54,
20.82, 23.38, 26.2],
[0.3769, 5.059, 15.47,
20.76, 23.82, 25.47],
[0.3769, 4.962, 15.0,
20.38, 24.05, 27.33],
[0.3769, 4.912, 12.52,
17.07, 20.49, 24.37],
[0.3769, 4.963, 15.0,
20.37, 24.06, 27.32],
[0.3769, 5.059, 15.46,
20.74, 23.85, 25.43],
[0.3769, 5.234, 15.53,
20.75, 23.33, 26.2],
[0.3769, 5.463, 14.42,
19.07, 21.68, 23.24],
[0.3769, 5.919, 13.88,
18.4, 20.83, 22.37],
[0.3769, 6.462, 14.12, 18.2, 20.95, 22.31]]}},
{"ex_current": 1.03, "i1": [0.0, 82.0, 164.0, 246.0, 328.0, 410.0],
"beta": [-180.0, -165.0, -150.0, -135.0, -120.0, -105.0, -90.0, -75.0, -60.0, -45.0, -30.0, -15.0, 0.0],
"psid": [[0.0297, 0.02682, 0.01684, 0.01196, 0.009485, 0.007888000000000001],
[0.0297, 0.01019, -0.0073620000000000005, -
0.01527, -0.01956, -0.022269999999999998],
[0.0297, -0.005846, -0.03052, -
0.040299999999999996, -0.04537, -0.04858],
[0.0297, -0.02028, -0.05113, -0.06227, -
0.06759, -0.07089000000000001],
[0.0297, -0.03148, -0.06805, -
0.08004, -0.08542, -0.08876],
[0.0297, -0.03852, -0.07833999999999999, -
0.08996, -0.09525, -0.09856999999999999],
[0.0297, -0.040920000000000005, -0.08159000000000001, -
0.09318, -0.09875999999999999, -0.1021],
[0.0297, -0.03853, -0.07833999999999999, -
0.08996, -0.09526, -0.09856999999999999],
[0.0297, -0.03148, -0.06806, -0.08004, -
0.08542, -0.08876999999999999],
[0.0297, -0.02028, -0.05113, -0.06227, -
0.0676, -0.07089000000000001],
[0.0297, -0.005847, -0.030529999999999998, -
0.04029, -0.045380000000000004, -0.04858],
[0.0297, 0.01018, -0.007365999999999999, -
0.01528, -0.01956, -0.02228],
[0.0297, 0.02682, 0.01684, 0.01196, 0.009485, 0.007888000000000001]],
"psiq": [[-0.0, -0.05167, -0.07825, -0.08894, -0.09447, -0.09825],
[-0.0, -0.05205, -0.07887000000000001, -
0.08817, -0.09287, -0.09615],
[-0.0, -0.04817, -0.07313, -0.08093, -
0.08467000000000001, -0.0872],
[-0.0, -0.03965, -0.06105000000000001, -
0.06696, -0.06960999999999999, -0.07155],
[-0.0, -0.02809, -0.04306, -
0.04673, -0.04835, -0.04958],
[-0.0, -0.01454, -0.02249, -0.024869999999999996, -
0.025970000000000003, -0.02667],
[-0.0, -2.457e-09, -3.8249999999999995e-09, -
4.278e-09, -4.487999999999999e-09, -4.612e-09],
[0.0, 0.01454, 0.02249, 0.024869999999999996,
0.025970000000000003, 0.026650000000000004],
[0.0, 0.02809, 0.04306, 0.04673,
0.04835, 0.049589999999999995],
[0.0, 0.03965, 0.06105000000000001,
0.06696, 0.06960999999999999, 0.07156],
[0.0, 0.04817, 0.07312, 0.08091, 0.08466, 0.0872],
[0.0, 0.05205, 0.07887000000000001,
0.08815, 0.09288, 0.09612000000000001],
[0.0, 0.05168, 0.07824, 0.08895, 0.09448, 0.09825999999999999]],
"losses": {"speed": 50.0, "ef": [1.45, 1.45], "hf": [1.0, 1.0],
"styoke_hyst": [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
"stteeth_hyst": [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
"styoke_eddy": [[22.27, 80.33, 156.3, 194.8, 215.8, 230.6],
[22.27, 68.29, 153.9,
194.4, 216.4, 232.0],
[22.27, 58.94, 155.4,
198.6, 221.6, 237.4],
[22.27, 50.72, 155.8,
200.7, 223.6, 239.5],
[22.27, 45.71, 157.2,
207.0, 230.4, 245.6],
[22.27, 43.44, 161.4,
211.4, 236.4, 252.9],
[22.27, 42.82, 162.3,
212.5, 238.4, 254.3],
[22.27, 43.44, 161.4,
211.5, 236.4, 253.1],
[22.27, 45.71, 157.2,
207.0, 230.4, 245.7],
[22.27, 50.72, 155.8,
200.7, 223.7, 239.5],
[22.27, 58.94, 155.5,
198.5, 221.5, 237.4],
[22.27, 68.29, 153.8,
194.4, 216.5, 232.0],
[22.27, 80.33, 156.3, 194.7, 215.8, 230.7]],
"stteeth_eddy": [[17.97, 56.89, 100.4, 120.9, 130.8, 138.0],
[17.97, 52.94, 99.69,
120.4, 131.9, 140.1],
[17.97, 49.03, 103.1,
125.6, 138.6, 147.7],
[17.97, 43.49, 103.1,
124.9, 136.2, 144.1],
[17.97, 39.08, 98.0,
120.7, 132.5, 143.1],
[17.97, 36.62, 97.78,
124.5, 141.1, 154.3],
[17.97, 35.85, 98.37,
126.6, 145.1, 158.8],
[17.97, 36.61, 97.78,
124.5, 141.2, 154.2],
[17.97, 39.07, 98.0,
120.7, 132.5, 143.1],
[17.97, 43.48, 103.1,
124.8, 136.2, 144.0],
[17.97, 49.02, 103.1,
125.6, 138.6, 147.8],
[17.97, 52.93, 99.67,
120.4, 131.9, 140.3],
[17.97, 56.92, 100.4, 120.9, 130.8, 138.1]],
"rotor_hyst": [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, | |
<reponame>kidinhvao/Turtle-Race-The-Game<gh_stars>0
from tkinter import *
from turtle import *
import random as rd
from winsound import *
import time as tm
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.pyplot as plt
import numpy as np
def get_Width(Screen_Width):
    """Ask the user for the track width via a turtle numinput dialog.

    Re-prompts until a value is entered (numinput returns None on cancel).
    Default is half the screen width; allowed range is
    [Screen_Width / 4, Screen_Width - 200].
    """
    chosen = None
    while chosen is None:
        chosen = numinput(
            "Width",
            "Input Width: ",
            int(Screen_Width / 2),
            int(Screen_Width / 4),
            int(Screen_Width - 200),
        )
    return chosen
def get_Height(Screen_Height):
    """Ask the user for the track height via a turtle numinput dialog.

    Bug fix: the dialog title and prompt previously said "Width" (copy-paste
    from get_Width); they now ask for the Height.
    Re-prompts until a value is entered (numinput returns None on cancel).
    Default is half the screen height; allowed range is
    [Screen_Height / 4, Screen_Height - 200].
    """
    wHeight = None
    while wHeight is None:
        wHeight = numinput("Height", "Input Height: ", int(Screen_Height / 2), int(Screen_Height / 4),
                           int(Screen_Height - 200))
    return wHeight
def draw_track(wWidth, wHeight, Screen_Width, Screen_Height):
    """Draw the race track: numbered columns of dashed vertical lane lines.

    Uses the anonymous (module-level) turtle. One column is drawn per 20 px
    of track width; each column is labelled with its index at the top.
    """
    speed(0)  # fastest drawing speed
    hideturtle()
    penup()
    # top-left corner of the track area
    goto(-Screen_Width / 2 + 150, Screen_Height / 2 - 100)
    for count in range(int(wWidth / 20)):
        write(count, align='center')  # column label
        right(90)  # face downward to draw the column
        forward(10)
        pendown()
        # dashed line: alternate 10 px drawn / 10 px skipped
        for count2 in range(int(wHeight / 20)):
            forward(10)
            penup()
            forward(10)
            pendown()
        penup()
        # return to the top of the column, face right, move to next column
        backward(int(wHeight / 20) * 20 + 10)
        left(90)
        forward(20)
def run_forward(obj, min_speed, max_speed, tnobj, name):
    """Move a racer and its name label forward by a random step.

    :param obj: the racer turtle
    :param min_speed: minimum step size (inclusive)
    :param max_speed: maximum step size (inclusive)
    :param tnobj: the turtle used to display the racer's name
    :param name: the racer's display name
    :return: the distance moved (positive)
    """
    rd.seed()
    step = rd.randint(min_speed, max_speed)
    obj.forward(step)
    # redraw the label at the racer's new position
    tnobj.clear()
    tnobj.forward(step)
    tnobj.write(name, align="left", font=("Arial", 10, "normal"))
    return step
def run_backward(obj, min_speed, max_speed, tnobj, name):
    """Move a racer and its name label backward by a random step.

    The racer turns 180 degrees, walks forward, then turns back, so it
    visibly faces the wrong way while moving. The label simply steps back.

    :return: the distance moved, negated (always <= -min_speed)
    """
    rd.seed()
    step = rd.randint(min_speed, max_speed)
    obj.left(180)  # about-face
    tnobj.clear()
    obj.forward(step)
    tnobj.backward(step)
    tnobj.write(name, align="left", font=("Arial", 10, "normal"))
    obj.left(180)  # restore original heading
    return -step
def stun(obj):
    """Wiggle the turtle in place; the net heading change is zero."""
    for turn, angle in (("left", 10), ("right", 20), ("left", 10)):
        getattr(obj, turn)(angle)
def TurSet(obj, color, x, y):
    """Configure one racer turtle: color, shape, pen up, start position.

    The final 360-degree turn leaves the heading unchanged.
    """
    setup = (
        ("color", (color,)),
        ("shape", ('turtle',)),
        ("penup", ()),
        ("goto", (x, y)),
        ("right", (360,)),
    )
    for method, args in setup:
        getattr(obj, method)(*args)
def initialize(t1, t2, t3, t4, wHeight, Screen_Width, Screen_Height):
    """Place the four racers at the start line, one per lane.

    Lanes are wHeight/5 apart, starting just below the top of the track.
    Colors are fixed: red, blue, green, black (top to bottom).
    """
    start_x = -(Screen_Width / 2) + 130
    top_y = Screen_Height / 2 - 130
    for idx, (racer, color) in enumerate(
            zip((t1, t2, t3, t4), ('red', 'blue', 'green', 'black'))):
        TurSet(racer, color, start_x, top_y - idx * (wHeight / 5))
def initialize_name(tn1, tn2, tn3, tn4, wHeight, Screen_Width, Screen_Height, name_list):
    """Erase and redraw the four name labels next to the start line.

    Used when restarting a race: each label turtle is cleared, moved back
    to its lane's start position (to the left of the racers), and rewritten
    with the name from name_list (index 0..3, top lane to bottom).
    """
    tn1.clear()
    tn2.clear()
    tn3.clear()
    tn4.clear()
    # lanes are wHeight/5 apart, matching initialize()
    tn1.goto(-(Screen_Width / 2) + 80, Screen_Height / 2 - 130)
    tn2.goto(-(Screen_Width / 2) + 80, Screen_Height / 2 - 130 - wHeight / 5)
    tn3.goto(-(Screen_Width / 2) + 80, Screen_Height / 2 - 130 - 2 * wHeight / 5)
    tn4.goto(-(Screen_Width / 2) + 80, Screen_Height / 2 - 130 - 3 * wHeight / 5)
    tn1.write(name_list[0], align="left", font=("Arial", 10, "normal"))
    tn2.write(name_list[1], align="left", font=("Arial", 10, "normal"))
    tn3.write(name_list[2], align="left", font=("Arial", 10, "normal"))
    tn4.write(name_list[3], align="left", font=("Arial", 10, "normal"))
def nameLegal(name):
    """Return True iff name is a non-empty string of at most 14 characters.

    None (dialog cancelled), the empty string, and over-long names are all
    rejected.
    """
    return name is not None and 0 < len(name) <= 14
def get_name(x):
    """Prompt for turtle number x's name until a legal one is entered.

    Legality is decided by nameLegal (non-empty, at most 14 characters).
    """
    entered = None
    while not nameLegal(entered):
        entered = textinput("Name of turtle No." + str(x), "Please name the turtle No." + str(x) + ": ")
    return entered
def naming_system(wHeight, Screen_Width, Screen_Height, tn1, tn2, tn3, tn4):
    """Collect the four racer names and draw them next to the start line.

    Prompts for each name in turn via get_name, then positions the four
    label turtles (tn1..tn4) one lane apart (wHeight/5) and writes the
    names (index 0 = top lane).

    :return: the list of four entered names.
    """
    name_list = ["", "", "", ""]
    name_list[0] = get_name(1)
    name_list[1] = get_name(2)
    name_list[2] = get_name(3)
    name_list[3] = get_name(4)
    tn1.penup()
    tn2.penup()
    tn3.penup()
    tn4.penup()
    # same lane geometry as initialize()/initialize_name()
    tn1.goto(-(Screen_Width / 2) + 80, Screen_Height / 2 - 130)
    tn1.write(name_list[0], align="left", font=("Arial", 10, "normal"))
    tn2.goto(-(Screen_Width / 2) + 80, Screen_Height / 2 - 130 - wHeight / 5)
    tn2.write(name_list[1], align="left", font=("Arial", 10, "normal"))
    tn3.goto(-(Screen_Width / 2) + 80, Screen_Height / 2 - 130 - 2 * wHeight / 5)
    tn3.write(name_list[2], align="left", font=("Arial", 10, "normal"))
    tn4.goto(-(Screen_Width / 2) + 80, Screen_Height / 2 - 130 - 3 * wHeight / 5)
    tn4.write(name_list[3], align="left", font=("Arial", 10, "normal"))
    return name_list
def winning_pose(obj, wWidth, wHeight):
    """Play the victory sound and make the winning turtle do a victory dance.

    Stops the current loop sound, starts the victory sound (winsound, so
    Windows-only), moves the winner to the screen center, wiggles it, and
    traces three circles sized from the track dimensions.
    """
    PlaySound(None, SND_ASYNC)  # stop any currently playing sound
    PlaySound("Sound02.wav", SND_ASYNC + SND_LOOP)
    obj.penup()
    obj.goto(0,0)
    # wiggle: net heading change is zero
    obj.left(20)
    obj.right(40)
    obj.left(20)
    obj.left(360)
    obj.right(360)
    obj.circle((wWidth + wHeight)/2)
    obj.right(90)
    obj.circle(wWidth)
    obj.right(90)
    obj.circle(wHeight)
    obj.right(180)
def turtle_activity(obj, min_speed, max_speed, tnobj, name):
    """Pick one random action for a racer this tick.

    Odds: ~90% run forward, ~5% run backward, ~5% stunned (wiggle in place).

    :return: the signed distance moved (0 when stunned).
    """
    rd.seed()
    roll = rd.randint(1, 100000)
    if roll < 90000:
        return run_forward(obj, min_speed, max_speed, tnobj, name)
    if roll < 95000:
        return run_backward(obj, min_speed, max_speed, tnobj, name)
    stun(obj)
    return 0
def isEnd(distance_track, wWidth, start_time, time_limit):
    """Decide whether the race is over.

    The race ends when all four racers have covered at least wWidth, or
    when time_limit seconds have elapsed since start_time.
    """
    if tm.time() - start_time >= time_limit:
        return True
    return all(distance_track[i] >= wWidth for i in range(4))
def race_system(wWidth, t1, t2, t3, t4, tn1, tn2, tn3, tn4, name_list, start_time):
    """Run the race loop until everyone finishes or time runs out.

    Each round, the four racers act once in a random order (drawn without
    replacement from runner_list). A racer that has already covered wWidth
    stops acting. When a racer crosses the line it is assigned the next
    finishing position (1, 2, ...); the winner's elapsed time is recorded
    in pass_order[4].

    :return: pass_order - indices 0..3 hold each racer's finishing position
             (20 for racers that never finished); index 4 holds the
             winner's finish time in seconds.
    """
    pass_order = [0, 0, 0, 0, 0]
    distance_track = [0, 0, 0, 0]  # distance covered by each racer
    # time limit scales with track length (in 20 px columns)
    time_limit = int((wWidth/20) * 2 + (wWidth)/20)
    flag = 0  # number of racers that have finished so far
    while not isEnd(distance_track, wWidth, start_time, time_limit):
        runner_list = [1, 2, 3, 4]
        runner_left = 4
        while runner_left > 0:
            rd.seed()
            runner_left -= 1
            # pick the next racer to act, uniformly among those remaining
            temp = rd.randint(0, runner_left)
            temp = runner_list.pop(temp)
            if temp == 1 and distance_track[temp - 1] < wWidth:
                temp2 = turtle_activity(t1, 1, 10, tn1, name_list[0])
                distance_track[0] += temp2
                if distance_track[0] >= wWidth:
                    flag += 1
                    pass_order[temp - 1] = flag
                    # first finisher: record the winning time once
                    if (pass_order[temp - 1] == 1) and (pass_order[4] == 0):
                        pass_order[4] = tm.time() - start_time
            elif temp == 2 and distance_track[temp - 1] < wWidth:
                temp2 = turtle_activity(t2, 1, 10, tn2, name_list[1])
                distance_track[1] += temp2
                if distance_track[1] >= wWidth:
                    flag += 1
                    pass_order[temp - 1] = flag
                    if (pass_order[temp - 1] == 1) and (pass_order[4] == 0):
                        pass_order[4] = tm.time() - start_time
            elif temp == 3 and distance_track[temp - 1] < wWidth:
                temp2 = turtle_activity(t3, 1, 10, tn3, name_list[2])
                distance_track[2] += temp2
                if distance_track[2] >= wWidth:
                    flag += 1
                    pass_order[temp - 1] = flag
                    if (pass_order[temp - 1] == 1) and (pass_order[4] == 0):
                        pass_order[4] = tm.time() - start_time
            elif temp == 4 and distance_track[temp - 1] < wWidth:
                temp2 = turtle_activity(t4, 1, 10, tn4, name_list[3])
                distance_track[3] += temp2
                if distance_track[3] >= wWidth:
                    flag += 1
                    pass_order[temp - 1] = flag
                    if (pass_order[temp - 1] == 1) and (pass_order[4] == 0):
                        pass_order[4] = tm.time() - start_time
    # non-finishers (time limit hit) get sentinel position 20
    for i in range(4):
        if pass_order[i] == 0:
            pass_order[i] = 20
    return pass_order
def close_window(self):
    # Despite the parameter name, 'self' is a Tk window (e.g. Toplevel)
    # passed as an ordinary argument; this just destroys it.
    self.destroy()
def announcer(finsishTime, winner, announce_string):
    """Pop up a Tk window announcing the race result.

    Shows the win/lose message, the winning turtle's number, and the
    winner's finish time formatted to two decimals.

    :param finsishTime: the winner's finish time in seconds (name is a typo
        kept for compatibility)
    :param winner: the winning turtle's number (1-4)
    :param announce_string: the win/lose message to display
    """
    strFinsish = '{:.2f}'.format(finsishTime)
    announcePopUp = Toplevel()
    announcePopUp.title("Announcer")
    PopFrame = Frame(announcePopUp, width=250, height=150)
    PopFrame.pack_propagate(0)  # keep the fixed frame size
    PopFrame.pack()
    announce = StringVar()
    label = Label(PopFrame, textvariable=announce, fg="red")
    announce.set(announce_string)
    label.pack()
    announceWinner = IntVar(value=winner)
    label1 = Label(PopFrame, textvariable=announceWinner, fg="blue")
    label1.pack()
    announce2 = StringVar()
    label2 = Label(PopFrame, textvariable=announce2, fg="red")
    announce2.set("Winner's finish time: ")
    label2.pack()
    announceTime = StringVar()
    label3 = Label(PopFrame, textvariable=announceTime, fg="blue")
    announceTime.set(strFinsish)
    label3.pack()
    button = Button(PopFrame, text="OK", command=announcePopUp.destroy)
    button.pack(side=TOP)
def get_Bet():
    """Ask which turtle (1-4) the player bets on; repeats until answered."""
    choice = None
    while choice is None:
        choice = numinput("Bet", "Please enter the No. of the one you want to bet for: ", 1, 1, 4)
    return choice
def showChar(score, name_list, count, bet_won):
    """Show a results window: a score bar chart and a won/lost bet pie chart.

    :param score: cumulative scores, one per turtle
    :param name_list: the four turtle names (x labels)
    :param count: total number of races played
    :param bet_won: number of bets the player has won
    """
    win_graph = Toplevel()
    win_graph.title('Result graph')
    button = Button(win_graph, text="OK", command=lambda: close_window(win_graph))
    y_pos = np.arange(len(name_list))
    # bar chart of total scores, embedded on the right
    fig1 = plt.figure(figsize=(6, 6))
    a = fig1.add_subplot(111)
    a.bar(y_pos, score, align='center', alpha=0.5)
    plt.xticks(y_pos, name_list)
    plt.ylabel('Total Score')
    plt.xlabel('Turtle name')
    plt.title('Total Score')
    canvas1 = FigureCanvasTkAgg(fig1, master=win_graph)
    canvas1.get_tk_widget().pack(side=RIGHT)
    canvas1.draw()
    # pie chart of bets won vs lost, embedded on the left
    fig2 = plt.figure(figsize=(6, 6))
    b = fig2.add_subplot(111)
    b.pie([bet_won, count - bet_won], colors=['blue', 'yellow'], shadow=True, startangle=90, autopct='%1.1f%%')
    b.legend(labels=['won', 'lose'], loc="best")
    b.axis('equal')
    canvas2 = FigureCanvasTkAgg(fig2, master=win_graph)
    canvas2.get_tk_widget().pack(side=LEFT)
    canvas2.draw()
    button.pack(side=BOTTOM)
def game(t1, t2, t3, t4, wWidth, wHeight, Screen_Width, Screen_Height, score, counter, bet_won, tn1, tn2, tn3, tn4,
         name_list):
    """Play one full race: set up the track, take the bet, race, announce.

    On replays (counter > 1) the screen is cleared and the track dimensions
    are re-asked. Scores are awarded inversely to finishing position
    (non-finishers carry position 20 and so earn very little). Finally the
    winner performs its victory pose and the results charts are shown.

    :return: the updated number of bets won.
    """
    if counter > 1:
        # replay: wipe the old track and re-ask dimensions
        clear()
        wWidth = get_Width(Screen_Width)
        wHeight = get_Height(Screen_Height)
    draw_track(wWidth, wHeight, Screen_Width, Screen_Height)
    initialize_name(tn1, tn2, tn3, tn4, wHeight, Screen_Width, Screen_Height, name_list)
    initialize(t1, t2, t3, t4, wHeight, Screen_Width, Screen_Height)
    bet = get_Bet()
    start_time = tm.time()
    pass_order = race_system(wWidth, t1, t2, t3, t4, tn1, tn2, tn3, tn4, name_list, start_time)
    # score is inversely proportional to finishing position
    score[0] += (wWidth / 10) / pass_order[0]
    score[1] += (wWidth / 10) / pass_order[1]
    score[2] += (wWidth / 10) / pass_order[2]
    score[3] += (wWidth / 10) / pass_order[3]
    winner = 0
    for i in range(4):
        if pass_order[i] == 1:
            winner = i + 1
    if pass_order[int(bet - 1)] == 1:
        announcer(pass_order[4], winner, "You won >w<\nWinner is turtle number: ")
        bet_won += 1
    else:
        announcer(pass_order[4], winner, "You lose :'<\nWinner is turtle number: ")
    if pass_order[0] == 1:
        winning_pose(t1, wWidth, wHeight)
    elif pass_order[1] == 1:
        winning_pose(t2, wWidth, wHeight)
    elif pass_order[2] == 1:
        winning_pose(t3, wWidth, wHeight)
    else:
        winning_pose(t4, wWidth, wHeight)
    # debug output
    print(score)
    print(counter)
    print(bet_won)
    showChar(score, name_list, counter, bet_won)
    return bet_won
def restart(self, t1, t2, t3, t4, wWidth, wHeight, Screen_Width, Screen_Height, score, counter, bet_won, tn1, tn2, tn3,
            tn4, name_list):
    """Close the previous popup, play another race, then offer to restart.

    :param self: the popup window (Toplevel) from the previous round, passed
        as an ordinary argument and destroyed here.
    Recursion note: the restart button calls this function again with
    counter + 1, so state is threaded through the arguments.
    """
    self.destroy()
    # restart the background music loop
    PlaySound(None, SND_ASYNC)
    PlaySound("Sound01.wav", SND_ASYNC + SND_LOOP)
    bet_won = game(t1, t2, t3, t4, wWidth, wHeight, Screen_Width, Screen_Height, score, counter, bet_won, tn1, tn2, tn3,
                   tn4, name_list)
    RestartPopUp = Toplevel()
    RestartPopUp.title("Restart ?")
    root1Frame = Frame(RestartPopUp, width=270, height=50)
    root1Frame.pack_propagate(0)
    root1Frame.pack()
    restart_button = Button(root1Frame, text="Restart", fg="black",
                            command=lambda: restart(RestartPopUp, t1, t2, t3, t4, wWidth, wHeight, Screen_Width,
                                                    Screen_Height, score, counter + 1, bet_won, tn1, tn2, tn3, tn4,
                                                    name_list))
    restart_button.pack(side=TOP)
    quit_button = Button(root1Frame, text="Quit", fg="red", command=quit)
    quit_button.pack(side=TOP)
def Start(self):
PlaySound("Sound01.wav", | |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# pylint: disable=W0212
# pylint: disable=R0913
# pylint: disable=W0233
# pylint: disable=W0231
"""
Tools for accessing and managing ceph configuration content.
"""
import os
import uuid
import hashlib
import ConfigParser
from cStringIO import StringIO
from vsm import utils
from vsm import exception
from vsm import context as vsm_context
from vsm import manager
from vsm import flags
from vsm import db
from vsm.agent import rpcapi as agent_rpc
from vsm.openstack.common import log as logging
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
class ClusterIdAccessor:
    """
    Read and cache the cluster id from /opt/stack/data/vsm/cluster_id the first
    time get_cluster_id() is called; thereafter, retrieve the cached value.
    """
    # Unset default; assignment in get_cluster_id creates an instance
    # attribute, so the cache is effectively per-instance.
    _cluster_id = None
    def __init__(self):
        pass
    def get_cluster_id(self):
        """
        Cache and return the cluster id. If the local copy is not yet set, read it from the
        cluster_id file, and then cache and return it, else just return the cached copy.
        :return: the cluster id found in /opt/stack/data/vsm/cluster_id.
        """
        if not self._cluster_id:
            cluster_id_file = os.path.join(FLAGS.state_path, 'cluster_id')
            # missing file is not an error: returns None and retries next call
            if os.path.exists(cluster_id_file):
                self._cluster_id = utils.read_file_as_root(cluster_id_file).strip()
        return self._cluster_id
class ConfigInfo:
    """
    Common shape for one copy of ceph configuration data: the raw text, its
    md5 checksum, and a last-update timestamp. Subclasses populate the
    fields; this base supplies the defaults and the read accessors.
    """
    # Class-level defaults; subclasses overwrite them per instance.
    _content = ""
    _md5sum = ""
    _luts = 0
    def __init__(self):
        pass
    def get_content(self):
        """Return the raw configuration text ("" when unset)."""
        return self._content
    def get_md5sum(self):
        """Return the md5 hex digest of the content ("" when unset)."""
        return self._md5sum
    def get_luts(self):
        """Return the last-update timestamp (0 when unset)."""
        return self._luts
class DBConfigInfo(ConfigInfo):
    """
    A subclass of ConfigInfo that works specifically for accessing data and metadata of the database copy of
    ceph configuration data.
    """
    def __init__(self, accessor, context):
        """
        Read the db cluster:ceph_conf, cluster:ceph_conf_md5, and cluster:ceph_conf_luts. If the cluster_id
        cannot be obtained, or if these db fields are not yet populated, just return an empty string for the
        contents and the md5 and zero for the timestamp, but don't throw an exception. Since content is stripped
        before being written to the db, there's no need to strip content on the way out here.
        :param accessor: the cluster id accessor to use to access the correct row in the cluster table.
        :param context: the database access context - for rights.
        """
        ConfigInfo.__init__(self)
        cluster_id = accessor.get_cluster_id()
        # no cluster id yet -> keep the base-class empty defaults
        if cluster_id:
            self._content = db.cluster_get_ceph_conf(context, cluster_id)
            dbmeta = db.cluster_get_ceph_conf_metadata(context, cluster_id)
            # metadata keys may be absent before the first sync
            if 'ceph_conf_md5sum' in dbmeta:
                self._md5sum = dbmeta['ceph_conf_md5sum']
            if 'ceph_conf_luts' in dbmeta:
                self._luts = dbmeta['ceph_conf_luts']
class FileConfigInfo(ConfigInfo):
    """
    A subclass of ConfigInfo that works specifically for accessing data and metadata of a file system copy of
    ceph configuration data.
    """
    def __init__(self, fp, sync):
        """
        Read the file into a memory buffer. If sync is True, also read the file's meta data. If the file
        doesn't exist just return an empty string for the file contents, but don't throw an exception.
        :param fp: the file path to read from. File content is stripped as it's read in so that md5 check sums
        for empty files will match that of empty content from the database.
        :param sync: if true, read the file last write timestamp and generate an md5 checksum on the data.
        """
        ConfigInfo.__init__(self)
        try:
            self._content = utils.read_file_as_root(fp).strip()
            if sync:
                statbuf = os.stat(fp)
                # truncate mtime to whole seconds for comparison with db luts
                self._luts = int(statbuf.st_mtime)
                # NOTE(review): md5 over a str is Python 2 behavior; under
                # Python 3 this would need bytes - confirm interpreter target.
                self._md5sum = hashlib.md5(self._content).hexdigest()
        except (exception.FileNotFound, os.error):
            # missing/unreadable file -> keep the empty defaults silently
            pass
class CephConfigSynchronizer:
"""
Provides functionality to synchronize the latest updates from either the VSM database or a cluster node's ceph
configuration file (/etc/ceph/ceph.conf). The algorithm used for synchronizing master ceph configuration files
is as follows:
When CephConfigSynchronizer().sync_before_read() is called (with 'sync' == True [default]) the system compares
md5 check sums of the DB copy and the local file system copy. If they're different, the system checks the last
update timestamp (luts) of each of these copies to see which one is newer. If the DB copy is newer, it's written
to the file system. If the file system copy is newer, it's written to the DB and the other agents are notified.
"""
_cluster_id_accessor = ClusterIdAccessor()
_context = vsm_context.get_admin_context()
_host = FLAGS.host
def __init__(self):
pass
def _write_ceph_conf_to_db(self, content):
"""
Write specified content to db. 'content' is stripped before write so empty content will match an empty file's
md5 check sum.
:param content: the ceph configuration file content to be written to the database
"""
cluster_id = self._cluster_id_accessor.get_cluster_id()
if not cluster_id:
LOG.debug('Can not get cluster_id; unable to save ceph.conf to db')
return
db.cluster_update_ceph_conf(self._context, cluster_id, content.strip())
def _request_all_remote_agents_update_ceph_conf_from_db(self):
"""
Send a message to all remote agents to perform a sync between their /etc/ceph/ceph.conf and the db ceph conf.
"""
server_list = db.init_node_get_all(self._context)
for ser in server_list:
if ser['host'] != self._host:
LOG.debug("notifying %s to sync with db" % ser['host'])
agent_rpc.AgentAPI().update_ceph_conf(self._context, ser['host'])
def sync_before_read(self, cfgfile, sync=True):
"""
Check DB cluster:ceph_conf against fp if sync is True. If checksums are different or (only) one of them does
not exist, compare timestamps. Timestamp of missing entity is always considered older than the existing entity.
If DB is newer, write DB to file. If file is newer, sync DB from file and signal agents to sync with DB. Parse
file if it exists.
:param cfgfile: the file path from which to parse config data (ok for file to not exist).
:param sync: sync if True, otherwise just parse specified file if exists.
:return: The latest config content - from db or file (if sync==False, always from file)
"""
fpinfo = FileConfigInfo(cfgfile, sync)
latest_content = fpinfo.get_content()
LOG.debug("fpinfo: %.30s, %s, %d" % (fpinfo.get_content(), fpinfo.get_md5sum(), fpinfo.get_luts()))
if sync:
dbinfo = DBConfigInfo(self._cluster_id_accessor, self._context)
LOG.debug("dbinfo: %.30s, %s, %d" % (dbinfo.get_content(), dbinfo.get_md5sum(), dbinfo.get_luts()))
if fpinfo.get_md5sum() != dbinfo.get_md5sum():
LOG.debug("md5sums different, checking last update timestamp")
if fpinfo.get_luts() > dbinfo.get_luts():
LOG.debug("file timestamp greater than db timestamp; writing file to db and notifying agents")
self._write_ceph_conf_to_db(latest_content)
self._request_all_remote_agents_update_ceph_conf_from_db()
else:
LOG.debug("db timestamp greater than file timestamp; writing db to file")
latest_content = dbinfo.get_content() + '\n'
utils.write_file_as_root(cfgfile, latest_content, "w")
return latest_content
def sync_after_write(self, content):
    """
    Write 'content' to DB then notify all agents to sync now.
    :param content: the ceph configuration data to be sent to the DB and pulled by all nodes.
    """
    # Lazy %-style logging args: only format the (possibly large) content
    # when DEBUG logging is enabled.
    LOG.debug("updating db: %.30s", content)
    self._write_ceph_conf_to_db(content)
    self._request_all_remote_agents_update_ceph_conf_from_db()
class CephConfigParser(manager.Manager):
"""
Wrap and extend an instance of python config parser to manage configuration data parsed from a ceph
configuration file (normally found in /etc/ceph/$cluster.conf - where $cluster is often 'ceph').
"""
def _load_ceph_conf_from_dict(self, dict_cfg):
    """
    Load ceph configuration parameters from a section:options dictionary of dictionaries.
    :param dict_cfg: {section: {option:value, option:value}, section...}
    :return: None
    :raises TypeError: if dict_cfg is not a dict of dicts.
    """
    try:
        # items() instead of iteritems(): identical behavior on Python 2,
        # and forward-compatible with Python 3.
        for section, options in dict_cfg.items():
            self._parser.add_section(section)
            for option, value in options.items():
                self._parser.set(section, option, value)
    except (AttributeError, TypeError):
        # Non-dict inputs surface as AttributeError (no .items) or TypeError.
        # Narrowed from a bare `except:` so genuine parser errors (e.g.
        # DuplicateSectionError) are no longer masked as TypeError.
        raise TypeError("dict_cfg must be a dict of dicts - {section:{option:value,...},...}")
def __init__(self, fp=None, sync=True, *args, **kwargs):
"""
Build a python ConfigParser capable of properly parsing a ceph configuration file. Primarily, this means
replacing the optionxform function with one that treats option names with underscores the same as those
without, and in a case-insensitive manner.
If the file path (fp) is not empty and it's either a string or unicode string then attempt to read the
contents of that file into the parser (though the CephConfigSynchronizer's sync_before_read method to give
this file a chance to be synchronized into the database if it's newer than the db version).
If the file path is a dictionary, attempt to parse it as a dictionary representation of configuration file
content, e.g., {section:{option:value,...},...}.
:param fp: an optional file path or configuration dictionary
:param sync: sync with db before reading the file path above if true
:param args: not used
:param kwargs: not used
"""
super(CephConfigParser, self).__init__(*args, **kwargs)
self._parser = ConfigParser.ConfigParser()
self._parser.optionxform = lambda optname: optname.lower().replace(' ', '_')
if fp is not None:
if isinstance(fp, | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2011 <NAME> (<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Combine the power of grasp sets and randomized planners to get any robot arm picking up objects from a table and putting them in a dish rack.
.. examplepre-block:: graspplanning
Description
-----------
The example uses the powerful TaskManipulation problem interface, which takes advantage of many OpenRAVE features. It performs:
* Pick grasps and validate them with the grasper planner
* Move to the appropriate grasp preshape while avoiding obstacles
* Use an RRT and Jacobian-based gradient descent methods to safely move close to an obstacle
* Use CloseFingers to grasp the object while checking for collisions with other unwanted objects
* Use body grabbing to grasp the object and move it to its destination
* Lower the object until collision and then release and move away from it.
The scene is randomized every run in order to show the power of the planners.
.. figure:: ../../images/examples/graspplanning_gallery.jpg
:width: 640
Gallery of runs.
Destinations
============
By default, the grasp planner will choose a grasp that is also valid at a destination point. If
running on custom environments, sometimes it is interesting to see if the robot can just grasp the
object, without moving it to the destination. To test planning without destinations use:
.. code-block:: bash
openrave.py --example graspplanning --nodestinations
5D IK Grasp Planning
====================
It is possible to perform grasp planning with 5D IK.
Neuronics Katana
~~~~~~~~~~~~~~~~
First create the grasp set with:
.. code-block:: bash
openrave.py --database grasping --robot=robots/neuronics-katana.zae --manipname=arm --target=data/box_frootloops.kinbody.xml --manipulatordirection="0 1 0"
Then execute:
.. code-block:: bash
openrave.py --example graspplanning --scene=data/katanatable.env.xml
.. figure:: ../../images/examples/graspplanning_katana.jpg
:width: 640
Kuka Youbot
~~~~~~~~~~~
Grasp set generation:
.. code-block:: bash
openrave.py --database grasping --robot=robots/kuka-youbot.zae --manipulatordirection="0 1 0" --target=data/thinbox.kinbody.xml
Then execute:
.. code-block:: bash
openrave.py --example graspplanning --scene=data/youbot1.env.xml
.. examplepost-block:: graspplanning
"""
from __future__ import with_statement # for python 2.5
__author__ = '<NAME>'
import time
from itertools import izip
import openravepy
if not __openravepy_build_doc__:
from openravepy import *
from numpy import *
try:
from multiprocessing import cpu_count
except:
def cpu_count(): return 1
class GraspPlanning(openravepy.metaclass.AutoReloader):
def __init__(self,robot,randomize=True,dests=None,nodestinations=False,switchpatterns=None,plannername=None,minimumgoalpaths=1):
    """Set up IK/link-statistics models, manipulation interfaces, and the
    list of graspable bodies (with optional random destinations) for robot.

    :param robot: the robot to plan for
    :param randomize: if True, jitter object and robot poses after setup
    :param dests: optional preset destination transforms for every graspable
    :param nodestinations: if True, plan grasps only, without destinations
    :param switchpatterns: optional model-switching patterns for TaskManipulation
    :param plannername: planner passed to the manipulation interfaces
    :param minimumgoalpaths: minimum goal paths requested from the planners
    """
    self.envreal = robot.GetEnv()
    self.robot = robot
    self.plannername=plannername
    self.nodestinations = nodestinations
    self.minimumgoalpaths=minimumgoalpaths
    # Prefer a full 6D transform IK; fall back to 5D translation+direction
    # IK for arms (e.g. Katana) where 6D generation raises ValueError.
    try:
        self.ikmodel = databases.inversekinematics.InverseKinematicsModel(robot=robot,iktype=IkParameterization.Type.Transform6D)
        if not self.ikmodel.load():
            self.ikmodel.autogenerate()
    except ValueError:
        print '6D IK failed, trying 5D IK'
        self.ikmodel = databases.inversekinematics.InverseKinematicsModel(robot=robot,iktype=IkParameterization.Type.TranslationDirection5D)
        if not self.ikmodel.load():
            self.ikmodel.autogenerate()
    # Link statistics provide per-joint weights/resolutions for planning.
    self.lmodel = databases.linkstatistics.LinkStatisticsModel(self.robot)
    if not self.lmodel.load():
        self.lmodel.autogenerate()
    self.lmodel.setRobotWeights()
    self.lmodel.setRobotResolutions(xyzdelta=0.005)
    print 'robot resolutions: ',robot.GetDOFResolutions()
    print 'robot weights: ',robot.GetDOFWeights()
    # could possibly affect generated grasp sets?
    # self.cdmodel = databases.convexdecomposition.ConvexDecompositionModel(self.robot)
    # if not self.cdmodel.load():
    #     self.cdmodel.autogenerate()
    self.switchpatterns = switchpatterns
    with self.envreal:
        self.basemanip = interfaces.BaseManipulation(self.robot,plannername=plannername)
        self.basemanip.prob.SendCommand('SetMinimumGoalPaths %d'%self.minimumgoalpaths)
        self.taskmanip = None
        self.updir = array((0,0,1))
        # find all the bodies to manipulate
        self.graspables = self.getGraspables(dests=dests)
        if len(self.graspables) == 0:
            # No pre-built grasp tables: try to build one for a 'mug'/'target'
            # body found in the environment.
            print 'attempting to auto-generate a grasp table'
            targets=[t for t in self.envreal.GetBodies() if t.GetName().find('mug')>=0 or t.GetName().find('target')>=0]
            if len(targets) > 0:
                gmodel = databases.grasping.GraspingModel(robot=self.robot,target=targets[0])
                if not gmodel.load():
                    gmodel.numthreads = cpu_count()
                    gmodel.autogenerate()
                    self.graspables = self.getGraspables(dests=dests)
        self.randomize=randomize
        if self.randomize:
            self.randomizeObjects()
        if dests is None and not self.nodestinations:
            # Generate random destinations on the body named 'table' for any
            # graspable that has none; targets are disabled during sampling
            # so they do not collide with themselves.
            tablename = 'table'
            table = self.envreal.GetKinBody(tablename)
            if table is not None:
                alltargets = [graspable[0].target for graspable in self.graspables]
                for target in alltargets:
                    target.Enable(False)
                try:
                    needdests_graspables = [graspable for graspable in self.graspables if graspable[1] is None]
                    curdests = [graspable[0].target.GetTransform() for graspable in needdests_graspables]
                    alldests = self.setRandomDestinations([graspable[0].target for graspable in needdests_graspables],table)
                    for graspable,dests in izip(needdests_graspables,alldests):
                        graspable[1] = dests+curdests
                finally:
                    for target in alltargets:
                        target.Enable(True)
            else:
                print 'could not find %s'%tablename
def getGraspables(self,dests=None):
graspables = []
print 'searching for graspable objects (robot=%s)...'%(self.robot.GetRobotStructureHash())
for target in self.envreal.GetBodies():
if not target.IsRobot():
gmodel = databases.grasping.GraspingModel(robot=self.robot,target=target)
if gmodel.load():
print '%s is graspable'%target.GetName()
graspables.append([gmodel,dests])
return graspables
def GetGraspable(self,name):
    """Return the graspable entry whose target body is named `name`,
    or None if no such entry exists."""
    matches = (entry for entry in self.graspables
               if entry[0].target.GetName() == name)
    return next(matches, None)
def randomizeObjects(self):
    """Jitter every graspable target, then the robot, in the XY plane.

    Each body keeps the first collision-free pose found (up to 5 attempts);
    if all attempts collide, its original pose is restored.
    """
    for graspable in self.graspables:
        self._randomizePose(graspable[0].target)
    # randomize the robot
    self._randomizePose(self.robot)

def _randomizePose(self,body):
    """Try up to 5 random XY offsets in [-0.1, 0.1) for `body`, keeping the
    first collision-free pose; restores the last good pose at the end.
    (Factored out of randomizeObjects, which duplicated this loop for
    objects and the robot; also renames the loop variable that shadowed
    the `iter` builtin.)"""
    Tbody = body.GetTransform()
    for attempt in range(5):
        Tnew = array(Tbody)
        Tnew[0,3] += -0.1 + 0.2 * random.rand()
        Tnew[1,3] += -0.1 + 0.2 * random.rand()
        body.SetTransform(Tnew)
        if not self.envreal.CheckCollision(body):
            Tbody = Tnew
            break
    body.SetTransform(Tbody)
@staticmethod
def setRandomDestinations(targets, table,transdelta=0.1,zoffset=0.01,Trolls=None,randomize=False,preserverotation=True):
    """Sample collision-free destination transforms for each target on top
    of `table`.

    :param targets: bodies to find destinations for
    :param table: support surface; its AABB defines the sampling area
    :param transdelta: grid spacing for candidate XY translations
    :param zoffset: height above the table surface for placements
    :param Trolls: optional candidate orientations; defaults to rotations
        about Z (every 90 deg) plus rolls about X
    :param randomize: if True, jitter the XY grid randomly
    :param preserverotation: if True, keep each target's current rotation
        composed with the candidate roll
    :return: list (per target) of lists of valid 4x4 transforms
    """
    with table.GetEnv():
        print 'searching for destinations on %s...'%table.GetName()
        # Compute the table AABB in its local frame by temporarily moving
        # the table to the identity transform.
        Ttable = table.GetTransform()
        table.SetTransform(eye(4))
        ab = table.ComputeAABB()
        table.SetTransform(Ttable)
        p = ab.pos()
        e = ab.extents()
        # Candidate XY grid over the table extents, normalized to (0,1).
        Nx = floor(2*e[0]/transdelta)
        Ny = floor(2*e[1]/transdelta)
        X = []
        Y = []
        if randomize:
            for x in arange(Nx):
                X = r_[X, random.rand(Ny)*0.5/(Nx+1) + (x+1)/(Nx+1)]
                Y = r_[Y, random.rand(Ny)*0.5/(Ny+1) + arange(0.5,Ny,1.0)/(Ny+1)]
        else:
            for x in arange(Nx):
                X = r_[X, tile((x+1)/(Nx+1),Ny)]
                Y = r_[Y, arange(0.5,Ny,1.0)/(Ny+1)]
        # Map normalized grid to table-local coordinates, at table top + zoffset.
        translations = c_[p[0]-e[0]+2*e[0]*X,p[1]-e[1]+2*e[1]*Y,tile(p[2]+e[2]+zoffset,len(X))]
        if Trolls is None:
            Trolls = [matrixFromAxisAngle(array((0,0,1)),roll) for roll in arange(0,2*pi,pi/2)] + [matrixFromAxisAngle(array((1,0,0)),roll) for roll in [pi/2,pi,1.5*pi]]
        # Disable all targets so candidate poses are only checked against
        # the rest of the environment, not against the other targets.
        for target in targets:
            target.Enable(False)
        try:
            alldests = []
            for target in targets:
                Torg = eye(4)
                if preserverotation:
                    Torg[0:3,0:3] = target.GetTransform()[0:3,0:3]
                # State saver restores the target's pose/enable after testing.
                with target.CreateKinBodyStateSaver():
                    target.Enable(True)
                    dests = []
                    for translation in translations:
                        for Troll in Trolls:
                            Troll = array(Troll)
                            Troll[0:3,3] = translation
                            target.SetTransform(dot(Ttable, dot(Troll, Torg)))
                            if not table.GetEnv().CheckCollision(target):
                                dests.append(target.GetTransform())
                alldests.append(dests)
            return alldests
        finally:
            for target in targets:
                target.Enable(True)
def viewDestinations(self,gmodel,Tdests,delay=0.5):
    """Visualize one valid grasp of gmodel's target at each destination.

    :param gmodel: grasping model whose target and grasps are shown
    :param Tdests: iterable of 4x4 destination transforms
    :param delay: seconds each grasp is displayed
    """
    # The target's state saver restores its original pose afterwards.
    with gmodel.target:
        for i,T in enumerate(Tdests):
            print 'target %s dest %d/%d'%(gmodel.target.GetName(),i,len(Tdests))
            gmodel.target.SetTransform(T)
            validgrasps, indices = gmodel.computeValidGrasps(returnnum=1)
            gmodel.target.GetEnv().UpdatePublishedBodies()
            # NOTE(review): assumes at least one valid grasp exists at every
            # destination; validgrasps[0] raises IndexError otherwise — confirm.
            gmodel.showgrasp(validgrasps[0],useik=True,collisionfree=True,delay=delay)
def waitrobot(self,robot=None):
    """Busy-wait (polling every 10 ms) until the robot's controller
    reports that its current command is done. Defaults to self.robot."""
    if robot is None:
        robot = self.robot
    controller = robot.GetController()
    while not controller.IsDone():
        time.sleep(0.01)
def graspAndPlaceObject(self,gmodel,dests,waitforkey=False,movehanddown=True,**kwargs):
    """grasps an object and places it in one of the destinations. If no destination is specified, will just grasp it

    :param gmodel: grasping model of the target object
    :param dests: candidate destination poses passed to GraspPlanning
    :param waitforkey: pause for user input after grabbing the object
    :param movehanddown: lift after grasping and lower before releasing
    :return: index of the successful grasp, or -1 if all grasps failed
    """
    env = self.envreal#.CloneSelf(CloningOptions.Bodies)
    robot = self.robot
    with env:
        # Fresh TaskManipulation interface bound to this grasp model's grasper.
        self.taskmanip = interfaces.TaskManipulation(self.robot,graspername=gmodel.grasper.plannername,plannername=self.plannername)
        self.taskmanip.prob.SendCommand('SetMinimumGoalPaths %d'%self.minimumgoalpaths)
        if self.switchpatterns is not None:
            self.taskmanip.SwitchModels(switchpatterns=self.switchpatterns)
        robot.SetActiveManipulator(gmodel.manip)
        robot.SetActiveDOFs(gmodel.manip.GetArmIndices())
    istartgrasp = 0
    # Final straight-line approach is only used with full 6D IK.
    approachoffset = 0.02 if self.ikmodel.iktype == IkParameterization.Type.Transform6D else 0.0
    target = gmodel.target
    stepsize = 0.001
    # Try grasps in order; each GraspPlanning call resumes after the last
    # grasp index attempted.
    while istartgrasp < len(gmodel.grasps):
        goals,graspindex,searchtime,trajdata = self.taskmanip.GraspPlanning(gmodel=gmodel,grasps=gmodel.grasps[istartgrasp:], approachoffset=approachoffset,destposes=dests, seedgrasps = 3,seeddests=8,seedik=1,maxiter=1000, randomgrasps=self.randomize,randomdests=self.randomize)
        istartgrasp = graspindex+1
        grasp = gmodel.grasps[graspindex]
        Tglobalgrasp = gmodel.getGlobalGraspTransform(grasp,collisionfree=True)
        self.waitrobot(robot)
        print 'grasp %d initial planning time: %f'%(graspindex,searchtime)
        if approachoffset != 0:
            # Close the remaining approach distance with a straight-line move.
            print 'moving hand'
            expectedsteps = floor(approachoffset/stepsize)
            try:
                # should not allow any error since destination goal depends on accurate relative placement
                # of the gripper with respect to the object
                with gmodel.target:
                    print 'current robot', repr(robot.GetDOFValues())
                    print 'global direction',repr(dot(gmodel.manip.GetTransform()[0:3,0:3],gmodel.manip.GetDirection())), gmodel.getGlobalApproachDir(grasp)
                    print 'local direction',grasp[gmodel.graspindices.get('igraspdir')]
                    # Disable the target so approaching it is not a collision.
                    gmodel.target.Enable(False)
                    res = self.basemanip.MoveHandStraight(direction=gmodel.getGlobalApproachDir(grasp), ignorefirstcollision=0,stepsize=stepsize,minsteps=expectedsteps,maxsteps=expectedsteps)
            except planning_error:
                # Straight-line approach failed: fall back to a full planner.
                print 'use a planner to move the rest of the way'
                try:
                    self.basemanip.MoveToHandPosition(matrices=[Tglobalgrasp],maxiter=1000,maxtries=1,seedik=4)
                except planning_error,e:
                    print 'failed to reach grasp',e
                    continue
            self.waitrobot(robot)
        self.taskmanip.CloseFingers(translationstepmult=gmodel.translationstepmult,finestep=gmodel.finestep)
        self.waitrobot(robot)
        with env:
            robot.Grab(target)
        if waitforkey:
            raw_input('press any key to continue grasp')
        success = graspindex
        if movehanddown:
            # Lift the object off the support surface before transporting it.
            try:
                print 'move hand up'
                self.basemanip.MoveHandStraight(direction=self.updir,stepsize=0.003,minsteps=1,maxsteps=60)
            except:
                print 'failed to move hand up'
            self.waitrobot(robot)
        if len(goals) > 0:
            print 'planning to destination'
            try:
                self.basemanip.MoveToHandPosition(ikparams=goals,maxiter=2000,maxtries=2,seedik=8)
                self.waitrobot(robot)
            except planning_error,e:
                # Retry via an intermediate goal slightly above the original.
                print 'failed to reach a goal, trying to move goal a little up',e
                if goals[0].GetType() == IkParameterizationType.Transform6D:
                    Tgoal = goals[0].GetTransform6D()
                    Tgoal[0:3,3] += self.updir*0.015
                    try:
                        self.basemanip.MoveToHandPosition(matrices=[Tgoal],maxiter=3000,maxtries=2,seedik=8)
                        self.waitrobot(robot)
                        self.basemanip.MoveToHandPosition(ikparams=goals,maxiter=2000,maxtries=2,seedik=8)
                        self.waitrobot(robot)
                    except planning_error,e:
                        print e
                        success = -1
        if movehanddown:
            # Lower the object until (just before) contact before releasing.
            print 'moving hand down'
            try:
                res = self.basemanip.MoveHandStraight(direction=-self.updir,stepsize=0.003,minsteps=1,maxsteps=100)
            except:
                print 'failed to move hand down'
            self.waitrobot(robot)
        # Release sequence: try normally, then after ungrabbing, then by
        # forcing the fingers back to the grasp preshape.
        try:
            res = self.taskmanip.ReleaseFingers(target=target,translationstepmult=gmodel.translationstepmult,finestep=gmodel.finestep)
        except planning_error:
            res = None
        if res is None:
            print 'problems releasing, releasing target first'
            with env:
                robot.ReleaseAllGrabbed()
                try:
                    res = self.taskmanip.ReleaseFingers(target=target,translationstepmult=gmodel.translationstepmult,finestep=gmodel.finestep)
                except planning_error:
                    res = None
            if res is None:
                print 'forcing fingers'
                with env:
                    robot.SetDOFValues(gmodel.grasps[graspindex][gmodel.graspindices['igrasppreshape']],gmodel.manip.GetGripperIndices())
        self.waitrobot(robot)
        with env:
            robot.ReleaseAllGrabbed()
        # Back away if the gripper is still in contact after releasing.
        with CollisionOptionsStateSaver(env.GetCollisionChecker(),CollisionOptions.ActiveDOFs):
            if env.CheckCollision(robot):
                print 'robot in collision, moving back a little'
                try:
                    self.basemanip.MoveHandStraight(direction=-dot(gmodel.manip.GetTransform()[0:3,0:3],gmodel.manip.GetDirection()), stepsize=stepsize,minsteps=1,maxsteps=10)
                    self.waitrobot(robot)
                except planning_error,e:
                    pass
                if env.CheckCollision(robot):
                    try:
                        self.taskmanip.ReleaseFingers(target=target,translationstepmult=gmodel.translationstepmult,finestep=gmodel.finestep)
                    except planning_error:
                        res = None
                    #raise ValueError('robot still in collision?')
        if success >= 0:
            return success # return successful grasp index
    # exhausted all grasps
    return -1
def performGraspPlanning(self,withreplacement=True,**kwargs):
print 'starting to pick and place random objects'
graspables = self.graspables[:]
failures = 0
while True:
| |
:const:`pypet.pypetconstants.SINGLE_RUN` ('SINGLE_RUN')
:param stuff_to_store: The trajectory
:param store_data: How to store data see above
:param store_final: If final meta info should be stored
* :const:`pypet.pypetconstants.LEAF`
Stores a parameter or result
Note that everything that is supported by the storage service and that is
stored to disk will be perfectly recovered.
For instance, you store a tuple of numpy 32 bit integers, you will get a tuple
of numpy 32 bit integers after loading independent of the platform!
:param stuff_to_store: Result or parameter to store
In order to determine what to store, the function '_store' of the parameter or
result is called. This function returns a dictionary with name keys and data to
store as values. In order to determine how to store the data, the storage flags
are considered, see below.
The function '_store' has to return a dictionary containing values only from
the following objects:
* python natives (int, long, str, bool, float, complex),
*
numpy natives, arrays and matrices of type np.int8-64, np.uint8-64,
np.float32-64, np.complex, np.str
*
python lists and tuples of the previous types
(python natives + numpy natives and arrays)
Lists and tuples are not allowed to be nested and must be
homogeneous, i.e. only contain data of one particular type.
Only integers, or only floats, etc.
*
python dictionaries of the previous types (not nested!), data can be
heterogeneous, keys must be strings. For example, one key-value-pair
of string and int and one key-value pair of string and float, and so
on.
* pandas DataFrames_
* :class:`~pypet.parameter.ObjectTable`
.. _DataFrames: http://pandas.pydata.org/pandas-docs/dev/dsintro.html#dataframe
The keys from the '_store' dictionaries determine how the data will be named
in the hdf5 file.
:param store_data:
How to store the data, see above for a description.
:param store_flags: Flags describing how to store data.
:const:`~pypet.HDF5StorageService.ARRAY` ('ARRAY')
Store stuff as array
:const:`~pypet.HDF5StorageService.CARRAY` ('CARRAY')
Store stuff as carray
:const:`~pypet.HDF5StorageService.TABLE` ('TABLE')
Store stuff as pytable
:const:`~pypet.HDF5StorageService.DICT` ('DICT')
Store stuff as pytable but reconstructs it later as dictionary
on loading
:const:`~pypet.HDF5StorageService.FRAME` ('FRAME')
Store stuff as pandas data frame
Storage flags can also be provided by the parameters and results themselves
if they implement a function '_store_flags' that returns a dictionary
with the names of the data to store as keys and the flags as values.
If no storage flags are provided, they are automatically inferred from the
data. See :const:`pypet.HDF5StorageService.TYPE_FLAG_MAPPING` for the mapping
from type to flag.
:param overwrite:
Can be used if parts of a leaf should be replaced. Either a list of
HDF5 names or `True` if this should account for all.
* :const:`pypet.pypetconstants.DELETE` ('DELETE')
Removes an item from disk. Empty group nodes, results and non-explored
parameters can be removed.
:param stuff_to_store: The item to be removed.
:param delete_only:
Potential list of parts of a leaf node that should be deleted.
:param remove_from_item:
If `delete_only` is used, whether deleted nodes should also be erased
from the leaf nodes themseleves.
:param recursive:
If you want to delete a group node you can recursively delete all its
children.
* :const:`pypet.pypetconstants.GROUP` ('GROUP')
:param stuff_to_store: The group to store
:param store_data: How to store data
:param recursive: To recursively load everything below.
:param max_depth:
Maximum depth in case of recursion. `None` for no limit.
* :const:`pypet.pypetconstants.TREE`
Stores a single node or a full subtree
:param stuff_to_store: Node to store
:param store_data: How to store data
:param recursive: Whether to store recursively the whole sub-tree
:param max_depth:
Maximum depth in case of recursion. `None` for no limit.
* :const:`pypet.pypetconstants.DELETE_LINK`
Deletes a link from hard drive
:param name: The full colon separated name of the link
* :const:`pypet.pypetconstants.LIST`
.. _store-lists:
Stores several items at once
:param stuff_to_store:
Iterable whose items are to be stored. Iterable must contain tuples,
for example `[(msg1,item1,arg1,kwargs1),(msg2,item2,arg2,kwargs2),...]`
* :const:`pypet.pypetconstants.ACCESS_DATA`
Requests and manipulates data within the storage.
Storage must be open.
:param stuff_to_store:
A colon separated name to the data path
:param item_name:
The name of the data item to interact with
:param request:
A functional request in form of a string
:param args:
Positional arguments passed to the request
:param kwargs:
Keyword arguments passed to the request
* :const:`pypet.pypetconstants.OPEN_FILE`
Opens the HDF5 file and keeps it open
:param stuff_to_store: ``None``
* :const:`pypet.pypetconstants.CLOSE_FILE`
Closes an HDF5 file that was kept open, must be open before.
:param stuff_to_store: ``None``
* :const:`pypet.pypetconstants.FLUSH`
Flushes an open file, must be open before.
:param stuff_to_store: ``None``
:raises: NoSuchServiceError if message or data is not understood
"""
opened = True
try:
opened = self._srvc_opening_routine('a', msg, kwargs)
if msg == pypetconstants.MERGE:
self._trj_merge_trajectories(*args, **kwargs)
elif msg == pypetconstants.BACKUP:
self._trj_backup_trajectory(stuff_to_store, *args, **kwargs)
elif msg == pypetconstants.PREPARE_MERGE:
self._trj_prepare_merge(stuff_to_store, *args, **kwargs)
elif msg == pypetconstants.TRAJECTORY:
self._trj_store_trajectory(stuff_to_store, *args, **kwargs)
elif msg == pypetconstants.SINGLE_RUN:
self._srn_store_single_run(stuff_to_store, *args, **kwargs)
elif msg in pypetconstants.LEAF:
self._prm_store_parameter_or_result(stuff_to_store, *args, **kwargs)
elif msg == pypetconstants.DELETE:
self._all_delete_parameter_or_result_or_group(stuff_to_store, *args, **kwargs)
elif msg == pypetconstants.GROUP:
self._grp_store_group(stuff_to_store, *args, **kwargs)
elif msg == pypetconstants.TREE:
self._tree_store_sub_branch(stuff_to_store, *args, **kwargs)
elif msg == pypetconstants.DELETE_LINK:
self._lnk_delete_link(stuff_to_store, *args, **kwargs)
elif msg == pypetconstants.LIST:
self._srvc_store_several_items(stuff_to_store, *args, **kwargs)
elif msg == pypetconstants.ACCESS_DATA:
return self._hdf5_interact_with_data(stuff_to_store, *args, **kwargs)
elif msg == pypetconstants.OPEN_FILE:
opened = False # Wee need to keep the file open to allow later interaction
self._keep_open = True
self._node_processing_timer.active = False # This might be open quite long
# so we don't want to display horribly long opening times
elif msg == pypetconstants.CLOSE_FILE:
opened = True # Simply conduct the closing routine afterwards
self._keep_open = False
elif msg == pypetconstants.FLUSH:
self._hdf5file.flush()
else:
raise pex.NoSuchServiceError('I do not know how to handle `%s`' % msg)
except:
self._logger.error('Failed storing `%s`' % str(stuff_to_store))
raise
finally:
self._srvc_closing_routine(opened)
def _srvc_load_several_items(self, iterable, *args, **kwargs):
    """Loads several items from an iterable

    Iterables are supposed to be of a format like `[(msg, item, args, kwargs),...]`
    If `args` and `kwargs` are not part of a tuple, they are taken from the
    current `args` and `kwargs` provided to this function.
    """
    for input_tuple in iterable:
        msg = input_tuple[0]
        item = input_tuple[1]
        # Resolve defaults per item so that a short tuple falls back to the
        # call-level args/kwargs (as documented) instead of silently
        # inheriting the previous tuple's arguments.
        item_args = input_tuple[2] if len(input_tuple) > 2 else args
        item_kwargs = input_tuple[3] if len(input_tuple) > 3 else kwargs
        if len(input_tuple) > 4:
            raise RuntimeError('You shall not pass!')
        self.load(msg, item, *item_args, **item_kwargs)
def _srvc_check_hdf_properties(self, traj):
    """Reads out the properties for storing new data into the hdf5file

    :param traj:
        The trajectory
    """
    # Plain storage attributes: `config.hdf5.<attr_name>` overrides the
    # current attribute value; missing config entries keep the default.
    for attr_name in HDF5StorageService.ATTR_LIST:
        try:
            config = traj.f_get('config.hdf5.' + attr_name).f_get()
            setattr(self, attr_name, config)
        except AttributeError:
            self._logger.debug('Could not find `%s` in traj config, '
                               'using (default) value `%s`.' %
                               (attr_name, str(getattr(self, attr_name))))
    # Overview-table switches live under `config.hdf5.overview.<table>`;
    # the 'parameters'/'config' tables carry an '_overview' suffix there.
    for attr_name, table_name in HDF5StorageService.NAME_TABLE_MAPPING.items():
        try:
            if table_name in ('parameters', 'config'):
                table_name += '_overview'
            config = traj.f_get('config.hdf5.overview.' + table_name).f_get()
            setattr(self, attr_name, config)
        except AttributeError:
            self._logger.debug('Could not find `%s` in traj config, '
                               'using (default) value `%s`.' %
                               (table_name, str(getattr(self, attr_name))))
    # Purge/summary-related attributes use their own config names.
    for attr_name, name in HDF5StorageService.PR_ATTR_NAME_MAPPING.items():
        try:
            config = traj.f_get('config.hdf5.' + name).f_get()
            setattr(self, attr_name, config)
        except AttributeError:
            self._logger.debug('Could not find `%s` in traj config, '
                               'using (default) value `%s`.' %
                               (name, str(getattr(self, attr_name))))
    # Purging duplicate comments only works when the summary tables that
    # track the first occurrence of a comment are enabled.
    if ((not self._overview_results_summary or
            not self._overview_derived_parameters_summary) and
            self._purge_duplicate_comments):
        raise RuntimeError('You chose to purge duplicate comments but disabled a summary '
                           'table. You can only use the purging if you enable '
                           'the summary tables.')
    # Drop cached pytables filters so they are rebuilt with the new settings.
    self._filters = None
def _srvc_store_several_items(self, iterable, *args, **kwargs):
    """Stores several items from an iterable

    Iterables are supposed to be of a format like `[(msg, item, args, kwargs),...]`
    If `args` and `kwargs` are not part of a tuple, they are taken from the
    current `args` and `kwargs` provided to this function.
    """
    for input_tuple in iterable:
        msg = input_tuple[0]
        item = input_tuple[1]
        # Resolve defaults per item so that a short tuple falls back to the
        # call-level args/kwargs (as documented) instead of silently
        # inheriting the previous tuple's arguments.
        item_args = input_tuple[2] if len(input_tuple) > 2 else args
        item_kwargs = input_tuple[3] if len(input_tuple) > 3 else kwargs
        if len(input_tuple) > 4:
            raise RuntimeError('You shall not pass!')
        self.store(msg, item, *item_args, **item_kwargs)
def _srvc_opening_routine(self, mode, msg=None, kwargs=()):
"""Opens an hdf5 file for reading or writing
The file is only opened if it has not been opened before (i.e. `self._hdf5file is None`).
:param mode:
'a' for appending
'r' for reading
Unfortunately, pandas currently does not work with read-only mode.
Thus, if | |
The host name of the database.
"""
return pulumi.get(self, "hostname")
@hostname.setter
def hostname(self, value: pulumi.Input[str]):
    # Write the new hostname into the resource's Pulumi input property bag.
    pulumi.set(self, "hostname", value)
@property
@pulumi.getter
def port(self) -> pulumi.Input[int]:
    """
    (Updatable) The port used to connect to the database.
    """
    return pulumi.get(self, "port")

@port.setter
def port(self, value: pulumi.Input[int]):
    # Write the new port into the resource's Pulumi input property bag.
    pulumi.set(self, "port", value)
@property
@pulumi.getter
def protocol(self) -> pulumi.Input[str]:
    """
    (Updatable) The protocol used to connect to the database.
    """
    return pulumi.get(self, "protocol")

@protocol.setter
def protocol(self, value: pulumi.Input[str]):
    # Write the new protocol into the resource's Pulumi input property bag.
    pulumi.set(self, "protocol", value)
@property
@pulumi.getter
def service(self) -> pulumi.Input[str]:
    """
    (Updatable) The name of the service alias used to connect to the database.
    """
    return pulumi.get(self, "service")

@service.setter
def service(self, value: pulumi.Input[str]):
    # Write the new service alias into the resource's Pulumi input property bag.
    pulumi.set(self, "service", value)
@pulumi.input_type
class ExternalNonContainerDatabaseDatabaseManagementConfigArgs:
    # Input bundle for the Database Management configuration of an external
    # non-container database. All fields are optional Pulumi inputs.
    def __init__(__self__, *,
                 database_management_connection_id: Optional[pulumi.Input[str]] = None,
                 database_management_status: Optional[pulumi.Input[str]] = None,
                 license_model: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] database_management_connection_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the [external database connector](https://docs.cloud.oracle.com/iaas/api/#/en/database/latest/datatypes/CreateExternalDatabaseConnectorDetails).
        :param pulumi.Input[str] database_management_status: The status of the Database Management service.
        :param pulumi.Input[str] license_model: The Oracle license model that applies to the external database.
        """
        # Only set inputs that were actually supplied; omitted optional
        # inputs stay unset in the Pulumi property bag.
        if database_management_connection_id is not None:
            pulumi.set(__self__, "database_management_connection_id", database_management_connection_id)
        if database_management_status is not None:
            pulumi.set(__self__, "database_management_status", database_management_status)
        if license_model is not None:
            pulumi.set(__self__, "license_model", license_model)

    @property
    @pulumi.getter(name="databaseManagementConnectionId")
    def database_management_connection_id(self) -> Optional[pulumi.Input[str]]:
        """
        The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the [external database connector](https://docs.cloud.oracle.com/iaas/api/#/en/database/latest/datatypes/CreateExternalDatabaseConnectorDetails).
        """
        return pulumi.get(self, "database_management_connection_id")

    @database_management_connection_id.setter
    def database_management_connection_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "database_management_connection_id", value)

    @property
    @pulumi.getter(name="databaseManagementStatus")
    def database_management_status(self) -> Optional[pulumi.Input[str]]:
        """
        The status of the Database Management service.
        """
        return pulumi.get(self, "database_management_status")

    @database_management_status.setter
    def database_management_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "database_management_status", value)

    @property
    @pulumi.getter(name="licenseModel")
    def license_model(self) -> Optional[pulumi.Input[str]]:
        """
        The Oracle license model that applies to the external database.
        """
        return pulumi.get(self, "license_model")

    @license_model.setter
    def license_model(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "license_model", value)
@pulumi.input_type
class ExternalNonContainerDatabaseOperationsInsightsConfigArgs:
    # Input bundle for the Operations Insights configuration of an external
    # non-container database. All fields are optional Pulumi inputs.
    def __init__(__self__, *,
                 operations_insights_connector_id: Optional[pulumi.Input[str]] = None,
                 operations_insights_status: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] operations_insights_connector_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the [external database connector](https://docs.cloud.oracle.com/iaas/api/#/en/database/latest/datatypes/CreateExternalDatabaseConnectorDetails).
        :param pulumi.Input[str] operations_insights_status: The status of Operations Insights
        """
        # Only set inputs that were actually supplied; omitted optional
        # inputs stay unset in the Pulumi property bag.
        if operations_insights_connector_id is not None:
            pulumi.set(__self__, "operations_insights_connector_id", operations_insights_connector_id)
        if operations_insights_status is not None:
            pulumi.set(__self__, "operations_insights_status", operations_insights_status)

    @property
    @pulumi.getter(name="operationsInsightsConnectorId")
    def operations_insights_connector_id(self) -> Optional[pulumi.Input[str]]:
        """
        The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the [external database connector](https://docs.cloud.oracle.com/iaas/api/#/en/database/latest/datatypes/CreateExternalDatabaseConnectorDetails).
        """
        return pulumi.get(self, "operations_insights_connector_id")

    @operations_insights_connector_id.setter
    def operations_insights_connector_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "operations_insights_connector_id", value)

    @property
    @pulumi.getter(name="operationsInsightsStatus")
    def operations_insights_status(self) -> Optional[pulumi.Input[str]]:
        """
        The status of Operations Insights
        """
        return pulumi.get(self, "operations_insights_status")

    @operations_insights_status.setter
    def operations_insights_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "operations_insights_status", value)
@pulumi.input_type
class ExternalPluggableDatabaseDatabaseManagementConfigArgs:
    """Database Management service configuration for an external pluggable database.

    NOTE(review): generated provider-SDK input type; only documentation added.
    """

    def __init__(__self__, *,
                 database_management_connection_id: Optional[pulumi.Input[str]] = None,
                 database_management_status: Optional[pulumi.Input[str]] = None,
                 license_model: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] database_management_connection_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the [external database connector](https://docs.cloud.oracle.com/iaas/api/#/en/database/latest/datatypes/CreateExternalDatabaseConnectorDetails).
        :param pulumi.Input[str] database_management_status: The status of the Database Management service.
        :param pulumi.Input[str] license_model: The Oracle license model that applies to the external database.
        """
        # Only populate fields the caller actually supplied; absent fields stay unset.
        if database_management_connection_id is not None:
            pulumi.set(__self__, "database_management_connection_id", database_management_connection_id)
        if database_management_status is not None:
            pulumi.set(__self__, "database_management_status", database_management_status)
        if license_model is not None:
            pulumi.set(__self__, "license_model", license_model)

    @property
    @pulumi.getter(name="databaseManagementConnectionId")
    def database_management_connection_id(self) -> Optional[pulumi.Input[str]]:
        """
        The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the [external database connector](https://docs.cloud.oracle.com/iaas/api/#/en/database/latest/datatypes/CreateExternalDatabaseConnectorDetails).
        """
        return pulumi.get(self, "database_management_connection_id")

    @database_management_connection_id.setter
    def database_management_connection_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "database_management_connection_id", value)

    @property
    @pulumi.getter(name="databaseManagementStatus")
    def database_management_status(self) -> Optional[pulumi.Input[str]]:
        """
        The status of the Database Management service.
        """
        return pulumi.get(self, "database_management_status")

    @database_management_status.setter
    def database_management_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "database_management_status", value)

    @property
    @pulumi.getter(name="licenseModel")
    def license_model(self) -> Optional[pulumi.Input[str]]:
        """
        The Oracle license model that applies to the external database.
        """
        return pulumi.get(self, "license_model")

    @license_model.setter
    def license_model(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "license_model", value)
@pulumi.input_type
class ExternalPluggableDatabaseOperationsInsightsConfigArgs:
    """Operations Insights configuration for an external pluggable database.

    NOTE(review): generated provider-SDK input type; only documentation added.
    """

    def __init__(__self__, *,
                 operations_insights_connector_id: Optional[pulumi.Input[str]] = None,
                 operations_insights_status: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] operations_insights_connector_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the [external database connector](https://docs.cloud.oracle.com/iaas/api/#/en/database/latest/datatypes/CreateExternalDatabaseConnectorDetails).
        :param pulumi.Input[str] operations_insights_status: The status of Operations Insights
        """
        # Only populate fields the caller actually supplied; absent fields stay unset.
        if operations_insights_connector_id is not None:
            pulumi.set(__self__, "operations_insights_connector_id", operations_insights_connector_id)
        if operations_insights_status is not None:
            pulumi.set(__self__, "operations_insights_status", operations_insights_status)

    @property
    @pulumi.getter(name="operationsInsightsConnectorId")
    def operations_insights_connector_id(self) -> Optional[pulumi.Input[str]]:
        """
        The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the [external database connector](https://docs.cloud.oracle.com/iaas/api/#/en/database/latest/datatypes/CreateExternalDatabaseConnectorDetails).
        """
        return pulumi.get(self, "operations_insights_connector_id")

    @operations_insights_connector_id.setter
    def operations_insights_connector_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "operations_insights_connector_id", value)

    @property
    @pulumi.getter(name="operationsInsightsStatus")
    def operations_insights_status(self) -> Optional[pulumi.Input[str]]:
        """
        The status of Operations Insights
        """
        return pulumi.get(self, "operations_insights_status")

    @operations_insights_status.setter
    def operations_insights_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "operations_insights_status", value)
@pulumi.input_type
class KeyStoreAssociatedDatabaseArgs:
    """A database associated with a key store.

    NOTE(review): generated provider-SDK input type; only documentation added.
    """

    def __init__(__self__, *,
                 db_name: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] db_name: The name of the database that is associated with the key store.
        :param pulumi.Input[str] id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the key store.
        """
        # Only populate fields the caller actually supplied; absent fields stay unset.
        if db_name is not None:
            pulumi.set(__self__, "db_name", db_name)
        if id is not None:
            pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter(name="dbName")
    def db_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the database that is associated with the key store.
        """
        return pulumi.get(self, "db_name")

    @db_name.setter
    def db_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "db_name", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the key store.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
@pulumi.input_type
class KeyStoreTypeDetailsArgs:
    """Connection details for an Oracle Key Vault key store.

    All five arguments are required (unlike the mostly-optional sibling
    input types in this module).
    NOTE(review): generated provider-SDK input type; only documentation added.
    """

    def __init__(__self__, *,
                 admin_username: pulumi.Input[str],
                 connection_ips: pulumi.Input[Sequence[pulumi.Input[str]]],
                 secret_id: pulumi.Input[str],
                 type: pulumi.Input[str],
                 vault_id: pulumi.Input[str]):
        """
        :param pulumi.Input[str] admin_username: (Updatable) The administrator username to connect to Oracle Key Vault
        :param pulumi.Input[Sequence[pulumi.Input[str]]] connection_ips: (Updatable) The list of Oracle Key Vault connection IP addresses.
        :param pulumi.Input[str] secret_id: (Updatable) The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the Oracle Cloud Infrastructure [secret](https://docs.cloud.oracle.com/iaas/Content/KeyManagement/Concepts/keyoverview.htm#concepts).
        :param pulumi.Input[str] type: (Updatable) The type of key store.
        :param pulumi.Input[str] vault_id: (Updatable) The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the Oracle Cloud Infrastructure [vault](https://docs.cloud.oracle.com/iaas/Content/KeyManagement/Concepts/keyoverview.htm#concepts).
        """
        pulumi.set(__self__, "admin_username", admin_username)
        pulumi.set(__self__, "connection_ips", connection_ips)
        pulumi.set(__self__, "secret_id", secret_id)
        pulumi.set(__self__, "type", type)
        pulumi.set(__self__, "vault_id", vault_id)

    @property
    @pulumi.getter(name="adminUsername")
    def admin_username(self) -> pulumi.Input[str]:
        """
        (Updatable) The administrator username to connect to Oracle Key Vault
        """
        return pulumi.get(self, "admin_username")

    @admin_username.setter
    def admin_username(self, value: pulumi.Input[str]):
        pulumi.set(self, "admin_username", value)

    @property
    @pulumi.getter(name="connectionIps")
    def connection_ips(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        (Updatable) The list of Oracle Key Vault connection IP addresses.
        """
        return pulumi.get(self, "connection_ips")

    @connection_ips.setter
    def connection_ips(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "connection_ips", value)

    @property
    @pulumi.getter(name="secretId")
    def secret_id(self) -> pulumi.Input[str]:
        """
        (Updatable) The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the Oracle Cloud Infrastructure [secret](https://docs.cloud.oracle.com/iaas/Content/KeyManagement/Concepts/keyoverview.htm#concepts).
        """
        return pulumi.get(self, "secret_id")

    @secret_id.setter
    def secret_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "secret_id", value)

    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        (Updatable) The type of key store.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)

    @property
    @pulumi.getter(name="vaultId")
    def vault_id(self) -> pulumi.Input[str]:
        """
        (Updatable) The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the Oracle Cloud Infrastructure [vault](https://docs.cloud.oracle.com/iaas/Content/KeyManagement/Concepts/keyoverview.htm#concepts).
        """
        return pulumi.get(self, "vault_id")

    @vault_id.setter
    def vault_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "vault_id", value)
@pulumi.input_type
class MigrationAdditionalMigrationArgs:
    """Identifiers produced by an additional database migration.

    NOTE(review): generated provider-SDK input type; only documentation added.
    """

    def __init__(__self__, *,
                 cloud_exadata_infrastructure_id: Optional[pulumi.Input[str]] = None,
                 cloud_vm_cluster_id: Optional[pulumi.Input[str]] = None,
                 db_system_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] cloud_exadata_infrastructure_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the cloud Exadata infrastructure.
        :param pulumi.Input[str] cloud_vm_cluster_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the cloud VM cluster.
        :param pulumi.Input[str] db_system_id: The DB system [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
        """
        # Only populate fields the caller actually supplied; absent fields stay unset.
        if cloud_exadata_infrastructure_id is not None:
            pulumi.set(__self__, "cloud_exadata_infrastructure_id", cloud_exadata_infrastructure_id)
        if cloud_vm_cluster_id is not None:
            pulumi.set(__self__, "cloud_vm_cluster_id", cloud_vm_cluster_id)
        if db_system_id is not None:
            pulumi.set(__self__, "db_system_id", db_system_id)

    @property
    @pulumi.getter(name="cloudExadataInfrastructureId")
    def cloud_exadata_infrastructure_id(self) -> Optional[pulumi.Input[str]]:
        """
        The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the cloud Exadata infrastructure.
        """
        return pulumi.get(self, "cloud_exadata_infrastructure_id")

    @cloud_exadata_infrastructure_id.setter
    def cloud_exadata_infrastructure_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cloud_exadata_infrastructure_id", value)

    @property
    @pulumi.getter(name="cloudVmClusterId")
    def cloud_vm_cluster_id(self) -> Optional[pulumi.Input[str]]:
        """
        The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the cloud VM cluster.
        """
        return pulumi.get(self, "cloud_vm_cluster_id")

    @cloud_vm_cluster_id.setter
    def cloud_vm_cluster_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cloud_vm_cluster_id", value)

    @property
    @pulumi.getter(name="dbSystemId")
    def db_system_id(self) -> Optional[pulumi.Input[str]]:
        """
        The DB system [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
        """
        return pulumi.get(self, "db_system_id")

    @db_system_id.setter
    def db_system_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "db_system_id", value)
@pulumi.input_type
class PluggableDatabaseConnectionStringsArgs:
    """Connection strings for a pluggable database (PDB).

    NOTE(review): generated provider-SDK input type; only documentation added.
    """

    def __init__(__self__, *,
                 all_connection_strings: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 pdb_default: Optional[pulumi.Input[str]] = None,
                 pdb_ip_default: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[Mapping[str, Any]] all_connection_strings: All connection strings to use to connect to the pluggable database.
        :param pulumi.Input[str] pdb_default: A host name-based PDB connection string.
        :param pulumi.Input[str] pdb_ip_default: An IP-based PDB connection string.
        """
        # Only populate fields the caller actually supplied; absent fields stay unset.
        if all_connection_strings is not None:
            pulumi.set(__self__, "all_connection_strings", all_connection_strings)
        if pdb_default is not None:
            pulumi.set(__self__, "pdb_default", pdb_default)
        if pdb_ip_default is not None:
            pulumi.set(__self__, "pdb_ip_default", pdb_ip_default)

    @property
    @pulumi.getter(name="allConnectionStrings")
    def all_connection_strings(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        All connection strings to use to connect to the pluggable database.
        """
        return pulumi.get(self, "all_connection_strings")

    @all_connection_strings.setter
    def all_connection_strings(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "all_connection_strings", value)

    @property
    @pulumi.getter(name="pdbDefault")
    def pdb_default(self) -> Optional[pulumi.Input[str]]:
        """
        A host name-based PDB connection string.
        """
        return pulumi.get(self, "pdb_default")

    @pdb_default.setter
    def pdb_default(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pdb_default", value)

    @property
    @pulumi.getter(name="pdbIpDefault")
    def pdb_ip_default(self) -> Optional[pulumi.Input[str]]:
        """
        An IP-based PDB connection string.
        """
        return pulumi.get(self, "pdb_ip_default")

    @pdb_ip_default.setter
    def pdb_ip_default(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pdb_ip_default", value)
@pulumi.input_type
class PluggableDatabasesLocalCloneConnectionStringsArgs:
def __init__(__self__, *,
all_connection_strings: Optional[pulumi.Input[Mapping[str, Any]]] = None,
pdb_default: Optional[pulumi.Input[str]] = None,
pdb_ip_default: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[Mapping[str, Any]] all_connection_strings: All connection strings to use | |
"""DLKit Services implementations of cataloging service."""
# pylint: disable=no-init
# osid specification includes some 'marker' interfaces.
# pylint: disable=too-many-ancestors
# number of ancestors defined in spec.
# pylint: disable=too-few-public-methods,too-many-public-methods
# number of methods defined in spec. Worse yet, these are aggregates.
# pylint: disable=invalid-name
# method and class names defined in spec.
# pylint: disable=no-self-use,unused-argument
# to catch unimplemented methods.
# pylint: disable=super-init-not-called
# it just isn't.
from . import osid
from .osid_errors import Unimplemented, IllegalState, InvalidArgument
from dlkit.abstract_osid.cataloging import objects as abc_cataloging_objects
from dlkit.manager_impls.cataloging import managers as cataloging_managers
# View and session-management constants shared by the convenience adapters below.
DEFAULT = 0
COMPARATIVE = 0    # comparative object/catalog view
PLENARY = 1        # plenary object/catalog view
FEDERATED = 0      # federated catalog view
ISOLATED = 1       # isolated catalog view
ANY_STATUS = 0
ACTIVE = 1
UNSEQUESTERED = 0
SEQUESTERED = 1
AUTOMATIC = 0      # session-management mode: cache sessions, allow closing
MANDATORY = 1      # session-management mode: cache sessions, forbid closing
DISABLED = -1      # session-management mode: never cache sessions
class CatalogingProfile(osid.OsidProfile, cataloging_managers.CatalogingProfile):
    """CatalogingProfile convenience adapter including related Session methods.

    Every method here is a thin pass-through to ``self._provider_manager``,
    which subclasses (e.g. ``CatalogingManager``) attach during ``initialize``.
    """

    def __init__(self):
        # Provider manager is attached later by the subclass's initialize().
        self._provider_manager = None

    def supports_catalog_lookup(self):
        """Pass through to provider supports_catalog_lookup"""
        # Implemented from kitosid template for -
        # osid.resource.ResourceProfile.supports_resource_lookup
        return self._provider_manager.supports_catalog_lookup()

    def supports_catalog_query(self):
        """Pass through to provider supports_catalog_query"""
        # Implemented from kitosid template for -
        # osid.resource.ResourceProfile.supports_resource_lookup
        return self._provider_manager.supports_catalog_query()

    def supports_catalog_admin(self):
        """Pass through to provider supports_catalog_admin"""
        # Implemented from kitosid template for -
        # osid.resource.ResourceProfile.supports_resource_lookup
        return self._provider_manager.supports_catalog_admin()

    def supports_catalog_hierarchy(self):
        """Pass through to provider supports_catalog_hierarchy"""
        # Implemented from kitosid template for -
        # osid.resource.ResourceProfile.supports_resource_lookup
        return self._provider_manager.supports_catalog_hierarchy()

    def supports_catalog_hierarchy_design(self):
        """Pass through to provider supports_catalog_hierarchy_design"""
        # Implemented from kitosid template for -
        # osid.resource.ResourceProfile.supports_resource_lookup
        return self._provider_manager.supports_catalog_hierarchy_design()

    def get_catalog_record_types(self):
        """Pass through to provider get_catalog_record_types"""
        # Implemented from kitosid template for -
        # osid.resource.ResourceProfile.get_resource_record_types
        return self._provider_manager.get_catalog_record_types()

    catalog_record_types = property(fget=get_catalog_record_types)

    def get_catalog_search_record_types(self):
        """Pass through to provider get_catalog_search_record_types"""
        # Implemented from kitosid template for -
        # osid.resource.ResourceProfile.get_resource_record_types
        return self._provider_manager.get_catalog_search_record_types()

    catalog_search_record_types = property(fget=get_catalog_search_record_types)
class CatalogingManager(osid.OsidManager, osid.OsidSession, CatalogingProfile, cataloging_managers.CatalogingManager):
"""CatalogingManager convenience adapter including related Session methods."""
    def __init__(self, proxy=None):
        """Initializes adapter state; ``initialize(runtime)`` attaches the provider manager."""
        self._runtime = None
        self._provider_manager = None
        self._provider_sessions = dict()  # keyed by agent key, then session name
        self._session_management = AUTOMATIC
        self._catalog_view = DEFAULT
        # This is to initialize self._proxy
        osid.OsidSession.__init__(self, proxy)
        self._sub_package_provider_managers = dict()
def _set_catalog_view(self, session):
"""Sets the underlying catalog view to match current view"""
if self._catalog_view == COMPARATIVE:
try:
session.use_comparative_catalog_view()
except AttributeError:
pass
else:
try:
session.use_plenary_catalog_view()
except AttributeError:
pass
def _get_provider_session(self, session_name, proxy=None):
"""Gets the session for the provider"""
agent_key = self._get_agent_key(proxy)
if session_name in self._provider_sessions[agent_key]:
return self._provider_sessions[agent_key][session_name]
else:
session = self._instantiate_session('get_' + session_name, self._proxy)
self._set_catalog_view(session)
if self._session_management != DISABLED:
self._provider_sessions[agent_key][session_name] = session
return session
def _get_sub_package_provider_manager(self, sub_package_name):
if sub_package_name in self._sub_package_provider_managers:
return self._sub_package_provider_managers[sub_package_name]
config = self._runtime.get_configuration()
parameter_id = Id('parameter:{0}ProviderImpl@dlkit_service'.format(sub_package_name))
provider_impl = config.get_value_by_parameter(parameter_id).get_string_value()
if self._proxy is None:
# need to add version argument
sub_package = self._runtime.get_manager(sub_package_name.upper(), provider_impl)
else:
# need to add version argument
sub_package = self._runtime.get_proxy_manager(sub_package_name.upper(), provider_impl)
self._sub_package_provider_managers[sub_package_name] = sub_package
return sub_package
def _get_sub_package_provider_session(self, sub_package, session_name, proxy=None):
"""Gets the session from a sub-package"""
agent_key = self._get_agent_key(proxy)
if session_name in self._provider_sessions[agent_key]:
return self._provider_sessions[agent_key][session_name]
else:
manager = self._get_sub_package_provider_manager(sub_package)
try:
session = self._instantiate_session('get_' + session_name + '_for_bank',
proxy=self._proxy,
manager=manager)
except AttributeError:
session = self._instantiate_session('get_' + session_name,
proxy=self._proxy,
manager=manager)
self._set_bank_view(session)
if self._session_management != DISABLED:
self._provider_sessions[agent_key][session_name] = session
return session
    def _instantiate_session(self, method_name, proxy=None, *args, **kwargs):
        """Instantiates a provider session"""
        if 'manager' in kwargs:
            # An explicit manager was supplied (sub-package case): use its getter.
            session_class = getattr(kwargs['manager'], method_name)
            del kwargs['manager']
        else:
            session_class = getattr(self._provider_manager, method_name)
        if proxy is None:
            try:
                # NOTE(review): appears to rely on AttributeError from the
                # self._catalog_id lookup when no catalog id is set on this
                # manager — TODO confirm; a TypeError from an unexpected
                # bank_id kwarg would NOT be caught here.
                return session_class(bank_id=self._catalog_id, *args, **kwargs)
            except AttributeError:
                return session_class(*args, **kwargs)
        else:
            try:
                return session_class(bank_id=self._catalog_id, proxy=proxy, *args, **kwargs)
            except AttributeError:
                return session_class(proxy=proxy, *args, **kwargs)
    def initialize(self, runtime):
        """OSID Manager initialize

        Looks up the configured provider implementation name and attaches the
        corresponding (proxy) manager. May only be called once.

        :param runtime: the OSID runtime supplying configuration and managers
        :raise: IllegalState -- if already initialized
        """
        from .primitives import Id
        if self._runtime is not None:
            raise IllegalState('Manager has already been initialized')
        self._runtime = runtime
        config = runtime.get_configuration()
        parameter_id = Id('parameter:catalogingProviderImpl@dlkit_service')
        provider_impl = config.get_value_by_parameter(parameter_id).get_string_value()
        if self._proxy is None:
            # need to add version argument
            self._provider_manager = runtime.get_manager('CATALOGING', provider_impl)
        else:
            # need to add version argument
            self._provider_manager = runtime.get_proxy_manager('CATALOGING', provider_impl)
    def close_sessions(self):
        """Close all sessions, unless session management is set to MANDATORY"""
        if self._session_management != MANDATORY:
            # Dropping the cache dict releases every cached provider session.
            self._provider_sessions = dict()
    def use_automatic_session_management(self):
        """Session state will be saved unless closed by consumers"""
        self._session_management = AUTOMATIC
    def use_mandatory_session_management(self):
        """Session state will be saved and can not be closed by consumers"""
        self._session_management = MANDATORY
    def disable_session_management(self):
        """Session state will never be saved"""
        self._session_management = DISABLED
        # Also drop anything already cached (MANDATORY is no longer set).
        self.close_sessions()
    # Manager-level session getters: these delegate straight to the provider
    # manager and are NOT cached in self._provider_sessions.

    def get_catalog_lookup_session(self, *args, **kwargs):
        """Pass through to provider get_catalog_lookup_session"""
        # Implemented from kitosid template for -
        # osid.resource.ResourceManager.get_resource_lookup_session_manager_template
        return self._provider_manager.get_catalog_lookup_session(*args, **kwargs)

    catalog_lookup_session = property(fget=get_catalog_lookup_session)

    def get_catalog_query_session(self, *args, **kwargs):
        """Pass through to provider get_catalog_query_session"""
        # Implemented from kitosid template for -
        # osid.resource.ResourceManager.get_resource_lookup_session_manager_template
        return self._provider_manager.get_catalog_query_session(*args, **kwargs)

    catalog_query_session = property(fget=get_catalog_query_session)

    def get_catalog_admin_session(self, *args, **kwargs):
        """Pass through to provider get_catalog_admin_session"""
        # Implemented from kitosid template for -
        # osid.resource.ResourceManager.get_resource_lookup_session_manager_template
        return self._provider_manager.get_catalog_admin_session(*args, **kwargs)

    catalog_admin_session = property(fget=get_catalog_admin_session)

    def get_catalog_hierarchy_session(self, *args, **kwargs):
        """Pass through to provider get_catalog_hierarchy_session"""
        # Implemented from kitosid template for -
        # osid.resource.ResourceManager.get_resource_lookup_session_manager_template
        return self._provider_manager.get_catalog_hierarchy_session(*args, **kwargs)

    catalog_hierarchy_session = property(fget=get_catalog_hierarchy_session)

    def get_catalog_hierarchy_design_session(self, *args, **kwargs):
        """Pass through to provider get_catalog_hierarchy_design_session"""
        # Implemented from kitosid template for -
        # osid.resource.ResourceManager.get_resource_lookup_session_manager_template
        return self._provider_manager.get_catalog_hierarchy_design_session(*args, **kwargs)

    catalog_hierarchy_design_session = property(fget=get_catalog_hierarchy_design_session)

    def get_cataloging_rules_manager(self, *args, **kwargs):
        """Pass through to provider unimplemented"""
        raise Unimplemented('Unimplemented in dlkit.services')

    cataloging_rules_manager = property(fget=get_cataloging_rules_manager)

    ##
    # The following methods are from osid.cataloging.CatalogLookupSession
    def can_lookup_catalogs(self):
        """Pass through to provider CatalogLookupSession.can_lookup_catalogs"""
        # Implemented from kitosid template for -
        # osid.resource.BinLookupSession.can_lookup_bins_template
        return self._get_provider_session('catalog_lookup_session').can_lookup_catalogs()

    def use_comparative_catalog_view(self):
        """Pass through to provider CatalogLookupSession.use_comparative_catalog_view"""
        self._catalog_view = COMPARATIVE
        # self._get_provider_session('catalog_lookup_session')  # To make sure the session is tracked
        # Propagate the view change to every already-open provider session.
        for session in self._get_provider_sessions():
            try:
                session.use_comparative_catalog_view()
            except AttributeError:
                pass

    def use_plenary_catalog_view(self):
        """Pass through to provider CatalogLookupSession.use_plenary_catalog_view"""
        self._catalog_view = PLENARY
        # self._get_provider_session('catalog_lookup_session')  # To make sure the session is tracked
        # Propagate the view change to every already-open provider session.
        for session in self._get_provider_sessions():
            try:
                session.use_plenary_catalog_view()
            except AttributeError:
                pass
    def get_catalog(self, *args, **kwargs):
        """Pass through to provider CatalogLookupSession.get_catalog"""
        # Implemented from kitosid template for -
        # osid.resource.BinLookupSession.get_bin
        # Wrap the provider's raw catalog in the service-layer Catalog adapter.
        return Catalog(
            self._provider_manager,
            self._get_provider_session('catalog_lookup_session').get_catalog(*args, **kwargs),
            self._runtime,
            self._proxy)
def get_catalogs_by_ids(self, *args, **kwargs):
"""Pass through to provider CatalogLookupSession.get_catalogs_by_ids"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bins_by_ids
catalogs = self._get_provider_session('catalog_lookup_session').get_catalogs_by_ids(*args, **kwargs)
cat_list = []
for cat in catalogs:
cat_list.append(Catalog(self._provider_manager, cat, self._runtime, self._proxy))
return CatalogList(cat_list)
def get_catalogs_by_genus_type(self, *args, **kwargs):
"""Pass through to provider CatalogLookupSession.get_catalogs_by_genus_type"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bins_by_genus_type
catalogs = self._get_provider_session('catalog_lookup_session').get_catalogs_by_genus_type(*args, **kwargs)
cat_list = []
for cat in catalogs:
cat_list.append(Catalog(self._provider_manager, cat, self._runtime, self._proxy))
return CatalogList(cat_list)
def get_catalogs_by_parent_genus_type(self, *args, **kwargs):
"""Pass through to provider CatalogLookupSession.get_catalogs_by_parent_genus_type"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bins_by_parent_genus_type
catalogs = self._get_provider_session('catalog_lookup_session').get_catalogs_by_parent_genus_type(*args, **kwargs)
cat_list = []
for cat in catalogs:
cat_list.append(Catalog(self._provider_manager, cat, self._runtime, self._proxy))
return CatalogList(cat_list)
def get_catalogs_by_record_type(self, *args, **kwargs):
"""Pass through to provider CatalogLookupSession.get_catalogs_by_record_type"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bins_by_record_type
catalogs = self._get_provider_session('catalog_lookup_session').get_catalogs_by_record_type(*args, **kwargs)
cat_list = []
for cat in catalogs:
cat_list.append(Catalog(self._provider_manager, cat, self._runtime, self._proxy))
return CatalogList(cat_list)
def get_catalogs_by_provider(self, *args, **kwargs):
"""Pass through to provider CatalogLookupSession.get_catalogs_by_provider"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bins_by_provider
catalogs = self._get_provider_session('catalog_lookup_session').get_catalogs_by_provider(*args, **kwargs)
cat_list = []
for cat in catalogs:
cat_list.append(Catalog(self._provider_manager, cat, self._runtime, self._proxy))
return CatalogList(cat_list)
def get_catalogs(self):
"""Pass through to provider CatalogLookupSession.get_catalogs"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bins_template
catalogs = self._get_provider_session('catalog_lookup_session').get_catalogs()
cat_list = []
for cat in catalogs:
cat_list.append(Catalog(self._provider_manager, cat, self._runtime, self._proxy))
return CatalogList(cat_list)
catalogs = property(fget=get_catalogs)
    ##
    # The following methods are from osid.cataloging.CatalogQuerySession

    def can_search_catalogs(self):
        """Pass through to provider CatalogQuerySession.can_search_catalogs"""
        # Implemented from kitosid template for -
        # osid.resource.BinQuerySession.can_search_bins_template
        return self._get_provider_session('catalog_query_session').can_search_catalogs()

    def get_catalog_query(self):
        """Pass through to provider CatalogQuerySession.get_catalog_query"""
        # Implemented from kitosid template for -
        # osid.resource.BinQuerySession.get_bin_query_template
        return self._get_provider_session('catalog_query_session').get_catalog_query()

    catalog_query = property(fget=get_catalog_query)

    def get_catalogs_by_query(self, *args, **kwargs):
        """Pass through to provider CatalogQuerySession.get_catalogs_by_query"""
        # Implemented from kitosid template for -
        # osid.resource.BinQuerySession.get_bins_by_query_template
        # NOTE(review): unlike the lookup methods above, the result is NOT
        # wrapped in service-layer Catalog adapters — confirm this is intended.
        return self._get_provider_session('catalog_query_session').get_catalogs_by_query(*args, **kwargs)
    ##
    # The following methods are from osid.cataloging.CatalogAdminSession

    def can_create_catalogs(self):
        """Pass through to provider CatalogAdminSession.can_create_catalogs"""
        # Implemented from kitosid template for -
        # osid.resource.BinAdminSession.can_create_bins
        return self._get_provider_session('catalog_admin_session').can_create_catalogs()

    def can_create_catalog_with_record_types(self, *args, **kwargs):
        """Pass through to provider CatalogAdminSession.can_create_catalog_with_record_types"""
        # Implemented from kitosid template for -
        # osid.resource.BinAdminSession.can_create_bin_with_record_types
        return self._get_provider_session('catalog_admin_session').can_create_catalog_with_record_types(*args, **kwargs)

    def get_catalog_form_for_create(self, *args, **kwargs):
        """Pass through to provider CatalogAdminSession.get_catalog_form_for_create"""
        # Implemented from kitosid template for -
        # osid.resource.BinAdminSession.get_bin_form_for_create
        return self._get_provider_session('catalog_admin_session').get_catalog_form_for_create(*args, **kwargs)

    def create_catalog(self, *args, **kwargs):
        """Pass through to provider CatalogAdminSession.create_catalog"""
        # Implemented from kitosid template for -
        # osid.resource.BinAdminSession.create_bin
        # Wrap the newly created catalog in the service-layer Catalog adapter.
        return Catalog(
            self._provider_manager,
            self._get_provider_session('catalog_admin_session').create_catalog(*args, **kwargs),
            self._runtime,
            self._proxy)

    def can_update_catalogs(self):
        """Pass through to provider CatalogAdminSession.can_update_catalogs"""
        # Implemented from kitosid template for -
        # osid.resource.BinAdminSession.can_update_bins
        return self._get_provider_session('catalog_admin_session').can_update_catalogs()

    def get_catalog_form_for_update(self, *args, **kwargs):
        """Pass through to provider CatalogAdminSession.get_catalog_form_for_update"""
        # Implemented from kitosid template for -
        # osid.resource.BinAdminSession.get_bin_form_for_update
        return self._get_provider_session('catalog_admin_session').get_catalog_form_for_update(*args, **kwargs)
def get_catalog_form(self, *args, **kwargs):
"""Pass through to provider CatalogAdminSession.get_catalog_form_for_update"""
# Implemented from | |
from __future__ import division
import os
import pickle
import scitbx.lbfgs
from libtbx import easy_pickle
from libtbx import adopt_init_args
from cctbx import xray
from scitbx.array_family import flex
import calculator as calculator_module
from libtbx import group_args
from ase.optimize.lbfgs import LBFGS
import numpy
from libtbx.test_utils import approx_equal
class convergence(object):
    """Convergence bookkeeping for refinement.

    Tracks R-work / R-free / gap histories and restraints-weight scales across
    macro-cycles and decides when refinement has converged or the geometry has
    blown up. Tolerances come from ``params.refine``.
    """

    def __init__(self, params, fmodel=None, xray_structure=None):
        # Baseline R-work and coordinates: taken from fmodel when available,
        # otherwise (geometry-only case) from the bare xray_structure.
        self.r_start=None
        if(fmodel is not None):
            self.r_start = fmodel.r_work()
            self.sites_cart_start = fmodel.xray_structure.sites_cart()
        else:
            self.sites_cart_start = xray_structure.sites_cart()
        self.r_tolerance=params.refine.r_tolerance
        self.max_bond_rmsd=params.refine.max_bond_rmsd
        self.rmsd_tolerance=params.refine.rmsd_tolerance
        self.use_convergence_test = params.refine.use_convergence_test
        self.number_of_convergence_occurances=0
        #
        # Per-cycle histories.
        self.rws = flex.double()                       # R-work per cycle
        self.rfs = flex.double()                       # R-free per cycle
        self.gaps = flex.double()                      # R-free minus R-work
        self.restraints_weight_scales = flex.double()  # weight-scale trail

    def is_converged(self, fmodel, bond_rmsd=None, restraints_weight_scale=None):
        """Record the current cycle's statistics and report convergence.

        Converges when (a) the restraints weight scale repeats among the last
        three recorded values, or (b) R-work and coordinate shifts fall within
        tolerance while bond RMSD is acceptable, or R-work drops below 0.005.
        Always updates the stored baselines as a side effect.
        """
        #
        if not self.use_convergence_test: return False
        #
        rw = fmodel.r_work()
        rf = fmodel.r_free()
        gap = rf-rw
        self.rws .append(rw)
        self.rfs .append(rf)
        self.gaps .append(gap)
        if(restraints_weight_scale is not None):
            self.restraints_weight_scales.append(restraints_weight_scale)
        #
        # Convergence via repeating restraints weight scale in the last three.
        if(restraints_weight_scale is not None):
            rwc = self.restraints_weight_scales
            i_last = self.rws.size()-1
            if(i_last>3):
                rwc_3 = rwc[i_last]
                rwc_2 = rwc[i_last-1]
                rwc_1 = rwc[i_last-2]
                rws123 = [rwc_1, rwc_2, rwc_3]
                for rwc_i in rws123:
                    if(rws123.count(rwc_i)>1):
                        return True
        #
        # Convergence via small R and coordinate changes (baselines updated first).
        sites_cart = fmodel.xray_structure.sites_cart()
        r_diff=abs(self.r_start-rw)
        rmsd_diff=self.sites_cart_start.rms_difference(sites_cart)
        self.sites_cart_start = sites_cart
        self.r_start=rw
        if(r_diff<self.r_tolerance and rmsd_diff<self.rmsd_tolerance and
           (bond_rmsd is not None and bond_rmsd<self.max_bond_rmsd)):
            # NOTE(review): incrementing by 2 and testing ==2 means the very
            # first qualifying cycle triggers convergence; an increment of 1
            # (requiring two occurrences) may have been intended — confirm.
            self.number_of_convergence_occurances+=2
            if(self.number_of_convergence_occurances==2 or rw<0.005):
                # use_convergence_test is known True here, so the conjunction
                # is effectively just True/False.
                return True and self.use_convergence_test
        else: return False and self.use_convergence_test

    def is_geometry_converged(self, sites_cart):
        """True when the coordinate shift since the stored baseline is within
        tolerance. NOTE(review): returns None (falsy), not False, otherwise."""
        if not self.use_convergence_test: return False
        rmsd_diff=self.sites_cart_start.rms_difference(sites_cart)
        if(rmsd_diff<self.rmsd_tolerance):
            return True

    def geometry_exploded(self, fmodel, geometry_rmsd_manager):
        """True when the bond RMSD exceeds twice the allowed maximum."""
        result = False
        cctbx_rm_bonds_rmsd = calculator_module.get_bonds_rmsd(
            restraints_manager = geometry_rmsd_manager.geometry,
            xrs = fmodel.xray_structure)
        if(cctbx_rm_bonds_rmsd>self.max_bond_rmsd*2.0):
            result = True
        return result

    def is_weight_scale_converged(self, restraints_weight_scale):
        """True when this weight scale has already been tried (cycle detected)."""
        return restraints_weight_scale in self.restraints_weight_scales
class minimizer(object):
  """
  Driver for scitbx L-BFGS coordinate minimization, with an optional
  Polak-Ribiere conjugate-gradient pre-optimization stage.

  The minimization runs inside __init__: constructing the object
  performs the refinement and leaves the result in self.x.
  """
  def __init__(self,
        stpmax,
        log_switch,
        calculator,
        max_iterations,
        max_bond_rmsd,
        gradient_only,
        line_search,
        results,
        mode,
        geometry_rmsd_manager=None,
        preopt_params = None):
    # Copies all constructor arguments onto self.
    adopt_init_args(self, locals())
    self.x = self.calculator.x
    self.x_previous = None
    self.number_of_function_and_gradients_evaluations=0
    self.lbfgs_core_params = scitbx.lbfgs.core_parameters(
      stpmin = 1.e-9,
      stpmax = stpmax)
    self.nstep = 0
    # Optional PRCG pre-optimization before the L-BFGS run.
    # NOTE(review): attribute name 'minimzer' is a typo and prcg_min
    # returns None; the assignment is effectively dead — confirm.
    if preopt_params is not None:
      self.minimzer = self.prcg_min (
        params = preopt_params
      )
    # Main L-BFGS run; most line-search failures are ignored so the
    # minimization is tolerant of noisy gradients.
    self.minimizer = scitbx.lbfgs.run(
      target_evaluator=self,
      gradient_only=gradient_only,
      line_search=line_search,
      log=log_switch,
      core_params=self.lbfgs_core_params,
      termination_params=scitbx.lbfgs.termination_parameters(
        max_iterations=max_iterations),
      exception_handling_params=scitbx.lbfgs.exception_handling_parameters(
        ignore_line_search_failed_rounding_errors=True,
        ignore_line_search_failed_step_at_lower_bound=True,
        ignore_line_search_failed_step_at_upper_bound=False,#True,
        ignore_line_search_failed_maxfev=True,
        ignore_line_search_failed_xtol=True,
        ignore_search_direction_not_descent=True
        )
      )
  def _get_bond_rmsd(self):
    # Bond-length rmsd of the current coordinates over non-H/D atoms;
    # None when no geometry_rmsd_manager was supplied.
    b_mean = None
    if(self.geometry_rmsd_manager is not None):
      s = self.calculator.not_hd_selection
      energies_sites = \
        self.geometry_rmsd_manager.geometry.select(s).energies_sites(
          sites_cart = flex.vec3_double(self.x).select(s),
          compute_gradients = False)
      b_mean = energies_sites.bond_deviations()[2]
    return b_mean
  def callback_after_step(self, minimizer=None):
    # Returning True from this L-BFGS callback terminates minimization:
    # stop a "refine" run whose geometry degraded past max_bond_rmsd
    # after enough evaluations.
    # NOTE(review): if geometry_rmsd_manager is None while mode=="refine",
    # b_mean is None and the comparison below relies on Python 2
    # None-vs-float ordering — confirm this path cannot occur.
    if(self.geometry_rmsd_manager is not None or self.mode=="refine"):
      b_mean = self._get_bond_rmsd()
      if(self.mode=="refine" and
         b_mean>self.max_bond_rmsd and
         self.number_of_function_and_gradients_evaluations-3>20):
        return True
  def compute_functional_and_gradients(self):
    """L-BFGS target interface: return (target, gradients) at self.x."""
    self.number_of_function_and_gradients_evaluations += 1
    # Ad hoc damping shifts; note arbitrary 1.0 below
    #x_current = self.x
    #if(self.x_previous is None):
    #  self.x_previous = x_current.deep_copy()
    #else:
    #  xray.ext.damp_shifts(previous=self.x_previous, current=x_current,
    #    max_value=1.0)
    #  self.x_previous = x_current.deep_copy()
    #
    print " step: %3d bond rmsd: %8.6f"%(
      self.number_of_function_and_gradients_evaluations, self._get_bond_rmsd())
    return self.calculator.target_and_gradients(x = self.x)
  def prcg_min(self,params):
    import numpy as np
    import os
    """
    polak-ribiere conjugate gradient minimizer
    with simple step scaling and steepest decent steps
    """
    max_iter = params["maxiter"]
    max_step= params["stpmax"]
    # iswitch is 1-based in params; convert to a 0-based iteration index.
    switch_step= params["iswitch"] - 1
    gconv = params["gconv"]
    dim=len(self.x)
    x_new=self.x.as_numpy_array()
    x_old=x_new
    g_old=np.zeros(dim)
    gg=np.zeros(dim)
    xx=np.zeros(dim)
    step=np.zeros(dim)
    gg=np.zeros(dim)
    conv=False
    step_old=np.zeros(dim)
    self.x=flex.double(x_new.tolist())
    for iter in range(max_iter):
      # Energy and gradient at the current coordinates.
      self.eg=self.calculator.target_and_gradients(x = self.x )
      g=np.array(list(self.eg[1]))
      e=self.eg[0]
      gnorm=np.linalg.norm(g)
      # gnorm=self.eg[1].norm()/23.0605480121
      #self.number_of_function_and_gradients_evaluations += 1
      print 'iter= %i E=%12.5E G=%0.2f' % (iter+1,e,gnorm)
      # Gradient-norm convergence check (skipped on the first iterations).
      if gnorm <=gconv and iter >1:
        print 'gnorm pre-convergenced!'
        self.x=flex.double(x_new.tolist())
        break
      # Steepest-descent steps until switch_step, then Polak-Ribiere CG.
      if iter<=switch_step:
        print 'SD step'
        step=-g
      else:
        print 'CG step'
        gg=g-g_old
        gdgg=np.vdot(g,gg)
        gdg=np.vdot(g_old,g_old)
        sdgg=np.vdot(step_old,gg)
        sds=np.vdot(step_old,step_old)
        alpha=sds/sdgg
        beta=gdgg/gdg
        step=-g + beta*step_old
        step*=alpha
      # Cap the step length at max_step.
      snorm=np.linalg.norm(step)
      if snorm>=max_step:
        step*=max_step/snorm
      # print 'step norm',snorm
      x_new =x_old + step
      # Carry state over to the next iteration.
      self.x=flex.double(x_new.tolist())
      e_old=e
      g_old=g
      x_old=x_new
      step_old=step
class clustering_update(object):
  """
  Decides when QM fragment clustering must be recomputed because the
  model coordinates drifted too far since the clusters were built.
  """
  def __init__(self, pre_sites_cart, log, rmsd_tolerance, verbose=False):
    # 'verbose' is accepted (and stored) because callers construct this
    # object with verbose=params.debug; previously that raised TypeError.
    self.pre_sites_cart = pre_sites_cart
    self.log = log
    self.rmsd_tolerance = rmsd_tolerance
    self.verbose = verbose
  def re_clustering_check(self, sites_cart):
    """Set self.redo_clustering based on the coordinate shift."""
    # Bug fix: 'pre_sites_cart' was referenced without 'self.', raising
    # NameError whenever this method was called.
    rmsd_diff = self.pre_sites_cart.rms_difference(sites_cart)
    if(rmsd_diff < self.rmsd_tolerance):
      self.redo_clustering = False
    else:
      print >> self.log, " rmsd_diff: ", rmsd_diff, "--> need to redo clustering"
      self.redo_clustering = True
      self.pre_sites_cart = sites_cart
  def re_clustering(self, calculator):
    """Redo QM clustering in-place if coordinates moved beyond tolerance."""
    sites_cart = calculator.fmodel.xray_structure.sites_cart()
    rmsd_diff = self.pre_sites_cart.rms_difference(sites_cart)
    if(rmsd_diff > self.rmsd_tolerance):
      print >> self.log, " rmsd_diff: ", rmsd_diff, "--> need to redo clustering"
      calculator.restraints_manager.fragments.set_up_cluster_qm()
      print >> self.log, " interacting pairs number: ", \
        calculator.restraints_manager.fragments.interacting_pairs
      self.pre_sites_cart = sites_cart
class restart_data(object):
  """
  Accumulates refinement restart state and pickles it to a .rst file so
  an interrupted run can resume from the last completed cycle.
  """
  def __init__(self, geometry_rmsd_manager, fmodel=None, xray_structure=None):
    # Exactly one of fmodel / xray_structure must be provided.
    assert [xray_structure, fmodel].count(None) == 1
    self.rst_data = {"geometry_rmsd_manager": geometry_rmsd_manager}
    if(fmodel is not None):
      self.rst_data["fmodel"] = fmodel
    else:
      self.rst_data["xrs"] = xray_structure
  def write_rst_file(self, rst_file, weight_cycle = None, refine_cycle = None,
                     micro_cycle = None, fmodel = None, weights = None,
                     conv_test = None, results = None, xray_structure = None):
    """Merge the current cycle state into rst_data and pickle it."""
    self.rst_data.update({
      "weight_cycle"       : weight_cycle,
      "refine_cycle"       : refine_cycle,
      "micro_cycle"        : micro_cycle,
      "rst_fmodel"         : fmodel,
      "rst_xray_structure" : xray_structure,
      "weights"            : weights,
      "conv_test"          : conv_test,
      "results"            : results,
    })
    easy_pickle.dump(file_name=rst_file, obj=self.rst_data)
class minimizer_ase(object):
  """
  Coordinate minimization driven by ASE's LBFGS optimizer instead of
  scitbx L-BFGS. The run happens inside __init__; afterwards the ASE
  atoms, the calculator and the fmodel/xray_structure coordinates are
  asserted to be mutually consistent.
  """
  def __init__(self, calculator, params, max_iterations, geometry_rmsd_manager):
    self.params = params
    self.max_iterations = max_iterations
    self.calculator = calculator
    self.geometry_rmsd_manager = geometry_rmsd_manager
    # Seed the ASE atoms with the calculator's current coordinates.
    self.ase_atoms = calculator.ase_atoms
    self.ase_atoms.set_positions(flex.vec3_double(self.calculator.x))
    self.opt = LBFGS(atoms = self.ase_atoms)
    self.number_of_function_and_gradients_evaluations = 0
    self.b_rmsd = self._get_bond_rmsd(
      sites_cart = flex.vec3_double(self.calculator.x))
    print " step: %3d bond rmsd: %8.6f"%(
      self.number_of_function_and_gradients_evaluations, self.b_rmsd)
    self.run(nstep = max_iterations)
    # Syncing and cross-checking begin
    e = 1.e-4
    assert approx_equal(
      self.ase_atoms.get_positions(), self.opt.atoms.get_positions(), e)
    self.calculator.update(x = self.opt.atoms.get_positions())
    assert approx_equal(self.calculator.x, self.opt.atoms.get_positions(), e)
    if(params.refine.mode=="refine"):
      assert approx_equal(flex.vec3_double(self.calculator.x),
        self.calculator.fmodel.xray_structure.sites_cart(), e)
    else:
      assert approx_equal(flex.vec3_double(self.calculator.x),
        self.calculator.xray_structure.sites_cart(), e)
    b_rmsd = self._get_bond_rmsd(sites_cart = flex.vec3_double(self.calculator.x))
    assert approx_equal(self.b_rmsd, b_rmsd, e)
    # Syncing and cross-checking end
  def _get_bond_rmsd(self, sites_cart):
    # Bond-length rmsd over non-H/D atoms; None when no
    # geometry_rmsd_manager was supplied.
    b_mean = None
    if(self.geometry_rmsd_manager is not None):
      s = self.calculator.not_hd_selection
      energies_sites = \
        self.geometry_rmsd_manager.geometry.select(s).energies_sites(
          sites_cart = sites_cart.select(s),
          compute_gradients = False)
      b_mean = energies_sites.bond_deviations()[2]
    return b_mean
  def step(self):
    """One ASE LBFGS step; return False to request early termination."""
    sites_cart = flex.vec3_double(self.opt.atoms.get_positions())
    t,g = self.calculator.target_and_gradients(x = sites_cart)
    # ASE expects forces, i.e. negated gradients.
    forces = numpy.array(g) * (-1)
    self.opt.step(forces)
    self.number_of_function_and_gradients_evaluations += 1
    self.calculator.update(x = self.opt.atoms.get_positions())
    # Track geometry quality after the step.
    self.b_rmsd = self._get_bond_rmsd(
      sites_cart = flex.vec3_double(self.opt.atoms.get_positions()))
    print " step: %3d bond rmsd: %8.6f"%(
      self.number_of_function_and_gradients_evaluations, self.b_rmsd)
    # Stop a "refine" run whose geometry degraded past the allowed rmsd
    # after enough evaluations.
    if(self.params.refine.mode=="refine" and
       self.b_rmsd>self.params.refine.max_bond_rmsd and
       self.number_of_function_and_gradients_evaluations>20):
      return False
    #
    return True
  def run(self, nstep):
    # Iterate until nstep steps done or step() requests termination.
    for i in range(nstep):
      v = self.step()
      if(not v): return
def run_minimize(calculator, params, results, geometry_rmsd_manager, mode):
assert mode in ["weight", "refine"]
result = None
try:
result = run_minimize_(
calculator = calculator,
params = params,
results = results,
geometry_rmsd_manager = geometry_rmsd_manager,
mode = mode)
except Exception as e:
print "minimization failed:", e
result = None
return result
def run_minimize_(calculator, params, results, geometry_rmsd_manager, mode):
  """
  Build and run the requested minimizer (ASE LBFGS or scitbx L-BFGS)
  for the given mode ("weight" or "refine"). Returns the minimizer
  object, or None when max_iterations is not positive.
  """
  if  (mode == "weight"): max_iterations = params.refine.max_iterations_weight
  elif(mode == "refine"): max_iterations = params.refine.max_iterations_refine
  if(params.refine.use_ase_lbfgs):
    return minimizer_ase(
      calculator            = calculator,
      params                = params,
      max_iterations        = max_iterations,
      geometry_rmsd_manager = geometry_rmsd_manager)
  if(max_iterations <= 0):
    return None
  # Optional log stream and PRCG pre-optimization settings.
  log_switch = results.log if (params.refine.opt_log or params.debug) else None
  preopt = None
  if(params.refine.pre_opt):
    preopt = {
      'stpmax'  : params.refine.pre_opt_stpmax,
      'maxiter' : params.refine.pre_opt_iter,
      'iswitch' : params.refine.pre_opt_switch,
      'gconv'   : params.refine.pre_opt_gconv,
    }
  return minimizer(
    log_switch            = log_switch,
    calculator            = calculator,
    stpmax                = params.refine.stpmax,
    gradient_only         = params.refine.gradient_only,
    line_search           = params.refine.line_search,
    max_iterations        = max_iterations,
    max_bond_rmsd         = params.refine.max_bond_rmsd,
    results               = results,
    mode                  = params.refine.mode,
    geometry_rmsd_manager = geometry_rmsd_manager,
    preopt_params         = preopt)
def run_collect(n_fev, results, fmodel, geometry_rmsd_manager, calculator):
  """
  Push the current refinement statistics (R factors, bond rmsd, model,
  restraints weight scale, evaluation count) into the results object.
  """
  bond_rmsd = calculator_module.get_bonds_rmsd(
    restraints_manager = geometry_rmsd_manager.geometry,
    xrs                = fmodel.xray_structure)
  results.update(
    r_work                  = fmodel.r_work(),
    r_free                  = fmodel.r_free(),
    b                       = bond_rmsd,
    xrs                     = fmodel.xray_structure,
    restraints_weight_scale = calculator.weights.restraints_weight_scale,
    n_fev                   = n_fev)
def refine(fmodel,
params,
results,
calculator,
geometry_rmsd_manager):
if(not params.refine.refine_sites): return
rst_file = params.rst_file
rst_data = restart_data(fmodel=fmodel,
geometry_rmsd_manager=geometry_rmsd_manager)
if(os.path.isfile(rst_file)):
with open(rst_file, 'rb') as handle:
rst_file_data = pickle.load(handle)
weight_cycle_start = rst_file_data["weight_cycle"]
refine_cycle_start = rst_file_data["refine_cycle"]
print >> results.log
print >> results.log, "*"*50
print >> results.log, "restarts from weight_cycle: %d, refine_cycle: %s"%(
weight_cycle_start, refine_cycle_start)
print >> results.log, "*"*50
print >> results.log
else:
weight_cycle_start = 1
refine_cycle_start = None
if(weight_cycle_start==1):
conv_test = convergence(fmodel = fmodel, params = params)
else:
conv_test = rst_file_data["conv_test"]
try:
clustering = calculator.restraints_manager.clustering
except :
clustering = False
if(clustering):
cluster_qm_update = clustering_update(
pre_sites_cart = calculator.fmodel.xray_structure.sites_cart(),
log = results.log,
rmsd_tolerance = params.refine.rmsd_tolerance * 100,
verbose=params.debug,
)
print >> results.log, "\ninteracting pairs number: ", \
calculator.restraints_manager.fragments.interacting_pairs
weight_cycle = weight_cycle_start
print >> results.log, "Start:"
results.show(prefix=" ")
if(refine_cycle_start is not None):
print >> results.log, \
"Found optimal weight. Proceed to further refinement with this weight."
fmodel = calculator.fmodel.deep_copy()
elif(not params.refine.skip_weight_search):
print >> results.log, "Optimal weight search:"
fmodel_copy = calculator.fmodel.deep_copy()
for weight_cycle in xrange(weight_cycle_start,
params.refine.number_of_weight_search_cycles+1):
if((weight_cycle!=1 and weight_cycle==weight_cycle_start)):
fmodel = calculator.fmodel.deep_copy()
if params.debug: print '>>> Using calculator fmodel'
else:
fmodel = fmodel_copy.deep_copy()
if params.debug: print '>>> Using fmodel_copy fmodel'
calculator.reset_fmodel(fmodel = fmodel)
if(clustering):
cluster_qm_update.re_clustering(calculator)
# Calculate weight
calculator.calculate_weight(verbose=params.debug)
# Collect state
rst_data.write_rst_file(
rst_file = rst_file,
refine_cycle = None,
weight_cycle = weight_cycle,
fmodel = fmodel,
weights = calculator.weights,
conv_test = conv_test,
results = results)
# Run minimization
n_fev = 0
for mc in xrange(params.refine.number_of_macro_cycles):
minimized = run_minimize(
calculator = calculator,
params = params,
results = results,
geometry_rmsd_manager = geometry_rmsd_manager,
mode = "weight")
if(minimized is not None):
calculator.reset_fmodel(fmodel = fmodel)
calculator.update_fmodel()
n_fev += minimized.number_of_function_and_gradients_evaluations
break
if(minimized | |
dataset via a shared URL; dataset is added to user's current history. """
# Set referer message.
referer = trans.request.referer
if referer:
referer_message = "<a href='%s'>return to the previous page</a>" % escape(referer)
else:
referer_message = "<a href='%s'>go to Galaxy's start page</a>" % url_for('/')
# Error checking.
if not dataset_id:
return trans.show_error_message("You must specify a dataset to import. You can %s." % referer_message, use_panels=True)
# Do import.
cur_history = trans.get_history(create=True)
status, message = self._copy_datasets(trans, [dataset_id], [cur_history], imported=True)
message = "Dataset imported. <br>You can <a href='%s'>start using the dataset</a> or %s." % (url_for('/'), referer_message)
return trans.show_message(message, type=status, use_panels=True)
@web.expose
@web.json
@web.require_login("use Galaxy datasets")
def get_name_and_link_async(self, trans, id=None):
""" Returns dataset's name and link. """
decoded_id = self.decode_id(id)
dataset = self.hda_manager.get_accessible(decoded_id, trans.user)
dataset = self.hda_manager.error_if_uploading(dataset)
return_dict = {"name" : dataset.name, "link" : url_for(controller='dataset', action="display_by_username_and_slug", username=dataset.history.user.username, slug=trans.security.encode_id(dataset.id))}
return return_dict
@web.expose
def get_embed_html_async(self, trans, id):
""" Returns HTML for embedding a dataset in a page. """
decoded_id = self.decode_id(id)
dataset = self.hda_manager.get_accessible(decoded_id, trans.user)
dataset = self.hda_manager.error_if_uploading(dataset)
if dataset:
return "Embedded Dataset '%s'" % dataset.name
    @web.expose
    @web.require_login("use Galaxy datasets")
    def set_accessible_async(self, trans, id=None, accessible=False):
        """ Does nothing because datasets do not have an importable/accessible attribute. This method could potentially set another attribute. """
        # Intentional no-op: kept only to satisfy the shared sharing API.
        return
@web.expose
@web.require_login("rate items")
@web.json
def rate_async(self, trans, id, rating):
""" Rate a dataset asynchronously and return updated community data. """
decoded_id = self.decode_id(id)
dataset = self.hda_manager.get_accessible(decoded_id, trans.user)
dataset = self.hda_manager.error_if_uploading(dataset)
if not dataset:
return trans.show_error_message("The specified dataset does not exist.")
# Rate dataset.
self.rate_item(trans.sa_session, trans.get_user(), dataset, rating)
return self.get_ave_item_rating_data(trans.sa_session, dataset)
    @web.expose
    def display_by_username_and_slug(self, trans, username, slug, filename=None, preview=True):
        """ Display dataset by username and slug; because datasets do not yet have slugs, the slug is the dataset's id. """
        id = slug
        decoded_id = self.decode_id(id)
        dataset = self.hda_manager.get_accessible(decoded_id, trans.user)
        dataset = self.hda_manager.error_if_uploading(dataset)
        if dataset:
            # Filename used for composite types.
            if filename:
                return self.display(trans, dataset_id=slug, filename=filename)
            truncated, dataset_data = self.hda_manager.text_data(dataset, preview)
            # Attach the owner's annotation for the display template.
            dataset.annotation = self.get_item_annotation_str(trans.sa_session, dataset.history.user, dataset)
            # If dataset is chunkable, get first chunk.
            first_chunk = None
            if dataset.datatype.CHUNKABLE:
                first_chunk = dataset.datatype.get_chunk(trans, dataset, 0)
            # If data is binary or an image, stream without template; otherwise, use display template.
            # TODO: figure out a way to display images in display template.
            if isinstance(dataset.datatype, datatypes.binary.Binary) or isinstance(dataset.datatype, datatypes.images.Image) or isinstance(dataset.datatype, datatypes.text.Html):
                trans.response.set_content_type(dataset.get_mime())
                # The open file object is handed to the framework to stream.
                return open(dataset.file_name)
            else:
                # Get rating data: this user's rating (0 if anonymous or
                # unrated) plus the community average.
                user_item_rating = 0
                if trans.get_user():
                    user_item_rating = self.get_user_item_rating(trans.sa_session, trans.get_user(), dataset)
                    if user_item_rating:
                        user_item_rating = user_item_rating.rating
                    else:
                        user_item_rating = 0
                ave_item_rating, num_ratings = self.get_ave_item_rating_data(trans.sa_session, dataset)
                return trans.fill_template_mako("/dataset/display.mako", item=dataset, item_data=dataset_data,
                                                truncated=truncated, user_item_rating=user_item_rating,
                                                ave_item_rating=ave_item_rating, num_ratings=num_ratings,
                                                first_chunk=first_chunk)
        else:
            # Dataset missing or inaccessible.
            raise web.httpexceptions.HTTPNotFound()
@web.expose
def get_item_content_async(self, trans, id):
""" Returns item content in HTML format. """
decoded_id = self.decode_id(id)
dataset = self.hda_manager.get_accessible(decoded_id, trans.user)
dataset = self.hda_manager.error_if_uploading(dataset)
if dataset is None:
raise web.httpexceptions.HTTPNotFound()
truncated, dataset_data = self.hda_manager.text_data(dataset, preview=True)
# Get annotation.
dataset.annotation = self.get_item_annotation_str(trans.sa_session, trans.user, dataset)
return trans.stream_template_mako("/dataset/item_content.mako", item=dataset, item_data=dataset_data, truncated=truncated)
@web.expose
def annotate_async(self, trans, id, new_annotation=None, **kwargs):
# TODO:?? why is this an access check only?
decoded_id = self.decode_id(id)
dataset = self.hda_manager.get_accessible(decoded_id, trans.user)
dataset = self.hda_manager.error_if_uploading(dataset)
if not dataset:
web.httpexceptions.HTTPNotFound()
if dataset and new_annotation:
# Sanitize annotation before adding it.
new_annotation = sanitize_html(new_annotation, 'utf-8', 'text/html')
self.add_item_annotation(trans.sa_session, trans.get_user(), dataset, new_annotation)
trans.sa_session.flush()
return new_annotation
@web.expose
def get_annotation_async(self, trans, id):
decoded_id = self.decode_id(id)
dataset = self.hda_manager.get_accessible(decoded_id, trans.user)
dataset = self.hda_manager.error_if_uploading(dataset)
if not dataset:
web.httpexceptions.HTTPNotFound()
annotation = self.get_item_annotation_str(trans.sa_session, trans.user, dataset)
if annotation and isinstance(annotation, text_type):
annotation = annotation.encode('ascii', 'replace') # paste needs ascii here
return annotation
    @web.expose
    def display_at(self, trans, dataset_id, filename=None, **kwd):
        """Sets up a dataset permissions so it is viewable at an external site"""
        if not trans.app.config.enable_old_display_applications:
            return trans.show_error_message("This method of accessing external display applications has been disabled by a Galaxy administrator.")
        # 'filename' actually names the external site for permission setup.
        site = filename
        data = trans.sa_session.query(trans.app.model.HistoryDatasetAssociation).get(dataset_id)
        if not data:
            raise paste.httpexceptions.HTTPRequestRangeNotSatisfiable("Invalid reference dataset id: %s." % str(dataset_id))
        if 'display_url' not in kwd or 'redirect_url' not in kwd:
            return trans.show_error_message('Invalid parameters specified for "display at" link, please contact a Galaxy administrator')
        # redirect_url may contain a %s placeholder for the display URL.
        try:
            redirect_url = kwd['redirect_url'] % quote_plus(kwd['display_url'])
        except Exception:
            redirect_url = kwd['redirect_url']  # not all will need custom text
        if trans.app.security_agent.dataset_is_public(data.dataset):
            return trans.response.send_redirect(redirect_url)  # anon access already permitted by rbac
        if self._can_access_dataset(trans, data):
            # Grant the external site access to this private dataset, then redirect.
            trans.app.host_security_agent.set_dataset_permissions(data, trans.user, site)
            return trans.response.send_redirect(redirect_url)
        else:
            return trans.show_error_message("You are not allowed to view this dataset at external sites. Please contact your Galaxy administrator to acquire management permissions for this dataset.")
@web.expose
def display_application(self, trans, dataset_id=None, user_id=None, app_name=None, link_name=None, app_action=None, action_param=None, action_param_extra=None, **kwds):
"""Access to external display applications"""
# Build list of parameters to pass in to display application logic (app_kwds)
app_kwds = {}
for name, value in dict(kwds).items(): # clone kwds because we remove stuff as we go.
if name.startswith("app_"):
app_kwds[name[len("app_"):]] = value
del kwds[name]
if kwds:
log.debug("Unexpected Keywords passed to display_application: %s" % kwds) # route memory?
# decode ids
data, user = decode_dataset_user(trans, dataset_id, user_id)
if not data:
raise paste.httpexceptions.HTTPRequestRangeNotSatisfiable("Invalid reference dataset id: %s." % str(dataset_id))
if user is None:
user = trans.user
if user:
user_roles = user.all_roles()
else:
user_roles = []
# Decode application name and link name
app_name = unquote_plus(app_name)
link_name = unquote_plus(link_name)
if None in [app_name, link_name]:
return trans.show_error_message("A display application name and link name must be provided.")
if self._can_access_dataset(trans, data, additional_roles=user_roles):
msg = []
preparable_steps = []
refresh = False
display_app = trans.app.datatypes_registry.display_applications.get(app_name)
if not display_app:
log.debug("Unknown display application has been requested: %s", app_name)
return paste.httpexceptions.HTTPNotFound("The requested display application (%s) is not available." % (app_name))
dataset_hash, user_hash = encode_dataset_user(trans, data, user)
try:
display_link = display_app.get_link(link_name, data, dataset_hash, user_hash, trans, app_kwds)
except Exception as e:
log.debug("Error generating display_link: %s", e)
# User can sometimes recover from, e.g. conversion errors by fixing input metadata, so use conflict
return paste.httpexceptions.HTTPConflict("Error generating display_link: %s" % e)
if not display_link:
log.debug("Unknown display link has been requested: %s", link_name)
return paste.httpexceptions.HTTPNotFound("Unknown display link has been requested: %s" % link_name)
if data.state == data.states.ERROR:
msg.append(('This dataset is in an error state, you cannot view it at an external display application.', 'error'))
elif data.deleted:
msg.append(('This dataset has been deleted, you cannot view it at an external display application.', 'error'))
elif data.state != data.states.OK:
msg.append(('You must wait for this dataset to be created before you can view it at an external display application.', 'info'))
refresh = True
else:
# We have permissions, dataset is not deleted and is in OK state, allow access
if display_link.display_ready():
if app_action in ['data', 'param']:
assert action_param, "An action param must be provided for a data or param action"
# data is used for things with filenames that could be passed off to a proxy
# in case some display app wants all files to be in the same 'directory',
# data can be forced to param, but not the other way (no filename for other direction)
# get param name from url param name
try:
action_param = display_link.get_param_name_by_url(action_param)
except ValueError as e:
log.debug(e)
return paste.httpexceptions.HTTPNotFound(str(e))
value = display_link.get_param_value(action_param)
assert value, "An invalid parameter name was provided: %s" % action_param
assert value.parameter.viewable, "This parameter is not viewable."
if value.parameter.type == 'data':
try:
if action_param_extra:
assert value.parameter.allow_extra_files_access, "Extra file content requested (%s), but allow_extra_files_access is False." % (action_param_extra)
file_name = os.path.join(value.extra_files_path, action_param_extra)
else:
file_name = value.file_name
content_length = os.path.getsize(file_name)
rval = open(file_name)
except OSError as e:
log.debug("Unable to access requested file in display application: %s", e)
return paste.httpexceptions.HTTPNotFound("This file is no longer available.")
else:
rval = str(value)
content_length = len(rval)
trans.response.set_content_type(value.mime_type(action_param_extra=action_param_extra))
trans.response.headers['Content-Length'] = content_length
return rval
elif app_action is None:
# redirect user to url generated by display link
# Fix for Safari caching display links, which can change if the underlying dataset has an attribute change, e.g. name, metadata, etc
trans.response.headers['Cache-Control'] = ['no-cache', 'max-age=0', 'no-store', 'must-revalidate']
return trans.response.send_redirect(display_link.display_url())
else:
msg.append(('Invalid action provided: %s' % app_action, 'error'))
else:
if app_action is None:
if trans.history != data.history:
msg.append(('You must import this dataset into your current history before you can view it at the desired display application.', 'error'))
else:
refresh = True
msg.append(('Launching this display application required additional datasets to | |
by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_the_location_to_upload_a_file_with_http_info(data_partition_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str data_partition_id: Specifies the data partition to use. This should either be the partition name or crm account ID associated with the partition. (required)
:return: FileSourceLocationResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['data_partition_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_the_location_to_upload_a_file" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'data_partition_id' is set
if self.api_client.client_side_validation and ('data_partition_id' not in params or
params['data_partition_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `data_partition_id` when calling `get_the_location_to_upload_a_file`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'data_partition_id' in params:
header_params['data-partition-id'] = params['data_partition_id'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Bearer'] # noqa: E501
return self.api_client.call_api(
'/api/file/v2/files/uploadURL', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileSourceLocationResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def gets_metadata_record_for_the_given_id(self, data_partition_id, id, **kwargs): # noqa: E501
"""Gets metadata record for the given id # noqa: E501
Gets the latest version of File metadata record identified by the given id. **Required roles**: 'users.datalake.viewers' or 'users.datalake.editors' or 'users.datalake.admins' or 'users.datalake.ops'. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.gets_metadata_record_for_the_given_id(data_partition_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str data_partition_id: Specifies the data partition to use. This should either be the partition name or crm account ID associated with the partition. (required)
:param str id: File metadata record Id. (required)
:return: FileRecordVersion
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.gets_metadata_record_for_the_given_id_with_http_info(data_partition_id, id, **kwargs) # noqa: E501
else:
(data) = self.gets_metadata_record_for_the_given_id_with_http_info(data_partition_id, id, **kwargs) # noqa: E501
return data
def gets_metadata_record_for_the_given_id_with_http_info(self, data_partition_id, id, **kwargs):  # noqa: E501
    """Gets metadata record for the given id  # noqa: E501

    Gets the latest version of File metadata record identified by the given id. **Required roles**: 'users.datalake.viewers' or 'users.datalake.editors' or 'users.datalake.admins' or 'users.datalake.ops'. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.gets_metadata_record_for_the_given_id_with_http_info(data_partition_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str data_partition_id: Specifies the data partition to use. This should either be the partition name or crm account ID associated with the partition. (required)
    :param str id: File metadata record Id. (required)
    :return: FileRecordVersion
             If the method is called asynchronously,
             returns the request thread.
    """
    # Whitelist of keyword arguments this endpoint accepts; anything
    # else is rejected below with a TypeError.
    all_params = ['data_partition_id', 'id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Flatten recognized kwargs into `params` so positional and keyword
    # arguments are handled uniformly from here on.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method gets_metadata_record_for_the_given_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'data_partition_id' is set
    if self.api_client.client_side_validation and ('data_partition_id' not in params or
                                                   params['data_partition_id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `data_partition_id` when calling `gets_metadata_record_for_the_given_id`")  # noqa: E501
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in params or
                                                   params['id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `id` when calling `gets_metadata_record_for_the_given_id`")  # noqa: E501

    collection_formats = {}

    # 'Id' is substituted into the '{Id}' placeholder of the path below.
    path_params = {}
    if 'id' in params:
        path_params['Id'] = params['id']  # noqa: E501

    query_params = []

    # The target partition travels in the 'data-partition-id' header.
    header_params = {}
    if 'data_partition_id' in params:
        header_params['data-partition-id'] = params['data_partition_id']  # noqa: E501

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Bearer']  # noqa: E501

    # Delegate the actual HTTP GET (and optional async dispatch /
    # deserialization) to the shared ApiClient.
    return self.api_client.call_api(
        '/api/file/v2/files/{Id}/metadata', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='FileRecordVersion',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def gets_url_to_download_the_file_associated_with_the_given_id_(self, data_partition_id, id, **kwargs):  # noqa: E501
    """Gets a URL to download the file  # noqa: E501

    Fetches a signed URL for downloading the file associated with the
    unique `id`. **Required roles**: 'users.datalake.viewers' or
    'users.datalake.editors' or 'users.datalake.admins' or
    'users.datalake.ops'.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.gets_url_to_download_the_file_associated_with_the_given_id_(data_partition_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str data_partition_id: Specifies the data partition to use. This should either be the partition name or crm account ID associated with the partition. (required)
    :param str id: File Metadata record Id. (required)
    :param str expiry_time: The Time for which Signed URL to be valid. Accepted Regex patterns are \"^[0-9]+M$\", \"^[0-9]+H$\", \"^[0-9]+D$\" denoting Integer values in Minutes, Hours, Days respectively. In absence of this parameter the URL would be valid for 7 Days.
    :return: FileDownloadResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always yields the deserialized data only,
    # never the (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    # The delegate already returns the request thread when async_req is
    # set and the plain response data otherwise, so both modes collapse
    # into a single return.
    return self.gets_url_to_download_the_file_associated_with_the_given_id__with_http_info(
        data_partition_id, id, **kwargs)
def gets_url_to_download_the_file_associated_with_the_given_id__with_http_info(self, data_partition_id, id, **kwargs):  # noqa: E501
    """Gets a URL to download the file  # noqa: E501

    Gets a URL for downloading the file associated with the unique `id`. **Required roles**: 'users.datalake.viewers' or 'users.datalake.editors' or 'users.datalake.admins' or 'users.datalake.ops'. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.gets_url_to_download_the_file_associated_with_the_given_id__with_http_info(data_partition_id, id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str data_partition_id: Specifies the data partition to use. This should either be the partition name or crm account ID associated with the partition. (required)
    :param str id: File Metadata record Id. (required)
    :param str expiry_time: The Time for which Signed URL to be valid. Accepted Regex patterns are \"^[0-9]+M$\", \"^[0-9]+H$\", \"^[0-9]+D$\" denoting Integer values in Minutes, Hours, Days respectively. In absence of this parameter the URL would be valid for 7 Days.
    :return: FileDownloadResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Whitelist of keyword arguments this endpoint accepts; anything
    # else is rejected below with a TypeError.
    all_params = ['data_partition_id', 'id', 'expiry_time']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Flatten recognized kwargs into `params` so positional and keyword
    # arguments are handled uniformly from here on.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method gets_url_to_download_the_file_associated_with_the_given_id_" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'data_partition_id' is set
    if self.api_client.client_side_validation and ('data_partition_id' not in params or
                                                   params['data_partition_id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `data_partition_id` when calling `gets_url_to_download_the_file_associated_with_the_given_id_`")  # noqa: E501
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in params or
                                                   params['id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `id` when calling `gets_url_to_download_the_file_associated_with_the_given_id_`")  # noqa: E501

    collection_formats = {}

    # 'Id' is substituted into the '{Id}' placeholder of the path below.
    path_params = {}
    if 'id' in params:
        path_params['Id'] = params['id']  # noqa: E501

    # Optional signed-URL lifetime is passed as the 'expiryTime' query
    # parameter; the server defaults to 7 days when absent.
    query_params = []
    if 'expiry_time' in params:
        query_params.append(('expiryTime', params['expiry_time']))  # noqa: E501

    # The target partition travels in the 'data-partition-id' header.
    header_params = {}
    if 'data_partition_id' in params:
        header_params['data-partition-id'] = params['data_partition_id']  # noqa: E501

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Bearer']  # noqa: E501

    # Delegate the actual HTTP GET (and optional async dispatch /
    # deserialization) to the shared ApiClient.
    return self.api_client.call_api(
        '/api/file/v2/files/{Id}/DownloadURL', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='FileDownloadResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def publish_file_metadata_for_a_file_(self, data_partition_id, **kwargs): # noqa: E501
"""Creates metadata for a file # noqa: E501
This API creates a metadata record for a file that is | |
<filename>xanthosvis/util_functions.py
import base64
import collections
import datetime
import io
import json
from zipfile import ZipFile
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objs as go
def get_available_years(in_file, non_year_fields=None):
    """Get available years/months from a processed xanthos output dataframe.

    Drops the non-year columns in place, then builds dropdown options from
    the remaining column headers, which are either 'YYYY' (annual data) or
    'YYYYMM' (monthly data).

    :param in_file: Processed file as a dataframe; mutated in place
                    (non-year columns are dropped)
    :type in_file: dataframe
    :param non_year_fields: list of non-year fields to drop from the file;
                            defaults to ['id']
    :type non_year_fields: list
    :return: (year_list, month_list) where year_list is a list of
             {'label', 'value'} dicts and month_list is the unique 2-char
             month codes for monthly files, or None for annual files
    """
    # Drop identifier columns so only date columns remain.
    if non_year_fields is None:
        non_year_fields = ['id']
    in_file.drop(columns=non_year_fields, inplace=True)

    # 'YYYY' headers are shown as-is; 'YYYYMM' headers as 'YYYY-MM'.
    year_list = [
        {'label': i if len(i) == 4 else i[0:4] + '-' + i[4:6], 'value': i}
        for i in in_file.columns
    ]

    # Monthly files use 6-character 'YYYYMM' headers; only then is there a
    # month dimension to expose.
    if len(in_file.columns[0]) == 6:
        month_list = np.unique([i[4:6] for i in in_file.columns])
    else:
        month_list = None
    return year_list, month_list
def get_available_months(months_list):
    """Build month dropdown options for the month codes present.

    Xanthos monthly output columns look like '198001' (year 1980,
    month 01); the caller passes the unique 2-character month codes.

    :param months_list: month codes ('01'..'12') parsed from the file
    :type months_list: list
    :return: list of {'label': month name, 'value': code} dicts for the
             months form control, always in calendar order
    """
    month_names = ("January", "February", "March", "April", "May", "June",
                   "July", "August", "September", "October", "November",
                   "December")
    # Iterate in calendar order so the dropdown is ordered regardless of
    # the order of months_list; codes are zero-padded to match the file.
    return [{'label': name, 'value': '%02d' % (number + 1)}
            for number, name in enumerate(month_names)
            if '%02d' % (number + 1) in months_list]
def available_through_years(available_year_list, start_year):
    """Filter year options down to those at or after the chosen start year.

    :param available_year_list: list of {'label', 'value'} year options
                                from the input file
    :type available_year_list: list
    :param start_year: the start year chosen
    :type start_year: int
    :return: list of options usable as the through/ending year
    """
    threshold = int(start_year)
    return [option for option in available_year_list
            if int(option['value']) >= threshold]
def basin_to_gridcell_dict(df_reference):
    """Build a mapping of grid cell id to basin id, {grid_id: basin_id}.

    :param df_reference: Input data reference dataframe containing grid to
                         basin info
    :type df_reference: dataframe
    :return: dict of {grid_id: basin_id}
    """
    # Select only the two columns of interest, key by grid_id, and expose
    # the basin_id column as a plain dictionary.
    return (df_reference[['grid_id', 'basin_id']]
            .set_index('grid_id')['basin_id']
            .to_dict())
def country_to_gridcell_dict(df_reference):
    """Build per-column mappings of grid cell id to country attributes.

    :param df_reference: Input data reference dataframe containing grid to
                         country info
    :type df_reference: dataframe
    :return: dict of {column: {grid_id: value}} for 'country_id',
             'country_name' and 'area_hectares'
    """
    wanted = ['grid_id', 'country_id', 'country_name', 'area_hectares']
    # Key by grid_id; DataFrame.to_dict() then yields one inner mapping
    # per remaining column.
    return df_reference[wanted].set_index('grid_id').to_dict()
def prepare_data(df, df_ref):
    """Attach basin, country and area attributes to the data via the
    reference table.

    :param df: Processed dataframe; mutated in place (new columns added)
    :type df: dataframe
    :param df_ref: Reference data frame from package
    :type df_ref: dataframe
    :return: dataframe with 'basin_id', 'country_name', 'country_id' and
             'area' columns added
    """
    # Build the grid-cell lookup tables once, then map each attribute
    # onto the data by grid cell id.
    basin_lookup = basin_to_gridcell_dict(df_ref)
    country_lookup = country_to_gridcell_dict(df_ref)

    df['basin_id'] = df['id'].map(basin_lookup)
    for target, source in (('country_name', 'country_name'),
                           ('country_id', 'country_id'),
                           ('area', 'area_hectares')):
        df[target] = df['id'].map(country_lookup[source])
    return df
def data_per_basin(df, statistic, yr_list, df_ref, months, filename, units):
    """Generate a data frame representing data per basin for all years
    represented by an input statistic.

    :param df: Data with basin id
    :type df: dataframe
    :param statistic: statistic name from user input ('mean', 'median',
                      'min', 'max' or 'standard deviation')
    :type statistic: str
    :param yr_list: List of year (or 'YYYYMM') column names to process
    :type yr_list: list
    :param df_ref: Reference dataframe
    :type df_ref: dataframe
    :param months: parameter for filtering by months (2-char codes)
    :type months: list
    :param filename: Name of input file for parsing the base unit
    :type filename: list
    :param units: Chosen units for output
    :type units: str
    :return: dataframe; grouped by basin for statistic, with 'var',
             'basin_name', 'country_id' and 'country_name' columns
    :raises ValueError: if `statistic` is not a recognized option
    """
    # Keep only the columns whose month part matches the selection.
    # NOTE(review): assumes 'YYYYMM' column names when months are given —
    # confirm monthly files always reach this path.
    if months is not None and len(months) > 0:
        yr_list = [c for c in yr_list if c[4:6] in months]
    # sum data by basin by year
    grp = df.groupby('basin_id').sum()
    grp.drop(columns=['id'], inplace=True)
    # calculate chosen statistic across the selected year columns
    if statistic == 'mean':
        grp['var'] = grp[yr_list].mean(axis=1)
    elif statistic == 'median':
        grp['var'] = grp[yr_list].median(axis=1)
    elif statistic == 'min':
        grp['var'] = grp[yr_list].min(axis=1)
    elif statistic == 'max':
        grp['var'] = grp[yr_list].max(axis=1)
    elif statistic == 'standard deviation':
        grp['var'] = grp[yr_list].std(axis=1)
    else:
        msg = f"The statistic requested '{statistic}' is not a valid option."
        raise ValueError(msg)

    # Parse out and convert units if necessary.
    # NOTE(review): 'area' here is the per-basin sum of cell areas
    # (hectares) produced by the groupby above; the /100 converts
    # hectares to km² — presumably mm <-> km³ volume/depth conversion.
    # TODO confirm against get_units_from_name().
    unit_type = get_units_from_name(filename)
    if unit_type != units:
        if unit_type == 'km³':
            grp['var'] = (grp['var'] * 1000000) / (grp['area'] / 100)
        if unit_type == 'mm':
            grp['var'] = (grp['var'] / 1000000) * (grp['area'] / 100)

    # Drop unneeded columns
    grp.drop(columns=yr_list, inplace=True)

    # Map basin and country fields using df_ref: basin_name is 1:1, while
    # country_id/country_name become lists (a basin can span countries).
    grp.reset_index(inplace=True)
    mapping = dict(df_ref[['basin_id', 'basin_name']].values)
    mapping2 = df_ref.groupby('basin_id')[['country_id']].apply(lambda g: g.country_id.unique().tolist()).to_dict()
    mapping3 = df_ref.groupby('basin_id')[['country_name']].apply(lambda g: g.country_name.unique().tolist()).to_dict()
    grp['basin_name'] = grp.basin_id.map(mapping)
    grp['country_id'] = grp.basin_id.map(mapping2)
    grp['country_name'] = grp.basin_id.map(mapping3)
    return grp
def data_per_cell(df, statistic, yr_list, df_ref, months, area_type, unit_type, units):
    """Generate a data frame representing data per grid cell for the
    years/months chosen.

    :param df: Data with basin id
    :type df: dataframe
    :param statistic: statistic name from user input ('mean', 'median',
                      'min', 'max' or 'standard deviation')
    :type statistic: str
    :param yr_list: List of year (or 'YYYYMM') column names to process
    :type yr_list: list
    :param df_ref: Reference dataframe
    :type df_ref: dataframe
    :param months: months from dropdown (2-char codes)
    :type months: list
    :param area_type: Type of area (country or basin)
    :type area_type: str
    :param unit_type: NOTE(review): by analogy with data_per_basin (where
                      the first operand of the comparison is the unit
                      parsed from the file name) this appears to be the
                      file's base unit and `units` the user's choice — the
                      original doc said the opposite. TODO confirm.
    :type unit_type: str
    :param units: the other unit of the pair (see note above)
    :type units: str
    :return: dataframe; per-cell data joined with reference info and a
             'var' statistic column
    :raises ValueError: if `statistic` is not a recognized option
    """
    # Keep only the columns whose month part matches the selection.
    if months is not None and len(months) > 0:
        yr_list = [c for c in yr_list if c[4:6] in months]

    # Set up column list for joins.
    # NOTE(review): column_list is computed but never used afterwards —
    # looks like dead code left from an earlier join strategy.
    column_list = list(df)
    column_list.remove('id')
    column_list.remove('basin_id')

    # Join per-cell reference attributes onto the data by grid id
    # ('a1' suffixes any overlapping reference column names).
    df_ref = df_ref.set_index('grid_id')
    df = df.join(df_ref, 'id', 'left', 'a1')

    # Calculate the chosen statistic across the selected year columns.
    if statistic == 'mean':
        df['var'] = df[yr_list].mean(axis=1)
    elif statistic == 'median':
        df['var'] = df[yr_list].median(axis=1)
    elif statistic == 'min':
        df['var'] = df[yr_list].min(axis=1)
    elif statistic == 'max':
        df['var'] = df[yr_list].max(axis=1)
    elif statistic == 'standard deviation':
        df['var'] = df[yr_list].std(axis=1)
    else:
        msg = f"The statistic requested '{statistic}' is not a valid option."
        raise ValueError(msg)

    # Convert units if user has chosen different from file default
    # (same hectare-based conversion as data_per_basin).
    if unit_type != units:
        if unit_type == 'km³':
            df['var'] = (df['var'] * 1000000) / (df['area'] / 100)
        if unit_type == 'mm':
            df['var'] = (df['var'] / 1000000) * (df['area'] / 100)
    return df
def data_per_country(df, statistic, yr_list, df_ref, months, filename, units):
"""Generate a data frame representing data per country for all years/months
represented by an input statistic.
:param df: Data with basin id
:type df: dataframe
:param statistic: statistic name from user input
:type statistic: str
:param yr_list: List of years to process
:type yr_list: list
:param df_ref Reference dataframe
:type df_ref dataframe
:param months months from dropdown
:type months list
:param filename Name of uploaded file
:type filename list
:param units Chosen unit type
:type units str
:return: dataframe; grouped by country for statistic
"""
if months is not None and len(months) > 0:
yr_list = [c for c in yr_list if c[4:6] in months]
# sum data by basin by year
grp = df.groupby('country_name').sum()
grp.drop(columns=['id'], inplace=True)
# calculate statistic
if statistic == 'mean':
grp['var'] = grp[yr_list].mean(axis=1)
elif statistic == 'median':
grp['var'] = grp[yr_list].median(axis=1)
elif statistic == 'min':
grp['var'] = grp[yr_list].min(axis=1)
elif statistic == 'max':
grp['var'] = grp[yr_list].max(axis=1)
elif statistic == 'standard deviation':
grp['var'] = grp[yr_list].std(axis=1)
else:
msg = f"The statistic requested '{statistic}' is not a valid option."
raise ValueError(msg)
# Drop unneeded columns
grp.drop(columns=yr_list, inplace=True)
# Convert units if necessary
unit_type = get_units_from_name(filename)
if unit_type != units:
if unit_type == 'km³':
grp['var'] = (grp['var'] * 1000000) / (grp['area'] / 100)
if unit_type == 'mm':
grp['var'] = (grp['var'] / 1000000) * (grp['area'] / 100)
# Map country values using df_ref
grp.reset_index(inplace=True)
mapping = dict(df_ref[['country_name', 'country_id']].values)
grp['country_id'] = | |
<filename>neutron_taas/services/taas/drivers/linux/ovs_taas.py<gh_stars>10-100
# Copyright (C) 2015 Ericsson AB
# Copyright (c) 2015 Gigamon
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.agent.common import ovs_lib
from neutron.agent.linux import utils
from neutron.conf.agent import common
from neutron_lib import constants as n_consts
from neutron_taas.services.taas.agents.extensions import taas as taas_base
import neutron_taas.services.taas.drivers.linux.ovs_constants \
as taas_ovs_consts
import neutron_taas.services.taas.drivers.linux.ovs_utils as taas_ovs_utils
from oslo_config import cfg
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
TaaS_DRIVER_NAME = 'Taas OVS driver'
class OVSBridge_tap_extension(ovs_lib.OVSBridge):
    """OVSBridge subclass used for the TaaS tap bridge (br-tap).

    NOTE(review): ``root_helper`` is accepted but never used or forwarded
    to the superclass — presumably kept for call-site compatibility.
    TODO confirm it can be dropped.
    """

    def __init__(self, br_name, root_helper):
        super(OVSBridge_tap_extension, self).__init__(br_name)
class OvsTaasDriver(taas_base.TaasAgentDriver):
def __init__(self):
    """Initialize driver state; bridges are acquired later in initialize()."""
    super(OvsTaasDriver, self).__init__()
    LOG.debug("Initializing Taas OVS Driver")
    # Set via consume_api() by the agent before initialize() is called.
    self.agent_api = None
    self.root_helper = common.get_root_helper(cfg.CONF)
def initialize(self):
    """Acquire the agent bridges, create br-tap and program base flows."""
    # br-int and br-tun are owned by the L2 agent; request handles via
    # the agent API stored by consume_api().
    self.int_br = self.agent_api.request_int_br()
    self.tun_br = self.agent_api.request_tun_br()
    self.tap_br = OVSBridge_tap_extension('br-tap', self.root_helper)

    # Prepare OVS bridges for TaaS
    self.setup_ovs_bridges()

    # Setup key-value manager for ingress BCMC flows
    self.bcmc_kvm = taas_ovs_utils.key_value_mgr(4096)
def periodic_tasks(self, args=None):
    """Periodic maintenance hook invoked by the agent.

    :param args: unused; accepted for interface compatibility
    """
    #
    # Regenerate the flow in br-tun's TAAS_SEND_FLOOD table
    # to ensure all existing tunnel ports are included.
    #
    self.update_tunnel_flood_flow()
def setup_ovs_bridges(self):
    """Create br-tap, patch it to br-int/br-tun and install the base
    (tap-service independent) TaaS flows on br-tap and br-tun.

    Existing TaaS flows are purged first, so this is safe to re-run.
    """
    #
    # br-int : Integration Bridge
    # br-tap : Tap Bridge
    # br-tun : Tunnel Bridge
    #

    # Create br-tap
    self.tap_br.create()

    # Connect br-tap to br-int and br-tun
    self.int_br.add_patch_port('patch-int-tap', 'patch-tap-int')
    self.tap_br.add_patch_port('patch-tap-int', 'patch-int-tap')
    self.tun_br.add_patch_port('patch-tun-tap', 'patch-tap-tun')
    self.tap_br.add_patch_port('patch-tap-tun', 'patch-tun-tap')

    # Get patch port IDs
    patch_tap_int_id = self.tap_br.get_port_ofport('patch-tap-int')
    patch_tap_tun_id = self.tap_br.get_port_ofport('patch-tap-tun')
    patch_tun_tap_id = self.tun_br.get_port_ofport('patch-tun-tap')

    # Purge all existing Taas flows from br-tap and br-tun
    self.tap_br.delete_flows(table=0)
    self.tap_br.delete_flows(table=taas_ovs_consts.TAAS_RECV_LOC)
    self.tap_br.delete_flows(table=taas_ovs_consts.TAAS_RECV_REM)

    self.tun_br.delete_flows(table=0,
                             in_port=patch_tun_tap_id)
    self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_SEND_UCAST)
    self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_SEND_FLOOD)
    self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_CLASSIFY)
    self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_DST_CHECK)
    self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_SRC_CHECK)
    self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_DST_RESPOND)
    self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_SRC_RESPOND)

    #
    # Configure standard TaaS flows in br-tap
    #
    # Table 0 dispatches on ingress patch port: traffic arriving from
    # br-int goes to TAAS_RECV_LOC, traffic from br-tun to TAAS_RECV_REM;
    # anything else is dropped.
    self.tap_br.add_flow(table=0,
                         priority=1,
                         in_port=patch_tap_int_id,
                         actions="resubmit(,%s)" %
                         taas_ovs_consts.TAAS_RECV_LOC)

    self.tap_br.add_flow(table=0,
                         priority=1,
                         in_port=patch_tap_tun_id,
                         actions="resubmit(,%s)" %
                         taas_ovs_consts.TAAS_RECV_REM)

    self.tap_br.add_flow(table=0,
                         priority=0,
                         actions="drop")

    # Default (no local tap service matched): forward toward br-tun.
    self.tap_br.add_flow(table=taas_ovs_consts.TAAS_RECV_LOC,
                         priority=0,
                         actions="output:%s" % str(patch_tap_tun_id))

    # Default for remote traffic with no local tap service: drop.
    self.tap_br.add_flow(table=taas_ovs_consts.TAAS_RECV_REM,
                         priority=0,
                         actions="drop")

    #
    # Configure standard Taas flows in br-tun
    #
    self.tun_br.add_flow(table=0,
                         priority=1,
                         in_port=patch_tun_tap_id,
                         actions="resubmit(,%s)" %
                         taas_ovs_consts.TAAS_SEND_UCAST)

    self.tun_br.add_flow(table=taas_ovs_consts.TAAS_SEND_UCAST,
                         priority=0,
                         actions="resubmit(,%s)" %
                         taas_ovs_consts.TAAS_SEND_FLOOD)

    # Flood mirrored traffic out all tunnel ports (if any exist yet);
    # periodic_tasks() keeps this flow in sync as tunnels come and go.
    flow_action = self._create_tunnel_flood_flow_action()
    if flow_action != "":
        self.tun_br.add_flow(table=taas_ovs_consts.TAAS_SEND_FLOOD,
                             priority=0,
                             actions=flow_action)

    # Classify by reg0: 0/1 -> destination check, 2 -> source check.
    self.tun_br.add_flow(table=taas_ovs_consts.TAAS_CLASSIFY,
                         priority=2,
                         reg0=0,
                         actions="resubmit(,%s)" %
                         taas_ovs_consts.TAAS_DST_CHECK)

    self.tun_br.add_flow(table=taas_ovs_consts.TAAS_CLASSIFY,
                         priority=1,
                         reg0=1,
                         actions="resubmit(,%s)" %
                         taas_ovs_consts.TAAS_DST_CHECK)

    self.tun_br.add_flow(table=taas_ovs_consts.TAAS_CLASSIFY,
                         priority=1,
                         reg0=2,
                         actions="resubmit(,%s)" %
                         taas_ovs_consts.TAAS_SRC_CHECK)

    self.tun_br.add_flow(table=taas_ovs_consts.TAAS_DST_CHECK,
                         priority=0,
                         actions="drop")

    self.tun_br.add_flow(table=taas_ovs_consts.TAAS_SRC_CHECK,
                         priority=0,
                         actions="drop")

    self.tun_br.add_flow(table=taas_ovs_consts.TAAS_DST_RESPOND,
                         priority=2,
                         reg0=0,
                         actions="output:%s" % str(patch_tun_tap_id))

    self.tun_br.add_flow(table=taas_ovs_consts.TAAS_DST_RESPOND,
                         priority=1,
                         reg0=1,
                         actions=(
                             "output:%s,"
                             "move:NXM_OF_VLAN_TCI[0..11]->NXM_NX_TUN_ID"
                             "[0..11],mod_vlan_vid:2,output:in_port" %
                             str(patch_tun_tap_id)))

    # Learn a return-path flow so subsequent unicast mirrored traffic
    # for this tunnel id goes straight out the right port.
    self.tun_br.add_flow(table=taas_ovs_consts.TAAS_SRC_RESPOND,
                         priority=1,
                         actions=(
                             "learn(table=%s,hard_timeout=60,"
                             "priority=1,NXM_OF_VLAN_TCI[0..11],"
                             "load:NXM_OF_VLAN_TCI[0..11]->NXM_NX_TUN_ID"
                             "[0..11],load:0->NXM_OF_VLAN_TCI[0..11],"
                             "output:NXM_OF_IN_PORT[])" %
                             taas_ovs_consts.TAAS_SEND_UCAST))

    return
def consume_api(self, agent_api):
    """Store the L2 agent extension API handle.

    Used later by initialize() to request the br-int and br-tun bridges.
    """
    self.agent_api = agent_api
def create_tap_service(self, tap_service):
    """Program flows that deliver mirrored traffic to a tap service port.

    :param tap_service: dict with 'taas_id' (the TaaS VLAN/tunnel id)
                        and 'port' (the destination Neutron port)
    """
    taas_id = tap_service['taas_id']
    port = tap_service['port']

    # Get OVS port id for tap service port
    ovs_port = self.int_br.get_vif_port_by_id(port['id'])
    ovs_port_id = ovs_port.ofport

    # Get VLAN id for tap service port
    port_dict = self.int_br.get_port_tag_dict()
    port_vlan_id = port_dict[ovs_port.port_name]

    # Get patch port IDs
    patch_int_tap_id = self.int_br.get_port_ofport('patch-int-tap')
    patch_tap_int_id = self.tap_br.get_port_ofport('patch-tap-int')

    # Add flow(s) in br-int: rewrite the TaaS VLAN back to the port's
    # local VLAN and deliver to the tap service port.
    self.int_br.add_flow(table=0,
                         priority=25,
                         in_port=patch_int_tap_id,
                         dl_vlan=taas_id,
                         actions="mod_vlan_vid:%s,output:%s" %
                         (str(port_vlan_id), str(ovs_port_id)))

    # Add flow(s) in br-tap: locally-mirrored traffic for this taas_id is
    # hairpinned back toward br-int; remotely-mirrored traffic is passed
    # through to br-int.
    self.tap_br.add_flow(table=taas_ovs_consts.TAAS_RECV_LOC,
                         priority=1,
                         dl_vlan=taas_id,
                         actions="output:in_port")

    self.tap_br.add_flow(table=taas_ovs_consts.TAAS_RECV_REM,
                         priority=1,
                         dl_vlan=taas_id,
                         actions="output:%s" % str(patch_tap_int_id))

    # Add flow(s) in br-tun: recognize this taas_id on every tunnel type
    # and route it through the TaaS classification pipeline.
    for tunnel_type in n_consts.TUNNEL_NETWORK_TYPES:
        self.tun_br.add_flow(table=n_consts.TUN_TABLE[tunnel_type],
                             priority=1,
                             tun_id=taas_id,
                             actions=(
                                 "move:NXM_OF_VLAN_TCI[0..11]->"
                                 "NXM_NX_REG0[0..11],move:NXM_NX_TUN_ID"
                                 "[0..11]->NXM_OF_VLAN_TCI[0..11],"
                                 "resubmit(,%s)" %
                                 taas_ovs_consts.TAAS_CLASSIFY))

    self.tun_br.add_flow(table=taas_ovs_consts.TAAS_DST_CHECK,
                         priority=1,
                         tun_id=taas_id,
                         actions="resubmit(,%s)" %
                         taas_ovs_consts.TAAS_DST_RESPOND)

    #
    # Disable mac-address learning in the Linux bridge to which
    # the OVS port is attached (via the veth pair) if the system
    # uses OVSHybridIptablesFirewallDriver (Linux bridge & OVS).
    # This will effectively turn the bridge into a hub, ensuring
    # that all incoming mirrored traffic reaches the tap interface
    # (used for attaching a VM to the bridge) irrespective of the
    # destination mac addresses in mirrored packets.
    #
    # Get hybrid plug info
    vif_details = port.get('binding:vif_details')
    is_hybrid_plug = vif_details.get('ovs_hybrid_plug')
    if is_hybrid_plug:
        ovs_port_name = ovs_port.port_name
        # qvoXXX (OVS side of the veth pair) -> qbrXXX (Linux bridge).
        linux_br_name = ovs_port_name.replace('qvo', 'qbr')
        utils.execute(['brctl', 'setageing', linux_br_name, 0],
                      run_as_root=True, privsep_exec=True)

    return
def delete_tap_service(self, tap_service):
    """Remove all flows installed by create_tap_service for this taas_id.

    :param tap_service: dict with 'taas_id' identifying the tap service
    """
    taas_id = tap_service['taas_id']

    # Get patch port ID
    patch_int_tap_id = self.int_br.get_port_ofport('patch-int-tap')

    # Delete flow(s) from br-int
    self.int_br.delete_flows(table=0,
                             in_port=patch_int_tap_id,
                             dl_vlan=taas_id)

    # Delete flow(s) from br-tap
    self.tap_br.delete_flows(table=taas_ovs_consts.TAAS_RECV_LOC,
                             dl_vlan=taas_id)

    self.tap_br.delete_flows(table=taas_ovs_consts.TAAS_RECV_REM,
                             dl_vlan=taas_id)

    # Delete flow(s) from br-tun (per tunnel type, plus both check
    # tables — SRC_CHECK entries may exist from tap flows).
    for tunnel_type in n_consts.TUNNEL_NETWORK_TYPES:
        self.tun_br.delete_flows(table=n_consts.TUN_TABLE[tunnel_type],
                                 tun_id=taas_id)

    self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_DST_CHECK,
                             tun_id=taas_id)

    self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_SRC_CHECK,
                             tun_id=taas_id)

    return
def create_tap_flow(self, tap_flow):
    """Program flows that mirror a source port's traffic to a tap service.

    :param tap_flow: dict with 'taas_id', 'port' (the mirrored port),
                     'port_mac' and the tap flow's 'direction'
                     ('IN', 'OUT' or 'BOTH')
    """
    taas_id = tap_flow['taas_id']
    port = tap_flow['port']
    direction = tap_flow['tap_flow']['direction']

    # Get OVS port id for tap flow port
    ovs_port = self.int_br.get_vif_port_by_id(port['id'])
    ovs_port_id = ovs_port.ofport

    # Get patch port ID
    patch_int_tap_id = self.int_br.get_port_ofport('patch-int-tap')

    # Add flow(s) in br-int: besides normal forwarding, duplicate the
    # frame tagged with the TaaS VLAN toward br-tap.
    if direction == 'OUT' or direction == 'BOTH':
        self.int_br.add_flow(table=0,
                             priority=20,
                             in_port=ovs_port_id,
                             actions="normal,mod_vlan_vid:%s,output:%s" %
                             (str(taas_id), str(patch_int_tap_id)))

    if direction == 'IN' or direction == 'BOTH':
        port_mac = tap_flow['port_mac']

        #
        # Note: The ingress side flow (for unicast traffic) should
        #       include a check for the 'VLAN id of the Neutron
        #       network the port belongs to' + 'MAC address of the
        #       port', to comply with the requirement that port MAC
        #       addresses are unique only within a Neutron network.
        #       Unfortunately, at the moment there is no clean way
        #       to implement such a check, given OVS's handling of
        #       VLAN tags and Neutron's use of the NORMAL action in
        #       br-int.
        #
        #       We are therefore temporarily disabling the VLAN id
        #       check until a mechanism is available to implement
        #       it correctly. The {broad,multi}cast flow, which is
        #       also dependent on the VLAN id, has been disabled
        #       for the same reason.
        #

        # Get VLAN id for tap flow port
        # port_dict = self.int_br.get_port_tag_dict()
        # port_vlan_id = port_dict[ovs_port.port_name]

        self.int_br.add_flow(table=0,
                             priority=20,
                             # dl_vlan=port_vlan_id,
                             dl_dst=port_mac,
                             actions="normal,mod_vlan_vid:%s,output:%s" %
                             (str(taas_id), str(patch_int_tap_id)))

        # self._add_update_ingress_bcmc_flow(port_vlan_id,
        #                                    taas_id,
        #                                    patch_int_tap_id)

    # Add flow(s) in br-tun: recognize this taas_id on every tunnel type
    # and route it through the TaaS classification pipeline.
    for tunnel_type in n_consts.TUNNEL_NETWORK_TYPES:
        self.tun_br.add_flow(table=n_consts.TUN_TABLE[tunnel_type],
                             priority=1,
                             tun_id=taas_id,
                             actions=(
                                 "move:NXM_OF_VLAN_TCI[0..11]->"
                                 "NXM_NX_REG0[0..11],move:NXM_NX_TUN_ID"
                                 "[0..11]->NXM_OF_VLAN_TCI[0..11],"
                                 "resubmit(,%s)" %
                                 taas_ovs_consts.TAAS_CLASSIFY))

    self.tun_br.add_flow(table=taas_ovs_consts.TAAS_SRC_CHECK,
                         priority=1,
                         tun_id=taas_id,
                         actions="resubmit(,%s)" %
                         taas_ovs_consts.TAAS_SRC_RESPOND)

    return
def delete_tap_flow(self, tap_flow):
    """Remove the br-int mirroring flows installed by create_tap_flow.

    NOTE(review): the br-tun flows added by create_tap_flow are not
    removed here — presumably cleaned up with the tap service. TODO
    confirm.

    :param tap_flow: dict with 'port', 'port_mac' and the tap flow's
                     'direction' ('IN', 'OUT' or 'BOTH')
    """
    port = tap_flow['port']
    direction = tap_flow['tap_flow']['direction']

    # Get OVS port id for tap flow port
    ovs_port = self.int_br.get_vif_port_by_id(port['id'])
    ovs_port_id = ovs_port.ofport

    # Delete flow(s) from br-int
    if direction == 'OUT' or direction == 'BOTH':
        self.int_br.delete_flows(table=0,
                                 in_port=ovs_port_id)

    if direction == 'IN' or direction == 'BOTH':
        port_mac = tap_flow['port_mac']

        #
        # The VLAN id related checks have been temporarily disabled.
        # Please see comment in create_tap_flow() for details.
        #
        # taas_id = tap_flow['taas_id']

        # Get VLAN id for tap flow port
        # port_dict = self.int_br.get_port_tag_dict()
        # port_vlan_id = port_dict[ovs_port.port_name]

        # Get patch port ID
        # patch_int_tap_id = self.int_br.get_port_ofport('patch-int-tap')

        self.int_br.delete_flows(table=0,
                                 # dl_vlan=port_vlan_id,
                                 dl_dst=port_mac)

        # self._del_update_ingress_bcmc_flow(port_vlan_id,
        #                                    taas_id,
        #                                    patch_int_tap_id)

    return
def update_tunnel_flood_flow(self):
    """Refresh br-tun's TAAS_SEND_FLOOD flow to cover current tunnel ports."""
    flow_action = self._create_tunnel_flood_flow_action()
    # An empty action string means there are no tunnel ports yet;
    # leave the existing flow (if any) untouched in that case.
    if flow_action != "":
        self.tun_br.mod_flow(table=taas_ovs_consts.TAAS_SEND_FLOOD,
                             actions=flow_action)
def _create_tunnel_flood_flow_action(self):
    """Build the action string that floods mirrored traffic out every
    tunnel port of br-tun, or return "" when no tunnel ports exist."""
    listing = utils.execute(["ovs-vsctl", "list-ports", "br-tun"],
                            run_as_root=True, privsep_exec=True)

    # Copy the VLAN id into the tunnel id and retag before flooding.
    action = ("move:NXM_OF_VLAN_TCI[0..11]->NXM_NX_TUN_ID[0..11],"
              "mod_vlan_vid:1")

    # Every port except the two patch ports is a tunnel port.
    non_tunnel_ports = ('patch-int', 'patch-tun-tap')
    found_tunnel_port = False
    for name in listing.splitlines():
        if name not in non_tunnel_ports:
            action += ",output:%d" % self.tun_br.get_port_ofport(name)
            found_tunnel_port = True

    return action if found_tunnel_port else ""
def _create_ingress_bcmc_flow_action(self, taas_id_list, out_port_id):
flow_action = "normal"
for taas_id in taas_id_list:
flow_action += (",mod_vlan_vid:%d,output:%d" %
(taas_id, out_port_id))
return flow_action
#
# Adds or updates a special flow in br-int to mirror (duplicate and
# redirect to 'out_port_id') all ingress broadcast/multicast traffic,
# associated with a VLAN, to possibly multiple tap service instances.
#
def _add_update_ingress_bcmc_flow(self, vlan_id, taas_id, out_port_id):
# Add a tap service instance affiliation with VLAN
self.bcmc_kvm.affiliate(vlan_id, taas_id)
# Find all tap service instances affiliated with VLAN
taas_id_list = self.bcmc_kvm.list_affiliations(vlan_id)
#
# Add/update flow to mirror ingress BCMC traffic, associated
# with VLAN, to all affiliated tap-service instances.
#
flow_action = self._create_ingress_bcmc_flow_action(taas_id_list,
out_port_id)
self.int_br.add_flow(table=0,
priority=20,
dl_vlan=vlan_id,
dl_dst="01:00:00:00:00:00/01:00:00:00:00:00",
actions=flow_action)
return
#
# Removes or updates a special flow in br-int to mirror (duplicate
# and redirect to 'out_port_id') all ingress broadcast/multicast
# traffic, associated with a VLAN, to possibly multiple tap-service
# instances.
#
def _del_update_ingress_bcmc_flow(self, vlan_id, taas_id, out_port_id):
# Remove a tap-service instance affiliation with VLAN
self.bcmc_kvm.unaffiliate(vlan_id, taas_id)
# Find all tap-service instances affiliated with VLAN
taas_id_list = self.bcmc_kvm.list_affiliations(vlan_id)
#
# If there are tap service instances affiliated with VLAN, update
# the flow to mirror ingress BCMC traffic, associated with VLAN,
# to all of them. Otherwise, remove the flow.
#
if taas_id_list:
flow_action = self._create_ingress_bcmc_flow_action(taas_id_list,
out_port_id)
self.int_br.add_flow(table=0,
priority=20,
dl_vlan=vlan_id,
dl_dst="01:00:00:00:00:00/01:00:00:00:00:00",
actions=flow_action)
else:
self.int_br.delete_flows(table=0,
| |
a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(DeletedEntityResponse, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'id_type_scope',
'id_type_code',
'code',
'property_keys',
'effective_at'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_person_properties" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'property_keys' is set
if self.api_client.client_side_validation and ('property_keys' not in local_var_params or # noqa: E501
local_var_params['property_keys'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `property_keys` when calling `delete_person_properties`") # noqa: E501
if self.api_client.client_side_validation and ('id_type_scope' in local_var_params and # noqa: E501
len(local_var_params['id_type_scope']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `id_type_scope` when calling `delete_person_properties`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('id_type_scope' in local_var_params and # noqa: E501
len(local_var_params['id_type_scope']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `id_type_scope` when calling `delete_person_properties`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'id_type_scope' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['id_type_scope']): # noqa: E501
raise ApiValueError("Invalid value for parameter `id_type_scope` when calling `delete_person_properties`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
if self.api_client.client_side_validation and ('id_type_code' in local_var_params and # noqa: E501
len(local_var_params['id_type_code']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `id_type_code` when calling `delete_person_properties`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('id_type_code' in local_var_params and # noqa: E501
len(local_var_params['id_type_code']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `id_type_code` when calling `delete_person_properties`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'id_type_code' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['id_type_code']): # noqa: E501
raise ApiValueError("Invalid value for parameter `id_type_code` when calling `delete_person_properties`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
len(local_var_params['code']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `delete_person_properties`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
len(local_var_params['code']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `delete_person_properties`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'code' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['code']): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `delete_person_properties`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id_type_scope' in local_var_params:
path_params['idTypeScope'] = local_var_params['id_type_scope'] # noqa: E501
if 'id_type_code' in local_var_params:
path_params['idTypeCode'] = local_var_params['id_type_code'] # noqa: E501
if 'code' in local_var_params:
path_params['code'] = local_var_params['code'] # noqa: E501
query_params = []
if 'property_keys' in local_var_params and local_var_params['property_keys'] is not None: # noqa: E501
query_params.append(('propertyKeys', local_var_params['property_keys'])) # noqa: E501
collection_formats['propertyKeys'] = 'multi' # noqa: E501
if 'effective_at' in local_var_params and local_var_params['effective_at'] is not None: # noqa: E501
query_params.append(('effectiveAt', local_var_params['effective_at'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "DeletedEntityResponse",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/persons/{idTypeScope}/{idTypeCode}/{code}/properties', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def get_all_person_access_metadata(self, id_type_scope, id_type_code, code, **kwargs): # noqa: E501
"""[EXPERIMENTAL] GetAllPersonAccessMetadata: Get Access Metadata rules for a Person # noqa: E501
Pass the Scope and Code of the Person identifier along with the person code parameter to retrieve the associated Access Metadata # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_person_access_metadata(id_type_scope, id_type_code, code, async_req=True)
>>> result = thread.get()
:param id_type_scope: Scope of the person identifier. (required)
:type id_type_scope: str
:param id_type_code: Code of the person identifier. (required)
:type id_type_code: str
:param code: Code of the person under specified identifier type's scope and code. (required)
:type code: str
:param effective_at: The effectiveAt datetime at which to retrieve the Access Metadata
:type effective_at: str
:param as_at: The asAt datetime at which to retrieve the Access Metadata
:type as_at: datetime
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: dict(str, list[AccessMetadataValue])
"""
kwargs['_return_http_data_only'] = True
return self.get_all_person_access_metadata_with_http_info(id_type_scope, id_type_code, code, **kwargs) # noqa: E501
def get_all_person_access_metadata_with_http_info(self, id_type_scope, id_type_code, code, **kwargs): # noqa: E501
"""[EXPERIMENTAL] GetAllPersonAccessMetadata: Get Access Metadata rules for a Person # noqa: E501
Pass the Scope and Code of the Person identifier along with the person code parameter to retrieve the associated Access Metadata # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_person_access_metadata_with_http_info(id_type_scope, id_type_code, code, async_req=True)
>>> result = thread.get()
:param id_type_scope: Scope of the person identifier. (required)
:type id_type_scope: str
:param id_type_code: Code of the person identifier. (required)
:type id_type_code: str
:param code: Code of the person under specified identifier type's scope and code. (required)
:type code: str
:param effective_at: The effectiveAt datetime at which to retrieve the Access Metadata
:type effective_at: str
:param as_at: The asAt datetime at which to retrieve the Access Metadata
:type as_at: datetime
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(dict(str, list[AccessMetadataValue]), status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'id_type_scope',
'id_type_code',
'code',
'effective_at',
'as_at'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_person_access_metadata" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id_type_scope' is set
if self.api_client.client_side_validation and ('id_type_scope' not in local_var_params or # noqa: E501
local_var_params['id_type_scope'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id_type_scope` when calling `get_all_person_access_metadata`") # noqa: E501
# verify the required parameter 'id_type_code' is set
if self.api_client.client_side_validation and ('id_type_code' not in local_var_params or # noqa: E501
local_var_params['id_type_code'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id_type_code` when calling `get_all_person_access_metadata`") # noqa: E501
# verify the required parameter 'code' is set
if self.api_client.client_side_validation and ('code' not in local_var_params or # noqa: E501
local_var_params['code'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `code` when calling `get_all_person_access_metadata`") # noqa: E501
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
| |
# <gh_stars>1-10  (repository metadata marker; commented out — not valid Python)
# coding: utf-8
# ----------------------------------------------------------------------------
# <copyright company="Aspose" file="ai_name_api.py">
# Copyright (c) 2018-2020 Aspose Pty Ltd. All rights reserved.
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# </summary>
# ----------------------------------------------------------------------------
from __future__ import absolute_import
from AsposeEmailCloudSdk.api.api_base import ApiBase
from AsposeEmailCloudSdk.models import *
class AiNameApi(ApiBase):
"""
Aspose.Email Cloud API. AiNameApi operations.
"""
    def __init__(self, api_client):
        """Initialize AiNameApi with a configured low-level API client.

        :param api_client: Client object passed through to ApiBase; used to
            perform the actual HTTP requests.
        """
        super(AiNameApi, self).__init__(api_client)
def complete(self, request: AiNameCompleteRequest) -> AiNameWeightedVariants:
"""The call proposes k most probable names for given starting characters.
:param request: AiNameCompleteRequest object with parameters
:type request: AiNameCompleteRequest
:return: AiNameWeightedVariants
"""
# verify the required parameter 'name' is set
if request.name is None:
raise ValueError("Missing the required parameter `name` when calling `complete`")
collection_formats = {}
path = '/email/AiName/complete'
path_params = {}
query_params = []
path_parameter = '{' + self._lowercase_first_letter('name') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.name if request.name is not None else '')
else:
if request.name is not None:
query_params.append((self._lowercase_first_letter('name'), request.name))
path_parameter = '{' + self._lowercase_first_letter('language') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.language if request.language is not None else '')
else:
if request.language is not None:
query_params.append((self._lowercase_first_letter('language'), request.language))
path_parameter = '{' + self._lowercase_first_letter('location') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.location if request.location is not None else '')
else:
if request.location is not None:
query_params.append((self._lowercase_first_letter('location'), request.location))
path_parameter = '{' + self._lowercase_first_letter('encoding') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.encoding if request.encoding is not None else '')
else:
if request.encoding is not None:
query_params.append((self._lowercase_first_letter('encoding'), request.encoding))
path_parameter = '{' + self._lowercase_first_letter('script') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.script if request.script is not None else '')
else:
if request.script is not None:
query_params.append((self._lowercase_first_letter('script'), request.script))
path_parameter = '{' + self._lowercase_first_letter('style') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.style if request.style is not None else '')
else:
if request.style is not None:
query_params.append((self._lowercase_first_letter('style'), request.style))
form_params = []
local_var_files = []
header_params = {}
# HTTP header `Accept`
header_params['Accept'] = self._select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self._select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['JWT']
http_request_object = HttpRequest(path, path_params, query_params, header_params, form_params, None, local_var_files,
collection_formats, auth_settings)
return self._make_request(http_request_object, 'GET', 'AiNameWeightedVariants')
def expand(self, request: AiNameExpandRequest) -> AiNameWeightedVariants:
"""Expands a person's name into a list of possible alternatives using options for expanding instructions.
:param request: AiNameExpandRequest object with parameters
:type request: AiNameExpandRequest
:return: AiNameWeightedVariants
"""
# verify the required parameter 'name' is set
if request.name is None:
raise ValueError("Missing the required parameter `name` when calling `expand`")
collection_formats = {}
path = '/email/AiName/expand'
path_params = {}
query_params = []
path_parameter = '{' + self._lowercase_first_letter('name') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.name if request.name is not None else '')
else:
if request.name is not None:
query_params.append((self._lowercase_first_letter('name'), request.name))
path_parameter = '{' + self._lowercase_first_letter('language') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.language if request.language is not None else '')
else:
if request.language is not None:
query_params.append((self._lowercase_first_letter('language'), request.language))
path_parameter = '{' + self._lowercase_first_letter('location') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.location if request.location is not None else '')
else:
if request.location is not None:
query_params.append((self._lowercase_first_letter('location'), request.location))
path_parameter = '{' + self._lowercase_first_letter('encoding') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.encoding if request.encoding is not None else '')
else:
if request.encoding is not None:
query_params.append((self._lowercase_first_letter('encoding'), request.encoding))
path_parameter = '{' + self._lowercase_first_letter('script') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.script if request.script is not None else '')
else:
if request.script is not None:
query_params.append((self._lowercase_first_letter('script'), request.script))
path_parameter = '{' + self._lowercase_first_letter('style') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.style if request.style is not None else '')
else:
if request.style is not None:
query_params.append((self._lowercase_first_letter('style'), request.style))
form_params = []
local_var_files = []
header_params = {}
# HTTP header `Accept`
header_params['Accept'] = self._select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self._select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['JWT']
http_request_object = HttpRequest(path, path_params, query_params, header_params, form_params, None, local_var_files,
collection_formats, auth_settings)
return self._make_request(http_request_object, 'GET', 'AiNameWeightedVariants')
def expand_parsed(self, request: AiNameParsedRequest) -> AiNameWeightedVariants:
"""Expands a person's parsed name into a list of possible alternatives using options for expanding instructions.
:param request: Parsed name with options.
:type request: AiNameParsedRequest
:return: AiNameWeightedVariants
"""
# verify the required parameter 'request' is set
if request is None:
raise ValueError("Missing the required parameter `request` when calling `expand_parsed`")
collection_formats = {}
path = '/email/AiName/expand-parsed'
header_params = {}
# HTTP header `Accept`
header_params['Accept'] = self._select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self._select_header_content_type(
['application/json'])
body_params = request
# Authentication setting
auth_settings = ['JWT']
http_request_object = HttpRequest(path, None, None, header_params, None, body_params, None, None, auth_settings)
return self._make_request(http_request_object, 'PUT', 'AiNameWeightedVariants')
def format(self, request: AiNameFormatRequest) -> AiNameFormatted:
"""Formats a person's name in correct case and name order using options for formatting instructions.
:param request: AiNameFormatRequest object with parameters
:type request: AiNameFormatRequest
:return: AiNameFormatted
"""
# verify the required parameter 'name' is set
if request.name is None:
raise ValueError("Missing the required parameter `name` when calling `format`")
collection_formats = {}
path = '/email/AiName/format'
path_params = {}
query_params = []
path_parameter = '{' + self._lowercase_first_letter('name') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.name if request.name is not None else '')
else:
if request.name is not None:
query_params.append((self._lowercase_first_letter('name'), request.name))
path_parameter = '{' + self._lowercase_first_letter('language') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.language if request.language is not None else '')
else:
if request.language is not None:
query_params.append((self._lowercase_first_letter('language'), request.language))
path_parameter = '{' + self._lowercase_first_letter('location') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.location if request.location is not None else '')
else:
if request.location is not None:
query_params.append((self._lowercase_first_letter('location'), request.location))
path_parameter = '{' + self._lowercase_first_letter('encoding') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.encoding if request.encoding is not None else '')
else:
if request.encoding is not None:
query_params.append((self._lowercase_first_letter('encoding'), request.encoding))
path_parameter = '{' + self._lowercase_first_letter('script') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.script if request.script is not None else '')
else:
if request.script is not None:
query_params.append((self._lowercase_first_letter('script'), request.script))
path_parameter = '{' + self._lowercase_first_letter('format') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.format if request.format is not None else '')
else:
if request.format is not None:
query_params.append((self._lowercase_first_letter('format'), request.format))
path_parameter = '{' + self._lowercase_first_letter('style') + '}'
if path_parameter in path:
path = path.replace(path_parameter, request.style if request.style is not None else '')
else:
if request.style is not None:
query_params.append((self._lowercase_first_letter('style'), request.style))
form_params = []
local_var_files = []
header_params = {}
# HTTP header `Accept`
header_params['Accept'] = self._select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self._select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['JWT']
http_request_object = HttpRequest(path, path_params, query_params, header_params, form_params, None, local_var_files,
collection_formats, auth_settings)
return self._make_request(http_request_object, 'GET', 'AiNameFormatted')
def format_parsed(self, request: AiNameParsedRequest) -> AiNameFormatted:
"""Formats a person's parsed name in correct case and name order using options for formatting instructions.
:param request: Parsed name with options.
:type request: AiNameParsedRequest
:return: AiNameFormatted
"""
# verify the required parameter 'request' is set
if request is None:
raise ValueError("Missing the required parameter `request` when calling `format_parsed`")
collection_formats = {}
path = '/email/AiName/format-parsed'
header_params = {}
# | |
sample_ds['term'].values
model_lookup = _get_model_lookup(sample_ds, indicator_var=indicator_var)
indicator_lookup = {model_lookup[k]: k for k in model_lookup}
model_names = np.array(list(indicator_lookup.keys()))
n_observed_models = len(model_names)
indicator_vals = {}
for i in range(n_indicators):
ki_vals = np.zeros((n_observed_models,), dtype=int)
for j, m in enumerate(model_names):
ki_vals[j] = indicator_lookup[m][i]
term_name = term_names[i]
indicator_vals[term_name] = ki_vals
return indicator_vals
def _calculate_model_logp(sample_ds, sample_stats_ds, indicator_var='k'):
    """Calculate the mean log-probability of each observed model.

    :param sample_ds: dataset of draws containing the indicator variable,
        with a 'chain' dimension.  # presumably an xarray Dataset — confirm
    :param sample_stats_ds: dataset whose 'lp' variable holds the per-draw
        log-probability, chain-aligned with ``sample_ds``.
    :param indicator_var: name of the indicator variable (default 'k').
    :return: 1D numpy array of mean 'lp' over the draws belonging to each
        model, ordered as the model lookup iterates.
    """
    n_chains = sample_ds.sizes['chain']

    model_lookup = _get_model_lookup(sample_ds, indicator_var=indicator_var)
    # Invert: model name -> indicator tuple.
    indicator_lookup = {model_lookup[k]: k for k in model_lookup}
    n_observed_models = len(indicator_lookup)

    model_lp = np.zeros((n_observed_models,))
    model_counts = np.zeros((n_observed_models,), dtype=int)
    for j in range(n_chains):
        chain_k = sample_ds[indicator_var].isel(chain=j).data
        chain_lp = sample_stats_ds['lp'].isel(chain=j).data

        for i, m in enumerate(indicator_lookup):
            model_k = indicator_lookup[m]
            # Select the draws whose indicators match this model exactly.
            mask = np.all(chain_k == model_k, axis=1)
            model_lp[i] += np.sum(chain_lp[mask])
            model_counts[i] += np.sum(mask)

    # Every model in the lookup was observed at least once, so each count
    # is nonzero and the division is safe.
    return model_lp / model_counts
def _calculate_model_posterior_probabilities(sample_ds, indicator_var='k'):
    """Calculate the posterior probability of each observed model.

    Counts, over all chains, how often each model's indicator tuple occurs
    and normalizes by the total number of draws.  Uses a single Counter
    pass instead of re-scanning every chain once per model.

    :param sample_ds: dataset of draws containing the indicator variable,
        with a 'chain' dimension.  # presumably an xarray Dataset — confirm
    :param indicator_var: name of the indicator variable (default 'k').
    :return: 1D numpy array of probabilities, ordered as the model lookup
        iterates.
    """
    n_chains = sample_ds.sizes['chain']

    model_lookup = _get_model_lookup(sample_ds, indicator_var=indicator_var)
    # Invert: model name -> indicator tuple.
    indicator_lookup = {model_lookup[k]: k for k in model_lookup}
    model_names = np.array(list(indicator_lookup.keys()))

    sample_counts = collections.Counter()
    n_samples = 0
    for j in range(n_chains):
        chain_k = [tuple(k)
                   for k in sample_ds[indicator_var].isel(chain=j).data]
        n_samples += len(chain_k)
        sample_counts.update(chain_k)

    model_counts = np.array([sample_counts[indicator_lookup[m]]
                             for m in model_names])

    return model_counts / n_samples
def stepwise_model_samples_summary(inference_data, show_indicators=True,
                                   posterior=True, sort_by_probs=True,
                                   indicator_var='k'):
    """Return summary of sampled models.

    :param inference_data: object exposing ``posterior`` (or ``prior``) and
        ``sample_stats`` datasets  # presumably arviz.InferenceData — confirm
    :param show_indicators: if True, include one column per indicator term
        giving its 0/1 value in each model.
    :param posterior: if True summarize ``inference_data.posterior``,
        otherwise ``inference_data.prior``.
    :param sort_by_probs: if True, order rows by decreasing model
        probability.
    :param indicator_var: name of the indicator variable in the dataset.
    :return: pandas.DataFrame with one row per observed model: the model
        name, optional indicator columns, mean log-probability ('lp__'),
        and probability ('p').
    """
    if posterior:
        sample_ds = inference_data.posterior
    else:
        sample_ds = inference_data.prior

    # model_lookup: indicator tuple -> model name; inverted below.
    model_lookup = _get_model_lookup(sample_ds, indicator_var=indicator_var)
    indicator_lookup = {model_lookup[k]: k for k in model_lookup}
    model_names = np.array(list(indicator_lookup.keys()))

    if show_indicators:
        indicator_vals = _get_model_indicator_arrays(
            sample_ds, indicator_var=indicator_var)
    else:
        indicator_vals = None

    model_probs = _calculate_model_posterior_probabilities(
        sample_ds, indicator_var=indicator_var)
    # NOTE(review): sample_stats is used for 'lp' even when posterior=False;
    # confirm this is intended for prior summaries.
    model_lp = _calculate_model_logp(
        sample_ds, inference_data.sample_stats,
        indicator_var=indicator_var)

    if sort_by_probs:
        # Apply one common ordering (descending probability) to every
        # per-model array so rows stay aligned.
        model_order = np.argsort(-model_probs)
        model_names = model_names[model_order]
        model_probs = model_probs[model_order]
        model_lp = model_lp[model_order]
        if show_indicators:
            for ki in indicator_vals:
                indicator_vals[ki] = indicator_vals[ki][model_order]

    # Preserve column order: model, indicators, lp__, p.
    data_vars = collections.OrderedDict({'model': model_names})
    if show_indicators:
        for ki in indicator_vals:
            data_vars[ki] = indicator_vals[ki]
    data_vars['lp__'] = model_lp
    data_vars['p'] = model_probs

    return pd.DataFrame(data_vars)
class StepwiseBayesRegression():
"""Bayes regression model with conjugate priors and fixed SNR.
Parameters
----------
a_tau : float, default: 1.0
Shape parameter of the gamma prior on the outcome precision.
b_tau : float, default: 10.0
Scale parameter of the gamma prior on the outcome precision.
nu_sq : float, default: 1.0
Signal-to-noise parameter used for normal prior on
the regression coefficients.
"""
    def __init__(self, a_tau=1.0, b_tau=10.0, nu_sq=1.0,
                 force_intercept=True):
        """Initialize the model with validated prior hyperparameters.

        :param a_tau: shape parameter of the gamma prior on the outcome
            precision.
        :param b_tau: scale parameter of the gamma prior on the outcome
            precision.
        :param nu_sq: signal-to-noise parameter for the coefficient prior.
        :param force_intercept: if True, sampled model structures always
            include an intercept term.
        """
        # Validation helpers raise on invalid hyperparameter values.
        self.a_tau, self.b_tau = _check_gamma_hyperparameters(a_tau, b_tau)
        self.nu_sq = _check_snr_parameter(nu_sq)
        # NOTE(review): consumed by the structure sampler; exchange-move
        # semantics are defined elsewhere — confirm before relying on this.
        self.allow_exchanges = True
        self.force_intercept = force_intercept
    def sample_structures_prior(self, formula_like, data=None,
                                max_terms=None, n_chains=4, n_iter=1000,
                                n_jobs=-1, generate_prior_predictive=False,
                                random_state=None):
        """Sample structures for model from uniform priors.

        Parameters
        ----------
        formula_like : object
            Formula-like object describing the largest possible model
            to be allowed, e.g., a string of the form
            "y ~ x1 + x2 + ... + xN", where y is the outcome variable
            to be modelled and x1, x2, ..., xN are all of the possible
            predictors considered for inclusion in the model.

        data : dict-like
            Data to be used in constructing the model.

        n_chains : int, default: 4
            Number of chains.

        n_iter : int, default: 1000
            Number of samples per chain.

        max_terms : int
            Maximum number of terms allowed in model.

        n_jobs : int, default: -1
            Number of parallel jobs used to run the chains; -1 uses all
            available workers.

        generate_prior_predictive : bool, default: False
            If True, also draw prior-predictive samples for each chain.

        random_state : integer, RandomState or None
            If an integer, random_state is the seed used by the
            random number generator. If a RandomState instance,
            random_state is the random number generator. If None,
            the random number generator is the RandomState instance
            used by `np.random`.

        Returns
        -------
        draws : dict
            Dictionary containing the results of sampling.
        """
        rng = check_random_state(random_state)

        # Only the outcome names are needed here; term details are handled
        # by the per-chain sampler.
        _, outcome_names, _, _ = \
            _get_outcome_and_optional_terms(
                formula_like, data=data,
                force_intercept=self.force_intercept)

        # Everything in `data` that is not an outcome is recorded as
        # constant data in the returned inference object.
        constant_data_names = None
        if data is not None:
            constant_data_names = [n for n in data
                                   if n not in outcome_names]

        n_chains = rdu.check_number_of_chains(n_chains)
        n_iter = rdu.check_number_of_iterations(n_iter)

        # A single chain gains nothing from parallelism.
        if n_jobs is None:
            n_jobs = -1
        elif n_chains == 1:
            n_jobs = 1

        # One distinct seed per chain, drawn without replacement.
        random_seeds = rng.choice(1000000 * n_chains,
                                  size=n_chains, replace=False)

        def _sample(seed):
            # Run one full prior-structure chain with this class's
            # hyperparameters and the given seed.
            return _sample_prior_full(
                formula_like, data=data,
                a_tau=self.a_tau, b_tau=self.b_tau, nu_sq=self.nu_sq,
                max_terms=max_terms, n_iter=n_iter,
                force_intercept=self.force_intercept,
                generate_prior_predictive=generate_prior_predictive,
                random_state=seed)

        samples = Parallel(n_jobs=n_jobs)(
            delayed(_sample)(seed) for seed in random_seeds)

        # Each chain result is (structure_samples, predictive_samples).
        structure_samples = [sample[0] for sample in samples]

        prior = {'samples': structure_samples,
                 'n_chains': len(samples),
                 'n_iter': n_iter,
                 'n_save': [n_iter] * n_chains,
                 'random_seeds': random_seeds}

        prior_predictive = None
        if generate_prior_predictive:
            generated_samples = [sample[1] for sample in samples]
            prior_predictive = {
                'samples': generated_samples,
                'n_chains': len(samples),
                'n_iter': n_iter,
                'n_save': [n_iter] * n_chains,
                'random_seeds': random_seeds}

        return convert_samples_dict_to_inference_data(
            prior=prior,
            prior_predictive=prior_predictive,
            observed_data=data, constant_data=data, save_warmup=True,
            observed_data_names=outcome_names,
            constant_data_names=constant_data_names)
def sample_parameters_prior(self, formula_like, data=None, n_chains=4,
n_iter=1000, n_jobs=-1, random_state=None,
generate_prior_predictive=False):
"""Sample parameters for model from conditional priors.
Parameters
----------
formula_like : object
Formula-like object describing the terms in the given model
(i.e., containing only those terms present in the model, not
the full set of possible predictors).
data : dict-like
Data to be used in constructing the model.
n_chains : int, default: 4
Number of chains.
n_iter : int, default: 1000
Number of samples per chain.
random_state : integer, RandomState or None
If an integer, random_state is the seed used by the
random number generator. If a RandomState instance,
random_state is the random number generator. If None,
the random number generator is the RandomState instance
used by `np.random`.
Returns
-------
draws : dict
Dictionary containing the results of sampling.
"""
rng = check_random_state(random_state)
n_chains = rdu.check_number_of_chains(n_chains)
n_iter = rdu.check_number_of_iterations(n_iter)
if n_jobs is None:
n_jobs = -1
elif n_chains == 1:
n_jobs = 1
y, X = patsy.dmatrices(formula_like, data=data)
y, X = _check_design_matrices(y, X)
outcome_names = y.design_info.column_names
constant_data_names = None
if data is not None:
constant_data_names = [n for n in data if n not in outcome_names]
random_seeds = rng.choice(1000000 * n_chains,
size=n_chains, replace=False)
def _sample(seed):
return _sample_prior_fixed_model(
formula_like, data=data, a_tau=self.a_tau, b_tau=self.b_tau,
nu_sq=self.nu_sq, n_iter=n_iter,
generate_prior_predictive=generate_prior_predictive,
random_state=seed)
samples = Parallel(n_jobs=n_jobs)(
delayed(_sample)(seed) for seed in random_seeds)
parameter_samples = [sample[0] for sample in samples]
prior = {'samples': parameter_samples,
'n_chains': len(samples),
'n_iter': n_iter,
'n_save': [n_iter] * n_chains,
'random_seeds': random_seeds}
prior_predictive = None
if generate_prior_predictive:
generated_samples = [sample[1] for sample in samples]
prior_predictive = {
'samples': generated_samples,
'n_chains': len(samples),
'n_iter': n_iter,
'n_save': [n_iter] * n_chains,
'random_seeds': random_seeds}
return convert_samples_dict_to_inference_data(
prior=prior,
prior_predictive=prior_predictive,
observed_data=data, constant_data=data, save_warmup=True,
observed_data_names=outcome_names,
constant_data_names=constant_data_names)
    def sample_structures_posterior(self, formula_like, data=None,
                                    max_terms=None,
                                    n_chains=4, n_iter=1000, thin=1,
                                    warmup=None, n_jobs=-1,
                                    verbose=False, restart_file=None,
                                    init='random', random_state=None,
                                    generate_posterior_predictive=False):
        """Sample model structures from the posterior distribution.

        Delegates to ``_sample_posterior_full``, which explores the space
        of models (term subsets) rather than the parameters of a single
        fixed model.  (The previous docstring said "parameters"; that was
        a copy-paste from the sibling method.)

        Parameters
        ----------
        formula_like : object
            Formula-like object describing the full set of candidate
            terms from which structures are sampled.
        data : dict-like
            Data to be used in constructing the model.
        max_terms : int, optional
            Maximum number of terms allowed in a sampled model.
        n_chains : int, default: 4
            Number of chains.
        n_iter : int, default: 1000
            Number of samples per chain.
        thin : int, default: 1
            Interval used for thinning samples.
        warmup : int, optional
            Number of warm-up samples.
        n_jobs : int, default: -1
            Number of parallel jobs; -1 means use all available workers.
        verbose : bool, default: False
            If True, print progress information during sampling.
        restart_file : str, optional
            File from which to restart sampling -- semantics defined by
            ``_sample_posterior_full``.
        init : str, default: 'random'
            Initialization strategy passed to ``_sample_posterior_full``.
        random_state : integer, RandomState or None
            If an integer, random_state is the seed used by the
            random number generator. If a RandomState instance,
            random_state is the random number generator. If None,
            the random number generator is the RandomState instance
            used by `np.random`.
        generate_posterior_predictive : bool, default: False
            If True, additionally draw posterior predictive samples.

        Returns
        -------
        draws : dict
            Dictionary containing the results of sampling.
        """
        return _sample_posterior_full(
            formula_like, data=data,
            a_tau=self.a_tau, b_tau=self.b_tau, nu_sq=self.nu_sq,
            n_chains=n_chains, n_iter=n_iter, warmup=warmup, thin=thin,
            verbose=verbose, n_jobs=n_jobs, max_terms=max_terms,
            restart_file=restart_file, init=init,
            allow_exchanges=self.allow_exchanges,
            force_intercept=self.force_intercept,
            generate_posterior_predictive=generate_posterior_predictive,
            random_state=random_state)
def sample_parameters_posterior(self, formula_like, data=None,
n_chains=4, n_iter=1000, thin=1,
warmup=None, n_jobs=-1,
random_state=None,
generate_posterior_predictive=False):
"""Sample parameters for model from conditional posteriors.
Parameters
----------
formula_like : object
Formula-like object describing the terms in the given model
(i.e., containing only those terms present in the model, not
the full set of possible predictors).
data : dict-like
Data to be used in constructing the model.
n_chains : int, default: 4
Number of chains.
n_iter : int, default: 1000
Number of samples per chain.
thin : int, default: 1
Interval used for thinning samples.
warmup : int, optional
Number of warm-up samples.
random_state : integer, RandomState or None
If an integer, random_state is the seed used by the
random number generator. If a RandomState instance,
random_state is the random number generator. If None,
the random number generator is the RandomState instance
used by `np.random`.
Returns
-------
draws : dict
Dictionary containing the results of sampling.
"""
rng = check_random_state(random_state)
n_chains = rdu.check_number_of_chains(n_chains)
n_iter = rdu.check_number_of_iterations(n_iter)
if warmup is None:
| |
# Repository: casparschwa/beaconrunner
from eth2spec.config.config_util import apply_constants_config
from typing import (
Any, Callable, Dict, Set, Sequence, Tuple, Optional, TypeVar
)
from dataclasses import (
dataclass,
field,
)
from lru import LRU
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
from eth2spec.utils.ssz.ssz_typing import (
View, boolean, Container, List, Vector, uint64,
Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
)
from eth2spec.utils import bls
bls.bls_active = False
from eth2spec.utils.hash_function import hash
SSZObject = TypeVar('SSZObject', bound=View)
fork = 'phase0'
class Slot(uint64):
pass
class Epoch(uint64):
pass
class CommitteeIndex(uint64):
pass
class ValidatorIndex(uint64):
pass
class Gwei(uint64):
pass
class Root(Bytes32):
pass
class Version(Bytes4):
pass
class DomainType(Bytes4):
pass
class ForkDigest(Bytes4):
pass
class Domain(Bytes32):
pass
class BLSPubkey(Bytes48):
pass
class BLSSignature(Bytes96):
pass
def ceillog2(x: uint64) -> int:
    """Return the smallest integer ``i`` such that ``2**i >= x``.

    Only defined for positive ``x``.  For ``x == 0`` the expression
    ``(x - 1).bit_length()`` would silently yield a meaningless value
    (or underflow for unsigned types), so reject it explicitly -- this
    matches the fix later adopted by the upstream consensus specs.
    """
    assert x > 0, "ceillog2 accepts only positive values"
    return (x - 1).bit_length()
GENESIS_SLOT = Slot(0)
GENESIS_EPOCH = Epoch(0)
FAR_FUTURE_EPOCH = Epoch(2**64 - 1)
BASE_REWARDS_PER_EPOCH = 4
DEPOSIT_CONTRACT_TREE_DEPTH = 2**5
JUSTIFICATION_BITS_LENGTH = 4
ENDIANNESS = 'little'
ETH1_FOLLOW_DISTANCE = 2**10
MAX_COMMITTEES_PER_SLOT = 2**6
TARGET_COMMITTEE_SIZE = 2**7
MAX_VALIDATORS_PER_COMMITTEE = 2**11
MIN_PER_EPOCH_CHURN_LIMIT = 2**2
CHURN_LIMIT_QUOTIENT = 2**16
SHUFFLE_ROUND_COUNT = 90
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT = 2**14
MIN_GENESIS_TIME = 1578009600
HYSTERESIS_QUOTIENT = 4
HYSTERESIS_DOWNWARD_MULTIPLIER = 1
HYSTERESIS_UPWARD_MULTIPLIER = 5
MIN_DEPOSIT_AMOUNT = Gwei(2**0 * 10**9)
MAX_EFFECTIVE_BALANCE = Gwei(2**5 * 10**9)
EJECTION_BALANCE = Gwei(2**4 * 10**9)
EFFECTIVE_BALANCE_INCREMENT = Gwei(2**0 * 10**9)
GENESIS_FORK_VERSION = Version('0x00000000')
BLS_WITHDRAWAL_PREFIX = Bytes1('0x00')
MIN_GENESIS_DELAY = 86400
SECONDS_PER_SLOT = 12
SECONDS_PER_ETH1_BLOCK = 14
MIN_ATTESTATION_INCLUSION_DELAY = 2**0
SLOTS_PER_EPOCH = 2**5
MIN_SEED_LOOKAHEAD = 2**0
MAX_SEED_LOOKAHEAD = 2**2
MIN_EPOCHS_TO_INACTIVITY_PENALTY = 2**2
EPOCHS_PER_ETH1_VOTING_PERIOD = 2**5
SLOTS_PER_HISTORICAL_ROOT = 2**13
MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 2**8
SHARD_COMMITTEE_PERIOD = Epoch(2**8)
EPOCHS_PER_HISTORICAL_VECTOR = 2**16
EPOCHS_PER_SLASHINGS_VECTOR = 2**13
HISTORICAL_ROOTS_LIMIT = 2**24
VALIDATOR_REGISTRY_LIMIT = 2**40
BASE_REWARD_FACTOR = 2**6
WHISTLEBLOWER_REWARD_QUOTIENT = 2**9
PROPOSER_REWARD_QUOTIENT = 2**3
INACTIVITY_PENALTY_QUOTIENT = 2**24
MIN_SLASHING_PENALTY_QUOTIENT = 2**5
MAX_PROPOSER_SLASHINGS = 2**4
MAX_ATTESTER_SLASHINGS = 2**1
MAX_ATTESTATIONS = 2**7
MAX_DEPOSITS = 2**4
MAX_VOLUNTARY_EXITS = 2**4
DOMAIN_BEACON_PROPOSER = DomainType('0x00000000')
DOMAIN_BEACON_ATTESTER = DomainType('0x01000000')
DOMAIN_RANDAO = DomainType('0x02000000')
DOMAIN_DEPOSIT = DomainType('0x03000000')
DOMAIN_VOLUNTARY_EXIT = DomainType('0x04000000')
DOMAIN_SELECTION_PROOF = DomainType('0x05000000')
DOMAIN_AGGREGATE_AND_PROOF = DomainType('0x06000000')
SAFE_SLOTS_TO_UPDATE_JUSTIFIED = 2**3
TARGET_AGGREGATORS_PER_COMMITTEE = 2**4
RANDOM_SUBNETS_PER_VALIDATOR = 2**0
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION = 2**8
ATTESTATION_SUBNET_COUNT = 64
apply_constants_config(globals())
class Fork(Container):
previous_version: Version
current_version: Version
epoch: Epoch # Epoch of latest fork
class ForkData(Container):
current_version: Version
genesis_validators_root: Root
class Checkpoint(Container):
epoch: Epoch
root: Root
class Validator(Container):
pubkey: BLSPubkey
withdrawal_credentials: Bytes32 # Commitment to pubkey for withdrawals
effective_balance: Gwei # Balance at stake
slashed: boolean
# Status epochs
activation_eligibility_epoch: Epoch # When criteria for activation were met
activation_epoch: Epoch
exit_epoch: Epoch
withdrawable_epoch: Epoch # When validator can withdraw funds
class AttestationData(Container):
slot: Slot
index: CommitteeIndex
# LMD GHOST vote
beacon_block_root: Root
# FFG vote
source: Checkpoint
target: Checkpoint
class IndexedAttestation(Container):
attesting_indices: List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE]
data: AttestationData
signature: BLSSignature
class PendingAttestation(Container):
aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
data: AttestationData
inclusion_delay: Slot
proposer_index: ValidatorIndex
class Eth1Data(Container):
deposit_root: Root
deposit_count: uint64
block_hash: Bytes32
class HistoricalBatch(Container):
block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
class DepositMessage(Container):
pubkey: BLSPubkey
withdrawal_credentials: Bytes32
amount: Gwei
class DepositData(Container):
pubkey: BLSPubkey
withdrawal_credentials: Bytes32
amount: Gwei
signature: BLSSignature # Signing over DepositMessage
class BeaconBlockHeader(Container):
slot: Slot
proposer_index: ValidatorIndex
parent_root: Root
state_root: Root
body_root: Root
class SigningData(Container):
object_root: Root
domain: Domain
class AttesterSlashing(Container):
attestation_1: IndexedAttestation
attestation_2: IndexedAttestation
class Attestation(Container):
aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
data: AttestationData
signature: BLSSignature
class Deposit(Container):
proof: Vector[Bytes32, DEPOSIT_CONTRACT_TREE_DEPTH + 1] # Merkle path to deposit root
data: DepositData
class VoluntaryExit(Container):
epoch: Epoch # Earliest epoch when voluntary exit can be processed
validator_index: ValidatorIndex
class BeaconState(Container):
# Versioning
genesis_time: uint64
genesis_validators_root: Root
slot: Slot
fork: Fork
# History
latest_block_header: BeaconBlockHeader
block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT]
# Eth1
eth1_data: Eth1Data
eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH]
eth1_deposit_index: uint64
# Registry
validators: List[Validator, VALIDATOR_REGISTRY_LIMIT]
balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT]
# Randomness
randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR]
# Slashings
slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances
# Attestations
previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
# Finality
justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] # Bit set for every recent justified epoch
previous_justified_checkpoint: Checkpoint # Previous epoch snapshot
current_justified_checkpoint: Checkpoint
finalized_checkpoint: Checkpoint
class SignedVoluntaryExit(Container):
message: VoluntaryExit
signature: BLSSignature
class SignedBeaconBlockHeader(Container):
message: BeaconBlockHeader
signature: BLSSignature
class ProposerSlashing(Container):
signed_header_1: SignedBeaconBlockHeader
signed_header_2: SignedBeaconBlockHeader
class BeaconBlockBody(Container):
randao_reveal: BLSSignature
eth1_data: Eth1Data # Eth1 data vote
graffiti: Bytes32 # Arbitrary data
# Operations
proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS]
attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS]
attestations: List[Attestation, MAX_ATTESTATIONS]
deposits: List[Deposit, MAX_DEPOSITS]
voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
class BeaconBlock(Container):
slot: Slot
proposer_index: ValidatorIndex
parent_root: Root
state_root: Root
body: BeaconBlockBody
class SignedBeaconBlock(Container):
message: BeaconBlock
signature: BLSSignature
class Eth1Block(Container):
timestamp: uint64
deposit_root: Root
deposit_count: uint64
# All other eth1 block fields
class AggregateAndProof(Container):
aggregator_index: ValidatorIndex
aggregate: Attestation
selection_proof: BLSSignature
class SignedAggregateAndProof(Container):
message: AggregateAndProof
signature: BLSSignature
def integer_squareroot(n: uint64) -> uint64:
    """
    Return the largest integer ``x`` such that ``x**2 <= n``.
    """
    # Integer Newton's method: start at n (>= floor(sqrt(n))) and descend
    # monotonically; the iteration converges to floor(sqrt(n)) and the
    # loop exits as soon as the next estimate stops decreasing.
    estimate = n
    next_estimate = (estimate + 1) // 2
    while next_estimate < estimate:
        estimate = next_estimate
        next_estimate = (estimate + n // estimate) // 2
    return estimate
def xor(bytes_1: Bytes32, bytes_2: Bytes32) -> Bytes32:
"""
Return the exclusive-or of two 32-byte strings.
"""
return Bytes32(a ^ b for a, b in zip(bytes_1, bytes_2))
def int_to_bytes(n: uint64, length: uint64) -> bytes:
"""
Return the ``length``-byte serialization of ``n`` in ``ENDIANNESS``-endian.
"""
return n.to_bytes(length, ENDIANNESS)
def bytes_to_int(data: bytes) -> uint64:
"""
Return the integer deserialization of ``data`` interpreted as ``ENDIANNESS``-endian.
"""
return int.from_bytes(data, ENDIANNESS)
def is_active_validator(validator: Validator, epoch: Epoch) -> bool:
"""
Check if ``validator`` is active.
"""
return validator.activation_epoch <= epoch < validator.exit_epoch
def is_eligible_for_activation_queue(validator: Validator) -> bool:
"""
Check if ``validator`` is eligible to be placed into the activation queue.
"""
return (
validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH
and validator.effective_balance == MAX_EFFECTIVE_BALANCE
)
def is_eligible_for_activation(state: BeaconState, validator: Validator) -> bool:
"""
Check if ``validator`` is eligible for activation.
"""
return (
# Placement in queue is finalized
validator.activation_eligibility_epoch <= state.finalized_checkpoint.epoch
# Has not yet been activated
and validator.activation_epoch == FAR_FUTURE_EPOCH
)
def is_slashable_validator(validator: Validator, epoch: Epoch) -> bool:
"""
Check if ``validator`` is slashable.
"""
return (not validator.slashed) and (validator.activation_epoch <= epoch < validator.withdrawable_epoch)
def is_slashable_attestation_data(data_1: AttestationData, data_2: AttestationData) -> bool:
    """
    Check if ``data_1`` and ``data_2`` are slashable according to Casper FFG rules.
    """
    # Double vote: two distinct attestations voting for the same target epoch.
    is_double_vote = data_1 != data_2 and data_1.target.epoch == data_2.target.epoch
    # Surround vote: data_1's source/target span strictly contains data_2's.
    is_surround_vote = (data_1.source.epoch < data_2.source.epoch
                        and data_2.target.epoch < data_1.target.epoch)
    return is_double_vote or is_surround_vote
def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool:
"""
Check if ``indexed_attestation`` is not empty, has sorted and unique indices and has a valid aggregate signature.
"""
# Verify indices are sorted and unique
indices = indexed_attestation.attesting_indices
if len(indices) == 0 or not indices == sorted(set(indices)):
return False
# Verify aggregate signature
pubkeys = [state.validators[i].pubkey for i in indices]
domain = get_domain(state, DOMAIN_BEACON_ATTESTER, indexed_attestation.data.target.epoch)
signing_root = compute_signing_root(indexed_attestation.data, domain)
return bls.FastAggregateVerify(pubkeys, signing_root, indexed_attestation.signature)
def is_valid_merkle_branch(leaf: Bytes32, branch: Sequence[Bytes32], depth: uint64, index: uint64, root: Root) -> bool:
    """
    Check if ``leaf`` at ``index`` verifies against the Merkle ``root`` and ``branch``.
    """
    # Hash upward from the leaf.  At level i, bit i of `index` says whether
    # the current node is a right child (bit set) or a left child, which
    # fixes the concatenation order with the sibling taken from `branch`.
    node = leaf
    for level in range(depth):
        sibling = branch[level]
        if index // (2**level) % 2:
            node = hash(sibling + node)
        else:
            node = hash(node + sibling)
    return node == root
def compute_shuffled_index(index: uint64, index_count: uint64, seed: Bytes32) -> uint64:
    """
    Return the shuffled index corresponding to ``seed`` (and ``index_count``).
    """
    assert index < index_count
    # Swap or not (https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf)
    # See the 'generalized domain' algorithm on page 3
    for current_round in range(SHUFFLE_ROUND_COUNT):
        # Round-specific pivot derived from the seed (first 8 bytes of the hash).
        pivot = bytes_to_int(hash(seed + int_to_bytes(current_round, length=1))[0:8]) % index_count
        # Candidate position this index would swap with in the current round.
        flip = (pivot + index_count - index) % index_count
        # Use the larger of the pair so both members consult the same bit.
        position = max(index, flip)
        # One hash provides 256 decision bits; pick the byte/bit for `position`.
        source = hash(seed + int_to_bytes(current_round, length=1) + int_to_bytes(position // 256, length=4))
        byte = source[(position % 256) // 8]
        bit = (byte >> (position % 8)) % 2
        # Swap if the selected bit is set; otherwise keep the current index.
        index = flip if bit else index
    return index
def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32) -> ValidatorIndex:
    """
    Return from ``indices`` a random index sampled by effective balance.
    """
    assert len(indices) > 0
    MAX_RANDOM_BYTE = 2**8 - 1
    i = 0
    while True:
        # Walk a seeded shuffle of the candidate set, cycling if needed.
        candidate_index = indices[compute_shuffled_index(i % len(indices), len(indices), seed)]
        # One pseudo-random byte per step; each hash supplies 32 of them.
        random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32]
        effective_balance = state.validators[candidate_index].effective_balance
        # Accept with probability effective_balance / MAX_EFFECTIVE_BALANCE,
        # so candidates are selected proportionally to their stake.
        if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
            return candidate_index
        i += 1
def compute_committee(indices: Sequence[ValidatorIndex],
                      seed: Bytes32,
                      index: uint64,
                      count: uint64) -> Sequence[ValidatorIndex]:
    """
    Return the committee corresponding to ``indices``, ``seed``, ``index``, and committee ``count``.
    """
    # Committee `index` owns the slice [start, end) of the shuffled
    # validator set; the `count` committees partition the set evenly.
    total = len(indices)
    start = (total * index) // count
    end = (total * (index + 1)) // count
    committee = []
    for position in range(start, end):
        committee.append(indices[compute_shuffled_index(position, total, seed)])
    return committee
def compute_epoch_at_slot(slot: Slot) -> Epoch:
"""
Return the epoch number at ``slot``.
"""
return Epoch(slot // SLOTS_PER_EPOCH)
def compute_start_slot_at_epoch(epoch: Epoch) -> Slot:
"""
Return the start slot of ``epoch``.
"""
return Slot(epoch * SLOTS_PER_EPOCH)
def compute_activation_exit_epoch(epoch: Epoch) -> Epoch:
"""
Return the epoch during which validator activations and exits initiated in ``epoch`` take effect.
"""
return Epoch(epoch + 1 + MAX_SEED_LOOKAHEAD)
def compute_fork_data_root(current_version: Version, genesis_validators_root: Root) -> Root:
"""
Return the 32-byte fork data root for the ``current_version`` and ``genesis_validators_root``.
This is used primarily in signature domains to avoid collisions across forks/chains.
"""
return hash_tree_root(ForkData(
current_version=current_version,
genesis_validators_root=genesis_validators_root,
| |
fiftees_all = []
sixtees_all = []
seventees_all = []
eightees_all = []
ninetees_all = []
tens_all = []
zeroes_all = []
for i in id_dict:
for j in id_dict[i]:
if i == 'fiftees':
fiftees_all.append(id_dict[i][j])
if i == 'sixtees':
sixtees_all.append(id_dict[i][j])
if i == 'seventees':
seventees_all.append(id_dict[i][j])
if i == 'eightees':
eightees_all.append(id_dict[i][j])
if i == 'ninetees':
ninetees_all.append(id_dict[i][j])
if i == 'tens':
tens_all.append(id_dict[i][j])
if i == 'zeroes':
zeroes_all.append(id_dict[i][j])
return [merge_and_rerank(fiftees_all), merge_and_rerank(sixtees_all), merge_and_rerank(seventees_all),
merge_and_rerank(eightees_all), merge_and_rerank(ninetees_all), merge_and_rerank(tens_all),
merge_and_rerank(zeroes_all)]
def merge_pre(mode):
    """Load per-decade result files for the ID and Full methods and merge them.

    NOTE(review): paths are hard-coded absolute Windows paths -- consider
    making them configurable parameters.

    Parameters
    ----------
    mode : object
        Forwarded to ``find_both_file`` to select which output files to
        load; exact semantics defined by that helper.

    Returns
    -------
    tuple
        ``(id_, full_)`` -- merged per-decade results for the ID method
        and the Full method respectively.
    """
    path = 'G:/thesis-gaussian_id/MDI/libmaxdiv/maxdiv/output/final_experiments/time_only/full_region/ERA5_summed/decades/'
    path1 = 'G:/thesis-master/MDI/libmaxdiv/maxdiv/output/final_experiments/time_only/full_region/ERA5_summed/decades/'
    id_, full_ = find_both_file(path, path1, mode)
    id_dict, full_dict = get_decade_data(path, path1, id_, full_)
    id_ = get_merged_decades_data(id_dict)
    full_ = get_merged_decades_data(full_dict)
    return id_, full_
def merge_and_rerank(decade):
    """Flatten a list of detection lists and renumber the rank field.

    Each detection is a mutable sequence whose fifth element (index 4)
    is overwritten in place with its position in the flattened order.
    """
    merged = []
    for group in decade:
        for detection in group:
            # New rank is simply the running position in the merged list.
            detection[4] = len(merged)
            merged.append(detection)
    return merged
def get_merged_decades_data(id_dict):
    """Collect per-run detection lists for each decade and merge them.

    Replaces seven copy-pasted ``if`` chains with a table-driven loop;
    the output is unchanged.

    Parameters
    ----------
    id_dict : dict
        Mapping of decade name -> mapping of run key -> list of
        detections.  Keys outside the seven known decade names are
        ignored (as before, where they simply matched no ``if``).

    Returns
    -------
    list
        Seven merged-and-reranked detection lists in the order
        fiftees, sixtees, seventees, eightees, ninetees, tens, zeroes.
    """
    # NOTE: 'tens' deliberately precedes 'zeroes' in the output order,
    # mirroring the original return statement.
    decade_order = ['fiftees', 'sixtees', 'seventees', 'eightees',
                    'ninetees', 'tens', 'zeroes']
    buckets = {name: [] for name in decade_order}
    for decade_name, runs in id_dict.items():
        if decade_name in buckets:
            for run_key in runs:
                buckets[decade_name].append(runs[run_key])
    return [merge_and_rerank(buckets[name]) for name in decade_order]
# merge them together instead of list of lists form
def merge_whole(decade):
    """Flatten one level of nesting: a list of lists becomes one list."""
    return [item for sublist in decade for item in sublist]
def Iou_shaomu(R, B):
    """Intersection-over-union of two 1-D intervals.

    Parameters
    ----------
    R, B : sequence
        Intervals given as (start, end) in their first two elements.

    Returns
    -------
    float
        IoU in [0, 1]; 0.0 for disjoint intervals.  Fix: when both
        intervals are degenerate (zero length) the original raised
        ZeroDivisionError; that case now returns 0.0.
    """
    start1 = R[0]
    len1 = R[1] - R[0]
    start2 = B[0]
    len2 = B[1] - B[0]
    intersection = max(0, min(start1 + len1, start2 + len2) - max(start1, start2))
    union = len1 + len2 - intersection
    if union <= 0:
        # Both intervals empty (or malformed): define IoU as 0 rather
        # than dividing by zero.
        return 0.0
    return float(intersection) / union
def iou_filter_perdecade(id_ts_fif):
    """Drop overlapping detections within one decade, keeping the
    best-scoring member of each overlap group, then re-rank by score.

    Fix: the original guard ``([a, b, iou] and [b, a, iou] not in Iou_)
    == True`` never evaluated its first operand as a membership test --
    ``[a, b, iou]`` is always truthy, so only the second operand
    mattered.  The condition is rewritten as exactly what was actually
    evaluated (skip a pair whose reverse is already recorded), which
    preserves behaviour while removing the misleading dead expression.

    Parameters
    ----------
    id_ts_fif : list
        Detections as mutable sequences ``[start, end, score, ..., rank]``
        (score at index 2, rank at index 4).  Modified in place by the
        removal step; the returned list is a re-sorted copy reference.

    Returns
    -------
    list
        Surviving detections sorted by score descending, with the rank
        field (index 4) rewritten to the new position.
    """
    # Record each overlapping pair exactly once (skip the reversed
    # duplicate).  Comparison is by value: equal-valued detections are
    # treated as the same detection, matching the original `!=` test.
    overlaps = []
    for i in range(len(id_ts_fif)):
        for j in range(len(id_ts_fif)):
            if id_ts_fif[i] != id_ts_fif[j]:
                iou = Iou_shaomu(id_ts_fif[i], id_ts_fif[j])
                if iou > 0.:
                    if [id_ts_fif[j], id_ts_fif[i], iou] not in overlaps:
                        overlaps.append([id_ts_fif[i], id_ts_fif[j], iou])
    # Group overlapping detections keyed by the pair's first detection.
    groups = {}
    for first, second, _ in overlaps:
        groups[str(first)] = []
    for first, second, _ in overlaps:
        key = str(first)
        if first not in groups[key]:
            groups[key].append(first)
    for first, second, _ in overlaps:
        key = str(first)
        if second not in groups[key]:
            groups[key].append(second)
    # Within each group keep only the highest-scoring detection; all
    # other members are scheduled for removal.
    for key in groups:
        groups[key] = sorted(groups[key], key=lambda x: x[2], reverse=True)
    drop = []
    for key in groups:
        for item in groups[key][1:]:
            if item not in drop:
                drop.append(item)
    for item in drop:
        id_ts_fif.remove(item)
    # Re-rank survivors by score (26 April updated version).
    id_ts_fif = sorted(id_ts_fif, key=lambda x: x[2], reverse=True)
    for position in range(len(id_ts_fif)):
        id_ts_fif[position][4] = position
    return id_ts_fif
def iou_filter(whole):
    """Apply per-decade IoU filtering to a flat list of 2800 detections.

    Replaces seven copy-pasted slice/filter stanzas with one loop; the
    slice boundaries and output order are unchanged.

    Parameters
    ----------
    whole : list
        2800 detections: seven consecutive 400-entry decade slices in
        the order fiftees, sixtees, seventees, eightees, ninetees,
        zeroes, tens (note: zeroes before tens, per the original slices).

    Returns
    -------
    list
        Seven filtered decade lists, in the same slice order.
    """
    chunk = 400
    # Deep-copy each slice so filtering never mutates the caller's data
    # (iou_filter_perdecade removes elements in place).
    return [iou_filter_perdecade(copy.deepcopy(whole[k * chunk:(k + 1) * chunk]))
            for k in range(7)]
def np_rank(merge_decade):
    """Split matched detection pairs into per-decade rank arrays.

    Parameters
    ----------
    merge_decade : dict
        Maps a decade name to a list of ``[id_det, full_det, iou]``
        triples, where element 4 of each detection is its rank.

    Returns
    -------
    tuple of list of numpy.ndarray
        ``(id_arrays, full_arrays)`` -- one rank array per decade, in
        the fixed order below.
    """
    decade_order = ['fiftees', 'sixtees', 'seventees', 'eightees',
                    'ninetees', 'zeroes', 'tens']
    id_ranks = {name: [] for name in decade_order}
    full_ranks = {name: [] for name in decade_order}
    for decade in merge_decade:
        for pair in merge_decade[decade]:
            # pair[0] is the ID detection, pair[1] the Full detection.
            id_ranks[decade].append(pair[0][4])
            full_ranks[decade].append(pair[1][4])
    np_id_iou = [np.array(id_ranks[name]) for name in decade_order]
    np_full_iou = [np.array(full_ranks[name]) for name in decade_order]
    return np_id_iou, np_full_iou
def get_whole_result(id_, full_, iou_threshold):
    """Match ID-method and Full-method detections by interval overlap.

    Returns a list of ``[id_detection, full_detection, iou]`` triples
    for every cross pair whose IoU strictly exceeds ``iou_threshold``.
    """
    matches = []
    for id_det in id_:
        for full_det in full_:
            overlap = Iou_shaomu(id_det, full_det)
            if overlap > iou_threshold:
                matches.append([id_det, full_det, overlap])
    return matches
def merge_process_decade(capzero):
    """For each distinct ID detection, keep only its best-scoring match.

    Parameters
    ----------
    capzero : list
        Match triples ``[id_det, full_det, iou]``; detections are
        sequences whose first two elements identify the interval and
        whose third element (index 2) is the score.

    Returns
    -------
    list
        For every key ``"start end"`` (built from the ID detection's
        first two fields) that has at least one *other* triple sharing
        the same ID detection: the candidate with the highest full-
        detection score (``x[1][2]``).  Triples whose ID detection is
        unique in the input produce no output -- presumably intended,
        but worth confirming against callers.
    """
    # Deep copy so that value-equality comparisons below are against a
    # stable snapshot while we read from the original list.
    compare_t = copy.deepcopy(capzero)
    comt = {}
    # One (possibly shared) bucket per ID-detection key.
    for i in compare_t:
        name = str(i[0][0]) + ' ' + str(i[0][1])
        comt[name] = []
    # Gather, per key, every other triple sharing the same ID detection.
    # Note: `i != j` and `i[0] == j[0]` compare by value, not identity.
    for i in compare_t:
        for j in capzero:
            if i != j:
                if i[0] == j[0]:
                    name = str(i[0][0]) + ' ' + str(i[0][1])
                    if j not in comt[name]:
                        comt[name].append(j)
    pop = []  # unused; kept for the commented-out pruning logic below
    new = []
    for key in comt:
        # Best candidate first: sort by the full detection's score.
        comt[key] = sorted(comt[key], key=lambda x: x[1][2], reverse=True)
        if len(comt[key]) > 0:
            new.append(comt[key][0])
        # comt[key] = comt[key][0]
        # if len(comt[key])==0:
        #     pop.append(key)
    # for i in pop:
    #     comt.pop(i)
    return new
def plot_mer_per_de(merge_decade, mode, reverse, criterion,s_coef, s_p, p_coef, p_p):
import matplotlib as mpl
cmap = mpl.cm.cool
x = {'fiftees': [], 'sixtees': [], 'seventees': [], 'eightees': [], 'ninetees': [], 'zeroes': [], 'tens': []}
y = {'fiftees': [], 'sixtees': [], 'seventees': [], 'eightees': [], 'ninetees': [], 'zeroes': [], 'tens': []}
rank_dis = {'fiftees': [], 'sixtees': [], 'seventees': [], 'eightees': [], 'ninetees': [], 'zeroes': [], 'tens': []}
iou_score = {'fiftees': [], 'sixtees': [], 'seventees': [], 'eightees': [], 'ninetees': [], 'zeroes': [],
'tens': []}
iou_score_mf = {'fiftees': [], 'sixtees': [], 'seventees': [], 'eightees': [], 'ninetees': [], 'zeroes': [],
'tens': []}
for decade in merge_decade:
for i in merge_decade[decade]:
x[decade].append(i[0][4])
y[decade].append(i[1][4])
rank_dis[decade].append(abs(i[1][4] - i[0][4]))
iou_score[decade].append(i[2])
iou_score_mf[decade].append(i[2] * 50)
if reverse == True:
xlabel = 'Rank of ID method results'
ylabel = 'Rank of Full method results'
else:
ylabel = 'Rank of ID method results'
xlabel = 'Rank of Full method results'
if criterion == 'ce':
cri_ = 'For Cross Entropy criterion'
if criterion == 'kl':
cri_ = 'For Kullback-Leibler criterion'
if str(mode) == 'default':
fig = plt.figure(figsize=(15, 8))
plt.subplots_adjust(hspace = 0.5)
fig.suptitle('Visualization for Merged decades' + '(' + cri_ + ')', fontsize=15)
sub1 = fig.add_subplot(241)
sub1.scatter(x['fiftees'], y['fiftees'], s=40, cmap='Greens')
per_s1 = '{:.2%}'.format(s_coef[0])
s_p1 = '{:.3f}'.format(s_p[0])
per_p1 = '{:.2%}'.format(p_coef[0])
p_p1 = '{:.3f}'.format(p_p[0])
sub1.set_xlabel('\nSpearmans coef: {}, P-value: {}\n\nPearson coef: {}, P-value: {}'.format(per_s1,s_p1,per_p1,p_p1))
sub2 = fig.add_subplot(242)
sub2.scatter(x['sixtees'], y['sixtees'], s=40, cmap='Greens')
per_s2 = '{:.2%}'.format(s_coef[1])
s_p2 = '{:.3f}'.format(s_p[1])
per_p2 = '{:.2%}'.format(p_coef[1])
p_p2 = '{:.3f}'.format(p_p[1])
sub2.set_xlabel('\nSpearmans coef: {}, P-value: {}\n\nPearson coef: {}, P-value: {}'.format(per_s2,s_p2,per_p2,p_p2))
sub3 = fig.add_subplot(243)
sub3.scatter(x['seventees'], y['seventees'], s=40, cmap='Greens')
per_s3 = '{:.2%}'.format(s_coef[2])
s_p3 = '{:.3f}'.format(s_p[2])
per_p3 = '{:.2%}'.format(p_coef[2])
p_p3 = '{:.3f}'.format(p_p[2])
sub3.set_xlabel('\nSpearmans coef: {}, P-value: {}\n\nPearson coef: {}, P-value: {}'.format(per_s3,s_p3,per_p3,p_p3))
sub4 = fig.add_subplot(244)
sub4.scatter(x['eightees'], y['eightees'], s=40, cmap='Greens')
per_s4 = '{:.2%}'.format(s_coef[3])
s_p4 = '{:.3f}'.format(s_p[3])
per_p4 = '{:.2%}'.format(p_coef[3])
p_p4 = '{:.3f}'.format(p_p[3])
sub4.set_xlabel('\nSpearmans coef: {}, P-value: {}\n\nPearson coef: {}, P-value: {}'.format(per_s4,s_p4,per_p4,p_p4))
sub5 = fig.add_subplot(245)
sub5.scatter(x['ninetees'], y['ninetees'], s=40, cmap='Greens')
per_s5 = '{:.2%}'.format(s_coef[4])
s_p5 = '{:.3f}'.format(s_p[4])
per_p5 = '{:.2%}'.format(p_coef[4])
p_p5 = '{:.3f}'.format(p_p[4])
sub5.set_xlabel('\nSpearmans coef: {}, P-value: {}\n\nPearson coef: {}, P-value: {}'.format(per_s5,s_p5,per_p5,p_p5))
sub6 = fig.add_subplot(246)
sub6.scatter(x['zeroes'], y['zeroes'], s=40, cmap='Greens')
per_s6 = '{:.2%}'.format(s_coef[5])
s_p6 = '{:.3f}'.format(s_p[5])
per_p6 = '{:.2%}'.format(p_coef[5])
p_p6 = '{:.3f}'.format(p_p[5])
sub6.set_xlabel('\nSpearmans coef: {}, P-value: {}\n\nPearson coef: {}, P-value: {}'.format(per_s6,s_p6,per_p6,p_p6))
sub7 = fig.add_subplot(247)
sub7.scatter(x['tens'], y['tens'], s=40, cmap='Greens')
per_s7 = '{:.2%}'.format(s_coef[6])
s_p7 = '{:.3f}'.format(s_p[6])
per_p7 = '{:.2%}'.format(p_coef[6])
p_p7 = '{:.3f}'.format(p_p[6])
sub7.set_xlabel('\nSpearmans coef: {}, P-value: {}\n\nPearson coef: {}, P-value: {}'.format(per_s7,s_p7,per_p7,p_p7))
avg_sp = '{:.2%}'.format(np.mean(s_coef))
avg_pearson = '{:.2%}'.format(np.mean(p_coef))
fig.text(0.5, -0.05, xlabel, ha="center", va="center", fontsize=13.5)
fig.text(0.08, 0.5, ylabel, ha="center", va="center", rotation=90, fontsize=13.5)
fig.text(0.8, 0.25,'Averaged Spearmans coef: {}\n\nAveraged Pearson coef: {}'.format(avg_sp,avg_pearson), ha="center", va="center", fontsize=11)
if str(mode) == 'color_iou':
fig = plt.figure(figsize=(15, 8))
plt.subplots_adjust(hspace=0.5)
fig.suptitle('Visualization for Merged decades' + '(' + cri_ + ')', fontsize=15)
sub1 = fig.add_subplot(241)
sub1.scatter(x['fiftees'], y['fiftees'], s=40, c=iou_score['fiftees'], cmap=cmap)
per_s1 = '{:.2%}'.format(s_coef[0])
s_p1 = '{:.3f}'.format(s_p[0])
per_p1 = '{:.2%}'.format(p_coef[0])
p_p1 = '{:.3f}'.format(p_p[0])
sub1.set_xlabel('\nSpearmans coef: {}, P-value: {}\n\nPearson coef: {}, P-value: {}'.format(per_s1, s_p1, per_p1, p_p1))
sub2 = fig.add_subplot(242)
sub2.scatter(x['sixtees'], y['sixtees'], s=40, c=iou_score['sixtees'], cmap=cmap)
per_s2 = '{:.2%}'.format(s_coef[1])
s_p2 = '{:.3f}'.format(s_p[1])
per_p2 = '{:.2%}'.format(p_coef[1])
p_p2 = '{:.3f}'.format(p_p[1])
sub2.set_xlabel('\nSpearmans coef: {}, P-value: {}\n\nPearson coef: {}, P-value: {}'.format(per_s2, s_p2, per_p2, p_p2))
sub3 = fig.add_subplot(243)
sub3.scatter(x['seventees'], y['seventees'], s=40, c=iou_score['seventees'], cmap=cmap)
per_s3 = '{:.2%}'.format(s_coef[2])
s_p3 = '{:.3f}'.format(s_p[2])
per_p3 = '{:.2%}'.format(p_coef[2])
p_p3 = '{:.3f}'.format(p_p[2])
sub3.set_xlabel('\nSpearmans coef: {}, P-value: {}\n\nPearson coef: {}, P-value: {}'.format(per_s3, s_p3, per_p3, p_p3))
sub4 = fig.add_subplot(244)
sub4.scatter(x['eightees'], y['eightees'], s=40, c=iou_score['eightees'], cmap=cmap)
per_s4 = '{:.2%}'.format(s_coef[3])
s_p4 = '{:.3f}'.format(s_p[3])
per_p4 = '{:.2%}'.format(p_coef[3])
p_p4 = '{:.3f}'.format(p_p[3])
sub4.set_xlabel('\nSpearmans coef: {}, P-value: {}\n\nPearson coef: {}, P-value: {}'.format(per_s4, s_p4, per_p4, p_p4))
sub5 = fig.add_subplot(245)
sub5.scatter(x['ninetees'], y['ninetees'], s=40, c=iou_score['ninetees'], cmap=cmap)
per_s5 = '{:.2%}'.format(s_coef[4])
s_p5 = '{:.3f}'.format(s_p[4])
per_p5 = '{:.2%}'.format(p_coef[4])
p_p5 = '{:.3f}'.format(p_p[4])
sub5.set_xlabel('\nSpearmans coef: {}, P-value: {}\n\nPearson coef: {}, P-value: {}'.format(per_s5, s_p5, per_p5, p_p5))
sub6 = fig.add_subplot(246)
sub6.scatter(x['zeroes'], y['zeroes'], s=40, c=iou_score['zeroes'], cmap=cmap)
per_s6 = '{:.2%}'.format(s_coef[5])
s_p6 = '{:.3f}'.format(s_p[5])
per_p6 | |
# GitHub stars: 10-100
"""
Tools related to comparing time series, typically model-obs or model-model.
"""
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
import logging as log
import matplotlib.gridspec as gridspec
from stompy import filters
from matplotlib import dates
from scipy.stats import spearmanr
from stompy.model import hydro_model as hm
from stompy import (xr_utils, utils)
def period_union(sources):
    """Return (earliest start, latest end) across all sources.

    Parameters
    ----------
    sources : iterable of xr.DataArray
        Each must have a ``time`` coordinate.  Assumes time is sorted
        ascending so the first/last entries bound the series -- TODO
        confirm against callers.

    Returns
    -------
    tuple
        ``(t_min, t_max)`` as numpy datetime values (``.values`` is
        used when assigning).
    """
    t_min=t_max=None
    for da in sources:
        if t_min is None or da.time[0]<t_min:
            t_min=da.time.values[0]
        if t_max is None or da.time[-1]>t_max:
            t_max=da.time.values[-1]
    return t_min,t_max
def period_intersection(sources):
    """Return (latest start, earliest end) across all sources -- the
    time span covered by *every* source.

    Assumes each source's ``time`` coordinate is sorted ascending --
    TODO confirm.  When the sources do not overlap the result has
    ``t_min > t_max``; callers should check for that.

    Parameters
    ----------
    sources : iterable of xr.DataArray
        Each must have a ``time`` coordinate.

    Returns
    -------
    tuple
        ``(t_min, t_max)`` as numpy datetime values.
    """
    t_min=t_max=None
    for da in sources:
        if t_min is None or da.time[0]>t_min:
            t_min=da.time.values[0]
        if t_max is None or da.time[-1]<t_max:
            t_max=da.time.values[-1]
    return t_min,t_max
def combine_sources(all_sources, dt=np.timedelta64(900,'s'), min_period=True):
    """
    Resample multiple DataArray time series to a common timebase.

    all_sources: list of xr.DataArray, or hm.BC instances which are expanded
        to DataArrays via their .data() method.
    dt: each input is resampled at this time step.
    min_period: True => time period defined by the intersection of all sources,
        otherwise the union of all source time periods.

    Returns a DataArray with a new 'source' dimension, or None when there is
    no usable overlap.
    """
    # Scan the overall time range first -- used below to tell BC instances
    # what period of data to fetch.
    t_min = None
    t_max = None
    for src in all_sources:
        if len(src.time) == 0:
            continue
        if (t_min is None) or (t_min > src.time.min()):
            t_min = src.time.min()
        if (t_max is None) or (t_max < src.time.max()):
            t_max = src.time.max()

    # Expand BC instances into concrete DataArrays.
    new_sources = []
    for src in all_sources:
        if isinstance(src, hm.BC):
            # Now get the real data.
            src.data_start = t_min
            src.data_stop = t_max
            new_sources.append(src.data())
        else:
            new_sources.append(src)
    sources = new_sources

    # For many plots and metrics need a common timeline --
    # Get them on common time frames.
    # Bug fix: this section previously kept using all_sources, so any BC
    # expansion above was ignored; operate on the expanded 'sources' list.
    empty = [len(da) == 0 for da in sources]
    if min_period:
        if np.any(empty):
            print("Empty time series")
            return None
        t_min, t_max = period_intersection(sources)
    else:
        if np.all(empty):
            print("All empty time series")
            return None
        t_min, t_max = period_union(sources)

    # Bug fix: dt was unconditionally reset to 900s here, silently ignoring
    # the caller-supplied dt argument.  Honor the parameter instead.
    resample_bins = np.arange(utils.floor_dt64(t_min, dt),
                              utils.ceil_dt64(t_max, dt) + dt,
                              dt)
    if len(resample_bins) < 2:
        log.warning("No overlapping data")
        return None
    bin_labels = resample_bins[:-1]

    # All data arrays get renamed to the field name of the first one
    field_name = sources[0].name

    def resample(da):
        # groupby_bins allows for specifying the exact bins and labels,
        # simplifying concatenation below.
        da = da.rename(field_name)
        # having trouble with groupby_bins on datetimes, so bin on dnum
        da['dnum'] = ('time',), utils.to_dnum(da.time)
        bins = utils.to_dnum(resample_bins)
        # dim='time' is needed for vector-valued data to indicate not to
        # take the mean across vector components, just within bins on the
        # time axis
        da_r = (da.groupby_bins('dnum', bins, labels=bin_labels)
                .mean(dim='time')
                .rename(dnum_bins='time')
                .to_dataset())
        return da_r

    resampled = [resample(da) for da in sources]
    combined = xr_utils.concat_permissive(resampled, dim='source')[field_name]
    return combined
def assemble_comparison_data(models, observations, model_labels=None,
                             period='model',
                             extract_options={}):
    """
    Extract data from one or more model runs to match one or more observations.

    models: list of HydroModel instances.
    observations: list of DataArrays representing time series.
      The first observation must have lon and lat fields defining where to
      extract data from in the model.  Alternatively, can pass a BC object,
      allowing the auto-download and translate code for BCs to be reused for
      managing validation data.
      The first observation determines what data is extracted from the model:
      if a DataArray, it should have a name of water_level or flow; if a BC
      object, the class of the object (FlowBC, StageBC) determines what to
      extract from the model.
    model_labels: optional list of labels, one per model.
    period: 'model' => request BC data over the first model's run period,
      otherwise a [start, stop] pair.
    extract_options: overrides for the inferred extraction options.

    Returns a tuple: ( [list of dataarrays], combined dataset ).
    """
    if model_labels is None:
        if len(models) == 1:
            model_labels = ["Model"]
        else:
            model_labels = []
            # Bug fix: this loop used to read
            #   for m in i,models in enumerate(models):
            # which raised NameError at runtime and referenced an undefined
            # 'model'.  Iterate properly, falling back to a numbered label.
            for i, model in enumerate(models):
                try:
                    model_labels.append(model.label)
                except AttributeError:
                    model_labels.append("Model %d" % (i + 1))
    else:
        assert len(model_labels) >= len(models), "Not enough model labels supplied"

    # Collect inferred options for extracting model data, which
    # can later be overridden by extract_options
    loc_extract_opts = dict()

    # Convert BC instances into dataarrays
    new_obs = []
    for oi, obs in enumerate(observations):
        if isinstance(obs, hm.BC):
            # Have to decide at this point what period of data to request
            if period == 'model':  # the first model, no chaining
                period = [models[0].run_start, models[0].run_stop]
            bc = obs
            bc.data_start = period[0]
            bc.data_stop = period[1]
            obs = bc.data()
            if oi == 0:
                # This BC/dataarray will define where model data is extracted,
                # so try to get location information if it exists.
                # could get fancy and try to query the gazetteer, but for now
                # just assume BC had a good name, that will match the output
                loc_extract_opts['name'] = bc.name
        new_obs.append(obs)
    orig_obs = observations
    observations = new_obs

    # Extract relevant variable and location from model
    base_obs = observations[0]  # defines the variable and location for extracting model data
    base_var = base_obs.name    # e.g. 'water_level', 'flow'
    try:
        loc_extract_opts['lon'] = base_obs.lon
        loc_extract_opts['lat'] = base_obs.lat
    except AttributeError:
        pass
    try:
        loc_extract_opts['x'] = base_obs.x
        loc_extract_opts['y'] = base_obs.y
    except AttributeError:
        pass

    if base_var == 'water_level':
        # there are numerous very similar standard names, mostly depending
        # on the datum. the models never know the true datum, so it's
        # arbitrary exactly which standard name is used.
        loc_extract_opts['data_vars'] = ['water_level']
    elif base_var == 'flow':
        # Not that many people use this... but it's the correct one.
        loc_extract_opts['data_vars'] = ['cross_section_discharge']
    elif base_var == 'salinity':
        loc_extract_opts['data_vars'] = ['salinity']
    elif base_var == 'inorganic_nitrogen_(nitrate_and_nitrite)':
        # want to extract both to calculate age and compare with nitrogen
        loc_extract_opts['data_vars'] = ['ZNit', 'NO3']
    else:
        raise Exception("Not ready to extract variable %s" % base_var)
    loc_extract_opts.update(extract_options)

    model_data = []  # a data array per model
    for model, label in zip(models, model_labels):
        if base_var == 'flow':
            ds = model.extract_section(**loc_extract_opts)
        else:
            ds = model.extract_station(**loc_extract_opts)
        if ds is None:
            print("No data extracted from model. omitting")
            continue
        assert len(loc_extract_opts['data_vars']) >= 1, "otherwise missing some data"
        tgt_vars = loc_extract_opts['data_vars']
        for tgt_var in tgt_vars:
            try:
                da = ds[tgt_var]
            except KeyError:
                # see if the variable can be found based on standard-name
                for dv in ds.data_vars:
                    if ds[dv].attrs.get('standard_name', '') == tgt_var:
                        da = ds[dv]
                        da.name = tgt_var
                        break
                else:
                    raise Exception("Could not find %s by name or standard_name" % (tgt_var))
            da.name = base_var  # having the same name helps later
            da = da.assign_coords(label=label)
            model_data.append(da)

    # Annotate the sources with labels
    for i, da in enumerate(observations):
        if 'name' in da.attrs:
            label = da.attrs['name']
        else:
            label = "Obs %d" % i
        da = da.assign_coords(label=label)
        observations[i] = da

    all_sources = model_data + observations
    combined = combine_sources(all_sources)
    return all_sources, combined
def calc_metrics(x, ref, combine=False):
    """
    Compute comparison metrics between two series.

    x, ref: DataArrays with a common dimension (plain arrays are wrapped in
      DataArrays).  When both carry a 'time' dimension, additional
      time-series metrics (lag) are computed.
    combine: resample both series onto a shared timebase first.

    Returns a dict of metrics.
    """
    if not isinstance(x, xr.DataArray):
        x = xr.DataArray(x)
    if not isinstance(ref, xr.DataArray):
        ref = xr.DataArray(ref)
    # keep the un-resampled series around: lag is computed on the originals
    x_orig, ref_orig = x, ref
    if combine:
        merged = combine_sources([x, ref])
        x = merged.isel(source=0)
        ref = merged.isel(source=1)
    finite = np.isfinite((x + ref).values)
    xv = x.values[finite]
    rv = ref.values[finite]
    metrics = {}
    metrics['bias'] = np.nanmean((x - ref).values)
    metrics['r'] = np.corrcoef(xv, rv)[0, 1]
    if ('time' in x.dims) and ('time' in ref.dims):
        metrics['lag'] = utils.find_lag_xr(x_orig, ref_orig)
        metrics['lag_s'] = metrics['lag'] / np.timedelta64(1, 's')
    metrics['amp'] = np.std(xv) / np.std(rv)
    metrics['wilmott'] = utils.model_skill(x.values, ref.values)
    metrics['murphy'] = utils.murphy_skill(x.values, ref.values)
    metrics['spearman_rho'], metrics['spearman_p'] = spearmanr(xv, rv)
    return metrics
def fix_date_labels(ax, nticks=3):
    """Format ax's x axis with YYYY-MM-DD labels and roughly nticks major ticks."""
    axis = ax.xaxis
    axis.set_major_formatter(dates.DateFormatter('%Y-%m-%d'))
    locator = dates.AutoDateLocator(minticks=nticks, maxticks=nticks + 1,
                                    interval_multiples=False)
    axis.set_major_locator(locator)
def calibration_figure_3panel(all_sources,combined=None,
metric_x=1,metric_ref=0,
offset_source=None,scatter_x_source=0,
num=None,trim_time=False,
lowpass=True,
styles=None,
offset_method='mean'):
"""
all_sources: list of DataArrays to compare.
combined: those same dataarrays interpolated to common time, or none to automatically
do this.
metric_x: index of the 'model' data in combined.
metric_ref: index of the 'observed' data in combined.
offset_source: if not None, specify the index of the source to which other
sources will be shifted to
scatter_x_ref: which item in combined to use for the x axis of the scatter.
lowpass: if True, the lower left panel is a lowpass of the data, otherwise
it will be used for the text metrics instead of overlaying them on the scatter.
These default to having the reference observations as the first element, and the
primary model output second.
trim_time: truncate all sources to the shortest common time period
offset_method: 'mean' calculates offsets between stations by mean. 'median'
by median, which can be better when a source has noise or model crashes and
corrupts values at the end.
"""
N=np.arange(len(all_sources))
if metric_ref<0:
metric_ref=N[metric_ref]
if scatter_x_source<0:
scatter_x_source=N[scatter_x_source]
if trim_time:
t_min,t_max=period_intersection(all_sources)
new_sources=[]
for src in all_sources:
tsel=(src.time.values>=t_min)&(src.time.values<=t_max)
new_sources.append( src.isel(time=tsel) )
all_sources=new_sources
if combined is None:
combined=combine_sources(all_sources,min_period=trim_time)
if combined is None:
log.warning("Combined sources was None -- likely no overlap between data sets")
return None
labels=list(combined.label.values)
gs = gridspec.GridSpec(5, 3)
fig=plt.figure(figsize=(9,7),num=num)
plt.tight_layout()
ts_ax = fig.add_subplot(gs[:-3, :])
lp_ax = fig.add_subplot(gs[-3:-1, :-1])
scat_ax=fig.add_subplot(gs[-3:-1, 2])
if lowpass:
txt_ax= fig.add_subplot(gs[-1,:])
else:
txt_ax=lp_ax
if offset_method=='mean':
offsets=combined.mean(dim='time').values
elif offset_method=='median':
offsets=combined.median(dim='time').values
else:
raise Exception("offset_method=%s is not understood"%offset_method)
if offset_source is not None:
offsets-=offsets[offset_source]
else:
# no offset to means.
offsets*=0
if styles is None:
styles=[{}]*len(all_sources)
if 1: # Tidal time scale plot:
ax=ts_ax
for src_i,src in enumerate(all_sources):
# When reading live output, it's possible for the length of
# the time dimension and the data to get out of sync. slc
# clips to the shorter of the two.
label=labels[src_i]
if offsets[src_i]!=0.0:
label="%s %+.2f"%(label,-offsets[src_i])
slc=slice(None,min(src.time.shape[0],src.values.shape[0]))
ax.plot(src.time.values[slc],src.values[slc]-offsets[src_i],
label=label,
**styles[src_i])
ax.legend(fontsize=8,loc='upper left')
# Scatter:
if 1:
ax=scat_ax
for i in range(len(combined.source)):
if i==scatter_x_source: continue
kw={}
style=styles[i]
for k in ['color','zorder']:
if k in style:
kw[k]=style[k]
ax.plot(combined.isel(source=scatter_x_source)-offsets[scatter_x_source],
combined.isel(source=i)-offsets[i],
'.',ms=1.5,**kw)
ax.set_xlabel(labels[scatter_x_source])
# Metrics
if metric_x is not None:
ax=txt_ax
if metric_x=='all':
metric_x=[i for i in range(len(all_sources)) if i!=metric_ref]
else:
metric_x=np.atleast_1d(metric_x)
df=pd.DataFrame()
recs=[]
for mx in metric_x:
| |
polygons fully outside of the image plane lead to exceptions
# drawing of poly that is fully out of image
poly = ia.Polygon([(100, 100), (100+10, 100), (100+10, 100+10), (100, 100+10)])
image_poly = poly.draw_on_image(image,
color=[32, 128, 32], color_perimeter=[0, 255, 0],
alpha=1.0, alpha_perimeter=1.0,
raise_if_out_of_image=False)
assert np.array_equal(image_poly, image)
# drawing of poly that is fully out of image, with raise_if_out_of_image=True
poly = ia.Polygon([(100, 100), (100+10, 100), (100+10, 100+10), (100, 100+10)])
got_exception = False
try:
_ = poly.draw_on_image(image,
color=[32, 128, 32], color_perimeter=[0, 255, 0],
alpha=1.0, alpha_perimeter=1.0,
raise_if_out_of_image=True)
except Exception as exc:
assert "Cannot draw polygon" in str(exc)
got_exception = True
assert got_exception
# face invisible via alpha
poly = ia.Polygon([(2, 2), (8, 2), (8, 8), (2, 8)])
image_poly = poly.draw_on_image(image,
color=[32, 128, 32], color_perimeter=[0, 255, 0],
alpha=0.0, alpha_perimeter=1.0,
raise_if_out_of_image=False)
assert image_poly.dtype.type == np.uint8
assert image_poly.shape == (10, 10, 3)
assert np.sum(image) == 3 * np.sum(np.arange(100)) # draw did not change original image (copy=True)
for c_idx, value in enumerate([0, 255, 0]):
assert np.all(image_poly[2:9, 2:3, c_idx] == np.zeros((7, 1), dtype=np.uint8) + value) # left boundary
assert np.all(image_poly[3:8, 3:8, :] == image[3:8, 3:8, :])
# boundary invisible via alpha
poly = ia.Polygon([(2, 2), (8, 2), (8, 8), (2, 8)])
image_poly = poly.draw_on_image(image,
color=[32, 128, 32], color_perimeter=[0, 255, 0],
alpha=1.0, alpha_perimeter=0.0,
raise_if_out_of_image=False)
assert image_poly.dtype.type == np.uint8
assert image_poly.shape == (10, 10, 3)
assert np.sum(image) == 3 * np.sum(np.arange(100)) # draw did not change original image (copy=True)
expected = np.tile(np.uint8([32, 128, 32]).reshape((1, 1, 3)), (6, 6, 1))
assert np.all(image_poly[2:8, 2:8, :] == expected)
# copy=False
# test deactivated as the function currently does not offer a copy argument
"""
image_cp = np.copy(image)
poly = ia.Polygon([(2, 2), (8, 2), (8, 8), (2, 8)])
image_poly = poly.draw_on_image(image_cp,
color_face=[32, 128, 32], color_boundary=[0, 255, 0],
alpha_face=1.0, alpha_boundary=1.0,
raise_if_out_of_image=False)
assert image_poly.dtype.type == np.uint8
assert image_poly.shape == (10, 10, 3)
assert np.all(image_cp == image_poly)
assert not np.all(image_cp == image)
for c_idx, value in enumerate([0, 255, 0]):
assert np.all(image_poly[2:9, 2:3, c_idx] == np.zeros((6, 1, 3), dtype=np.uint8) + value) # left boundary
assert np.all(image_cp[2:9, 2:3, c_idx] == np.zeros((6, 1, 3), dtype=np.uint8) + value) # left boundary
expected = np.tile(np.uint8([32, 128, 32]).reshape((1, 1, 3)), (5, 5, 1))
assert np.all(image_poly[3:8, 3:8, :] == expected)
assert np.all(image_cp[3:8, 3:8, :] == expected)
"""
def test_Polygon_extract_from_image():
    """Polygon.extract_from_image(): sub-regions, concavity, out-of-image
    zero padding, and float-coordinate rounding."""
    image = np.arange(20 * 20 * 2).reshape(20, 20, 2).astype(np.int32)

    # polygon congruent with the top-left 10x10 region
    square = ia.Polygon([(0, 0), (10, 0), (10, 10), (0, 10)])
    assert np.array_equal(square.extract_from_image(image), image[0:10, 0:10, :])

    # strictly inside the image, subpart of it
    inner = ia.Polygon([(1, 1), (9, 1), (9, 9), (1, 9)])
    assert np.array_equal(inner.extract_from_image(image), image[1:9, 1:9, :])

    # concave polygon: the bounding box includes two areas that do not belong
    # to the polygon; those must come back zeroed
    concave = ia.Polygon([(0, 0), (10, 0), (10, 5), (20, 5),
                          (20, 20), (10, 20), (10, 5), (0, 5)])
    expected = np.copy(image)
    expected[:5, 10:, :] = 0  # top right block
    expected[5:, :10, :] = 0  # left bottom block
    assert np.array_equal(concave.extract_from_image(image), expected)

    # partially out of image: the out-of-image part is zero-padded
    partial = ia.Polygon([(-5, 0), (5, 0), (5, 10), (-5, 10)])
    expected = np.zeros((10, 10, 2), dtype=np.int32)
    expected[0:10, 5:10, :] = image[0:10, 0:5, :]
    assert np.array_equal(partial.extract_from_image(image), expected)

    # fully out of image: all zeros
    outside = ia.Polygon([(30, 0), (40, 0), (40, 10), (30, 10)])
    assert np.array_equal(outside.extract_from_image(image),
                          np.zeros((10, 10, 2), dtype=np.int32))

    # float coordinates rounded so that the whole 10x10 region is extracted
    rounded_out = ia.Polygon([(0.4, 0.4), (9.6, 0.4), (9.6, 9.6), (0.4, 9.6)])
    assert np.array_equal(rounded_out.extract_from_image(image), image[0:10, 0:10, :])

    # float coordinates rounded so that x/y 0<=i<9 is extracted (not 0<=i<10)
    rounded_in = ia.Polygon([(0.5, 0.5), (9.4, 0.5), (9.4, 9.4), (0.5, 9.4)])
    assert np.array_equal(rounded_in.extract_from_image(image), image[0:9, 0:9, :])

    # float coordinates rounded so that x/y 1<=i<9 is extracted (not 0<=i<10)
    inset = ia.Polygon([(0.51, 0.51), (9.4, 0.51), (9.4, 9.4), (0.51, 9.4)])
    assert np.array_equal(inset.extract_from_image(image), image[1:9, 1:9, :])
def test_Polygon_change_first_point_by_coords():
    """Polygon.change_first_point_by_coords(): rotation of the exterior,
    immutability of the source polygon, max_distance handling, degenerates."""
    tri = [(0, 0), (1, 0), (1, 1)]

    # targeting the current first point is a no-op
    poly = ia.Polygon(tri)
    assert np.allclose(poly.exterior,
                       poly.change_first_point_by_coords(x=0, y=0).exterior)

    # rotate so that (1, 0) comes first; the original must stay untouched
    poly = ia.Polygon(tri)
    rotated = poly.change_first_point_by_coords(x=1, y=0)
    assert np.allclose(poly.exterior, np.float32([[0, 0], [1, 0], [1, 1]]))
    assert np.allclose(rotated.exterior, np.float32([[1, 0], [1, 1], [0, 0]]))

    # rotate so that (1, 1) comes first
    rotated = ia.Polygon(tri).change_first_point_by_coords(x=1, y=1)
    assert np.allclose(rotated.exterior, np.float32([[1, 1], [0, 0], [1, 0]]))

    # inaccurate target point, but within max_distance
    rotated = ia.Polygon(tri).change_first_point_by_coords(
        x=1.0, y=0.01, max_distance=0.1)
    assert np.allclose(rotated.exterior, np.float32([[1, 0], [1, 1], [0, 0]]))

    # inaccurate target point with unlimited max_distance
    rotated = ia.Polygon(tri).change_first_point_by_coords(
        x=1.0, y=0.01, max_distance=None)
    assert np.allclose(rotated.exterior, np.float32([[1, 0], [1, 1], [0, 0]]))

    # target point farther than max_distance must raise
    poly = ia.Polygon(tri)
    raised = False
    try:
        _ = poly.change_first_point_by_coords(x=1.0, y=0.01, max_distance=0.001)
    except Exception as exc:
        assert "Closest found point " in str(exc)
        raised = True
    assert raised

    # degenerate polygons: two points, then a single point
    rotated = ia.Polygon([(0, 0), (1, 0)]).change_first_point_by_coords(x=1, y=0)
    assert np.allclose(rotated.exterior, np.float32([[1, 0], [0, 0]]))
    rotated = ia.Polygon([(0, 0)]).change_first_point_by_coords(x=0, y=0)
    assert np.allclose(rotated.exterior, np.float32([[0, 0]]))
def test_Polygon_change_first_point_by_index():
    """Polygon.change_first_point_by_index(): exterior rotation, source
    immutability, degenerate polygons, and out-of-bounds index errors."""
    tri = [(0, 0), (1, 0), (1, 1)]

    # index 0 is a no-op
    poly = ia.Polygon(tri)
    assert np.allclose(poly.exterior, poly.change_first_point_by_index(0).exterior)

    # index 1: rotate; the original polygon must stay untouched
    poly = ia.Polygon(tri)
    rotated = poly.change_first_point_by_index(1)
    assert np.allclose(poly.exterior, np.float32([[0, 0], [1, 0], [1, 1]]))
    assert np.allclose(rotated.exterior, np.float32([[1, 0], [1, 1], [0, 0]]))

    # index 2
    rotated = ia.Polygon(tri).change_first_point_by_index(2)
    assert np.allclose(rotated.exterior, np.float32([[1, 1], [0, 0], [1, 0]]))

    # degenerate polygons: two points, then a single point
    rotated = ia.Polygon([(0, 0), (1, 0)]).change_first_point_by_index(1)
    assert np.allclose(rotated.exterior, np.float32([[1, 0], [0, 0]]))
    rotated = ia.Polygon([(0, 0)]).change_first_point_by_index(0)
    assert np.allclose(rotated.exterior, np.float32([[0, 0]]))

    # out-of-bounds indices must raise an AssertionError
    for exterior, bad_idx in [(tri, 3),
                              (tri, -1),
                              ([(0, 0)], 1),
                              ([], 0)]:
        raised = False
        try:
            _ = ia.Polygon(exterior).change_first_point_by_index(bad_idx)
        except AssertionError:
            raised = True
        assert raised
def test_Polygon_to_shapely_line_string():
poly = ia.Polygon([(0, 0), (1, 0), (1, 1)])
ls = poly.to_shapely_line_string()
assert np.allclose(ls.coords, np.float32([[0, 0], [1, 0], [1, 1]]))
# two point polygon
poly = ia.Polygon([(0, 0), (1, 0)])
ls = poly.to_shapely_line_string()
assert np.allclose(ls.coords, np.float32([[0, 0], [1, 0]]))
# one point polygon
poly = ia.Polygon([(0, 0)])
got_exception = False
try:
_ = poly.to_shapely_line_string()
except Exception as exc:
assert "Conversion to shapely line string requires at least two points" in str(exc)
got_exception = True
assert got_exception
# zero point polygon
poly = ia.Polygon([])
got_exception = False
try:
_ = poly.to_shapely_line_string()
except Exception as exc:
assert "Conversion to shapely line string requires at least two points" in str(exc)
got_exception = True
assert got_exception
# closed line string
poly = ia.Polygon([(0, 0), (1, 0), (1, 1)])
ls = poly.to_shapely_line_string(closed=True)
assert np.allclose(ls.coords, np.float32([[0, 0], [1, 0], [1, 1], [0, 0]]))
# interpolation
poly = ia.Polygon([(0, 0), (1, 0), (1, 1)])
ls = poly.to_shapely_line_string(interpolate=1)
assert np.allclose(ls.coords, np.float32([[0, 0], [0.5, 0], [1, 0], [1, 0.5], [1, 1], [0.5, 0.5]]))
# interpolation with 2 steps
poly = ia.Polygon([(0, 0), (1, 0), (1, 1)])
ls = poly.to_shapely_line_string(interpolate=2)
assert np.allclose(ls.coords, np.float32([
[0, 0], [1/3, 0], [2/3, 0],
[1, 0], [1, 1/3], [1, 2/3],
[1, 1], [2/3, 2/3], [1/3, 1/3]
]))
# interpolation with closed=True
poly = ia.Polygon([(0, 0), (1, 0), (1, 1)])
ls = poly.to_shapely_line_string(closed=True, interpolate=1)
assert np.allclose(ls.coords, np.float32([[0, 0], [0.5, 0], [1, 0], [1, | |
#OnosCtrl.install_app(olt_app_file)
@classmethod
def uninstall_cord_config_app(cls):
    """Deactivate, then uninstall, the cord-config app from the ONOS
    controller.  Order matters: deactivate before uninstall."""
    log_test.info('Uninstalling org.opencord.config 1.2 version app')
    OnosCtrl(cls.cord_config_app).deactivate()
    OnosCtrl.uninstall_app(cls.cord_config_app, onos_ip = cls.controller)
@classmethod
def install_igmpproxy(cls):
    """Install and activate every ONOS app listed in cls.app_files
    against the configured controller."""
    log_test.info('In install igmp proxy function ***************')
    for app in cls.app_files:
        OnosCtrl.install_app(app, onos_ip = cls.controller)
        OnosCtrl(app).activate()
@classmethod
def igmp_proxy_setup(cls):
    """Discover the OLT device/port map, derive the IGMP proxy uplink
    port, and build the per-port virtual interface config for ONOS."""
    did = OnosCtrl.get_device_id()
    cls.proxy_device_id = did
    cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
    cls.port_map, _ = cls.olt.olt_port_map()
    #log_test.info('port map is %s'%cls.port_map)
    if cls.port_map:
        ##Per subscriber, we use 1 relay port
        try:
            proxy_port = cls.port_map[cls.port_map['relay_ports'][0]]
        except:
            # no relay port configured -- fall back to the uplink port
            proxy_port = cls.port_map['uplink']
        cls.proxy_interface_port = proxy_port
        cls.proxy_interfaces = (cls.port_map[cls.proxy_interface_port],)
    else:
        # no OLT port map: use a fixed port from the global subscriber map
        cls.proxy_interface_port = 100
        cls.proxy_interfaces = (g_subscriber_port_map[cls.proxy_interface_port],)
    cls.proxy_interfaces_last = cls.proxy_interfaces
    if cls.port_map:
        ##generate a ip/mac client virtual interface config for onos
        interface_list = []
        for port in cls.port_map['ports']:
            port_num = cls.port_map[port]
            if port_num == cls.port_map['uplink']:
                # uplink is handled separately below as the proxy interface
                continue
            ip = cls.get_host_ip(port_num)
            mac = cls.get_mac(port)
            interface_list.append((port_num, ip, mac))
        #configure igmp proxy virtual interface
        proxy_ip = cls.get_host_ip(interface_list[0][0])
        proxy_mac = cls.get_mac(cls.port_map[cls.proxy_interface_port])
        interface_list.append((cls.proxy_interface_port, proxy_ip, proxy_mac))
        cls.onos_interface_load(interface_list)
@classmethod
def onos_interface_load(cls, interface_list):
    """Build the ONOS 'ports' interface configuration from a list of
    (port_num, ip, mac) tuples and stash it in cls.configs."""
    ports_cfg = {}
    for port_num, ip, mac in interface_list:
        port_key = '{}/{}'.format(cls.proxy_device_id, port_num)
        ports_cfg[port_key] = {
            'interfaces': [
                {'ips': ['{}/{}'.format(ip, 24)],
                 'mac': mac,
                 'name': 'vir-{}'.format(port_num)}
            ]
        }
    #cls.onos_load_config(interface_dict)
    cls.configs['interface_config'] = {'ports': ports_cfg}
@classmethod
def onos_igmp_proxy_config_load(cls, FastLeave = "false"):
    """Push igmpproxy + mcast app configs and the access-device config to
    ONOS for the discovered proxy device/port.  FastLeave is passed through
    verbatim as the app's string-typed "FastLeave" flag."""
    #cls.proxy_interface_port = 12
    proxy_connect_point = '{}/{}'.format(cls.proxy_device_id, cls.proxy_interface_port)
    # igmpproxy app config: global connect-point mode pointed at the proxy
    # uplink, plus lldp suppression and mcast vlan settings
    igmpproxy_dict = { "apps": {
        "org.onosproject.provider.lldp": {
            "suppression": {
                "deviceTypes": ["ROADM"],
                "annotation": "{\"no-lldp\":null}"
            }
        },
        "org.opencord.igmpproxy": {
            "igmpproxy": {
                "globalConnectPointMode": "true",
                "globalConnectPoint": proxy_connect_point,
                "UnsolicitedTimeOut": "2",
                "MaxResp": "10",
                "KeepAliveInterval": "120",
                "KeepAliveCount": "3",
                "LastQueryInterval": "2",
                "LastQueryCount": "2",
                "FastLeave": FastLeave,
                "PeriodicQuery": "true",
                "IgmpCos": "7",
                "withRAUpLink": "true",
                "withRADownLink": "true"
            }
        },
        "org.opencord.mcast": {
            "multicast": {
                "ingressVlan": "222",
                "egressVlan": "17"
            }
        }
    }
    }
    # access-device (OLT) config for the same device id
    device_dict = {'devices':{
        cls.proxy_device_id: {
            'basic': {
                'driver': 'default'
            },
            'accessDevice': {
                'uplink': '2',
                'vlan': '222',
                'defaultVlan': '1'
            }
        }
    }
    }
    log_test.info('Igmp proxy dict is %s'%igmpproxy_dict)
    cls.onos_load_config("org.opencord.igmpproxy",igmpproxy_dict)
    cls.onos_load_config("org.opencord.igmpproxy",device_dict)
    # keep copies for later inspection/restore by the tests
    cls.configs['relay_config'] = igmpproxy_dict
    cls.configs['device_config'] = device_dict
def random_mcast_ip(self, start_ip = '172.16.17.32', end_ip = '192.168.127.12'):
    """Return a random address from the inclusive range [start_ip, end_ip],
    stepping the last octet with carry (an octet hitting 255 rolls to 0)."""
    cursor = list(map(int, start_ip.split(".")))
    end = list(map(int, end_ip.split(".")))
    candidates = [start_ip]
    while cursor != end:
        cursor[3] += 1
        # propagate carries from the last octet upward
        for i in (3, 2, 1):
            if cursor[i] == 255:
                cursor[i] = 0
                cursor[i - 1] += 1
        candidates.append(".".join(map(str, cursor)))
    return random.choice(candidates)
def randomsourceip(self, start_ip = '10.10.0.1', end_ip = '10.10.0.100'):
    """Return a random source address from the inclusive range
    [start_ip, end_ip], stepping the last octet with carry (255 rolls to 0)."""
    cursor = list(map(int, start_ip.split(".")))
    end = list(map(int, end_ip.split(".")))
    candidates = [start_ip]
    while cursor != end:
        cursor[3] += 1
        # propagate carries from the last octet upward
        for i in (3, 2, 1):
            if cursor[i] == 255:
                cursor[i] = 0
                cursor[i - 1] += 1
        candidates.append(".".join(map(str, cursor)))
    return random.choice(candidates)
@classmethod
def get_host_ip(cls, port):
    """Return (and memoize in cls.host_ip_map) a host IP for the given
    port number.

    Fix: dict.has_key() is Python-2-only; the 'in' operator behaves
    identically and also works on Python 3.
    """
    if port not in cls.host_ip_map:
        cls.host_ip_map[port] = '192.168.1.{}'.format(port)
    return cls.host_ip_map[port]
@classmethod
def get_mac(cls, iface):
    """Return (and memoize in cls.interface_to_mac_map) the MAC address of
    the given interface.  The inner get_mac() call resolves to the
    module-level helper, not this method.

    Fix: dict.has_key() is Python-2-only; replaced with the 'in' operator.
    """
    if iface not in cls.interface_to_mac_map:
        cls.interface_to_mac_map[iface] = get_mac(iface, pad = 0)
    return cls.interface_to_mac_map[iface]
@classmethod
def start_onos(cls, network_cfg = None):
    """Restart ONOS with the supplied network configuration (defaults to
    cls.device_dict).  Skipped when restart is disabled or VOLTHA runs."""
    if cls.onos_restartable is False:
        log_test.info('ONOS restart is disabled. Skipping ONOS restart')
        return
    if cls.VOLTHA_ENABLED is True:
        log_test.info('ONOS restart skipped as VOLTHA is running')
        return
    if network_cfg is None:
        network_cfg = cls.device_dict
    if type(network_cfg) is tuple:
        # a tuple of dicts: flatten their items into a single config dict
        merged = []
        for cfg in network_cfg:
            merged += cfg.items()
        config = dict(merged)
    else:
        config = network_cfg
    log_test.info('Restarting ONOS with new network configuration')
    return cord_test_onos_restart(config = config)
@classmethod
def remove_onos_config(cls):
    """Best-effort removal of the saved ONOS network-cfg.json file."""
    cfg_path = '{}/network-cfg.json'.format(cls.onos_config_path)
    try:
        os.unlink(cfg_path)
    except:
        # file may not exist -- ignore
        pass
@classmethod
def start_cpqd(cls, mac = '00:11:22:33:44:55'):
    """Launch the CPqD soft switch with a DPID derived from mac and
    return the resulting ONOS device id string."""
    dpid = mac.replace(':', '')
    launcher = os.sep.join( (cls.cpqd_path, 'cpqd.sh') )
    assert_equal(os.system('{} {}'.format(launcher, dpid)), 0)
    time.sleep(10)  # give the switch time to start and connect
    return 'of:{}{}'.format('0' * 4, dpid)
@classmethod
def start_ovs(cls):
    """Start the OVS bridge via of-bridge.sh and wait for it to settle."""
    bridge_script = os.sep.join( (cls.ovs_path, 'of-bridge.sh') )
    assert_equal(os.system(bridge_script), 0)
    time.sleep(30)  # bridge needs time to come up and connect to ONOS
@classmethod
def ovs_cleanup(cls):
    ##For every test case, delete all the OVS groups
    cmd = 'ovs-ofctl del-groups br-int -OOpenFlow11 >/dev/null 2>&1'
    try:
        cord_test_shell(cmd)
        ##Since olt config is used for this test, we just fire a careless local cmd as well
        os.system(cmd)
    finally:
        # best-effort cleanup: the 'return' inside finally deliberately
        # swallows any exception raised by the shell commands above
        return
@classmethod
def onos_aaa_load(cls):
    """Push AAA (RADIUS) configuration to ONOS, once per test run.
    The RADIUS server IP comes from ONOS_AAA_IP when set."""
    if cls.aaa_loaded:
        return
    radius_ip = os.getenv('ONOS_AAA_IP') or '172.17.0.2'
    aaa_dict = {'apps' : { 'org.opencord.aaa' : { 'AAA' : { 'radiusSecret': 'radius_password',
                                                            'radiusIp': radius_ip } } } }
    cls.onos_load_config('org.opencord.aaa', aaa_dict)
    cls.aaa_loaded = True
@classmethod
def onos_dhcp_table_load(cls, config = None):
    """Push DHCP server configuration to ONOS, starting from a copy of
    cls.dhcp_server_config and overriding any keys supplied in config.

    Fix: dict.has_key() is Python-2-only; replaced with the 'in' operator.
    """
    dhcp_dict = {'apps' : { 'org.onosproject.dhcp' : { 'dhcp' : copy.copy(cls.dhcp_server_config) } } }
    dhcp_config = dhcp_dict['apps']['org.onosproject.dhcp']['dhcp']
    if config:
        # only override keys the server config already defines
        for k in config.keys():
            if k in dhcp_config:
                dhcp_config[k] = config[k]
    cls.onos_load_config('org.onosproject.dhcp', dhcp_dict)
@classmethod
def onos_load_config(cls, app, config):
    """Apply a JSON configuration via OnosCtrl and fail the test on error."""
    ok, code = OnosCtrl.config(config)
    if ok is False:
        log_test.info('JSON config request for app %s returned status %d' %(app, code))
    assert_equal(ok, True)
    time.sleep(2)  # allow ONOS to digest the new configuration
def dhcp_sndrcv(self, dhcp, update_seed = False):
    """Run one DHCP discover exchange and return (client_ip, server_ip);
    both must be non-None."""
    client_ip, server_ip = dhcp.discover(update_seed = update_seed)
    assert_not_equal(client_ip, None)
    assert_not_equal(server_ip, None)
    log_test.info('Got dhcp client IP %s from server %s for mac %s' %
                  (client_ip, server_ip, dhcp.get_mac(client_ip)[0]))
    return client_ip, server_ip
def dhcp_request(self, subscriber, seed_ip = '10.10.10.1', update_seed = False):
    """Load a fixed DHCP server config into ONOS, then run a discover
    exchange on the subscriber's interface; returns (client_ip, server_ip)."""
    server_cfg = {'startip':'10.10.10.20', 'endip':'10.10.10.200',
                  'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
                  'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
    self.onos_dhcp_table_load(server_cfg)
    tester = DHCPTest(seed_ip = seed_ip, iface = subscriber.iface)
    return self.dhcp_sndrcv(tester, update_seed = update_seed)
def recv_channel_cb(self, pkt):
    """Per-packet receive callback: verify the packet belongs to a joined
    channel and record the join-to-first-receive latency (microseconds)."""
    ##First verify that we have received the packet for the joined instance
    chan = self.subscriber.caddr(pkt[IP].dst)
    assert_equal(chan in self.subscriber.join_map.keys(), True)
    # latency measured from the recorded join timestamp for this channel
    recv_time = monotonic.monotonic() * 1000000
    join_time = self.subscriber.join_map[chan][self.subscriber.STATS_JOIN].start
    delta = recv_time - join_time
    self.subscriber.join_rx_stats.update(packets=1, t = delta, usecs = True)
    self.subscriber.channel_update(chan, self.subscriber.STATS_RX, 1, t = delta)
    log_test.debug('Packet received in %.3f usecs for group %s after join' %(delta, pkt[IP].dst))
    self.test_status = True
def traffic_verify(self, subscriber):
    """Check external connectivity for subscribers with the TRAFFIC
    service by issuing a GET to a well-known URL; returns the status."""
    if subscriber.has_service('TRAFFIC'):
        url = 'http://www.google.com'
        resp = requests.get(url)
        self.test_status = resp.ok
        if resp.ok == False:
            log_test.info('Subscriber %s failed get from url %s with status code %d'
                          %(subscriber.name, url, resp.status_code))
        else:
            log_test.info('GET request from %s succeeded for subscriber %s'
                          %(url, subscriber.name))
    return self.test_status
def tls_verify(self, subscriber):
    """Run the EAP-TLS authentication test for subscribers with the TLS
    service; always returns the updated test status (True on success)."""
    def tls_fail_cb():
        log_test.info('TLS verification failed')
    if subscriber.has_service('TLS'):
        #OnosCtrl('org.opencord.aaa').deactivate()
        #time.sleep(2)
        #OnosCtrl('org.opencord.aaa').activate()
        #time.sleep(5)
        auth_test = TLSAuthTest(fail_cb = tls_fail_cb, intf = subscriber.rx_intf)
        log_test.info('Running subscriber %s tls auth test' %subscriber.name)
        auth_test.runTest()
        assert_equal(auth_test.failTest, False)
    self.test_status = True
    return self.test_status
def dhcp_verify(self, subscriber):
    """Obtain a DHCP lease for the subscriber (or a deterministic static
    fallback) and record it in subscriber.src_list; returns True."""
    if subscriber.has_service('DHCP'):
        cip, sip = self.dhcp_request(subscriber, update_seed = True)
        log_test.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
        subscriber.src_list = [cip]
    else:
        # no DHCP service: derive an address from the subscriber's rx port
        subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
    self.test_status = True
    return self.test_status
def dhcp_jump_verify(self, subscriber):
    """Like dhcp_verify, but seeds the DHCP request from 10.10.200.1 to
    exercise an address-range jump; returns True."""
    if subscriber.has_service('DHCP'):
        cip, sip = self.dhcp_request(subscriber, seed_ip = '10.10.200.1')
        log_test.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
        subscriber.src_list = [cip]
    else:
        # no DHCP service: derive an address from the subscriber's rx port
        subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
    self.test_status = True
    return self.test_status
def dhcp_next_verify(self, subscriber):
    """Like dhcp_verify, but seeds the DHCP request from 10.10.150.1 to
    request the next address range; returns True."""
    if subscriber.has_service('DHCP'):
        cip, sip = self.dhcp_request(subscriber, seed_ip = '10.10.150.1')
        log_test.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
        subscriber.src_list = [cip]
    else:
        # no DHCP service: derive an address from the subscriber's rx port
        subscriber.src_list = ['10.10.10.{}'.format(subscriber.rx_port)]
    self.test_status = True
    return self.test_status
def igmp_verify(self, subscriber):
    """Join/leave IGMP channel 0 repeatedly and verify traffic stops on leave.

    All subscribers synchronize on self.num_joins before the leave cycle
    starts.  Returns self.test_status (True on success).
    """
    chan = 0
    if subscriber.has_service('IGMP'):
        ##We wait for all the subscribers to join before triggering leaves
        if subscriber.rx_port > 1:
            time.sleep(5)
        subscriber.channel_join(chan, delay = 0)
        self.num_joins += 1
        # Busy-wait barrier: block until every subscriber has joined
        while self.num_joins < self.num_subscribers:
            time.sleep(5)
        log_test.info('All subscribers have joined the channel')
        for i in range(10):
            subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 10)
            log_test.info('Leaving channel %d for subscriber %s' %(chan, subscriber.name))
            subscriber.channel_leave(chan)
            time.sleep(5)
            log_test.info('Interface %s Join RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name,subscriber.join_rx_stats))
            #Should not receive packets for this subscriber
            self.recv_timeout = True
            subscriber.recv_timeout = True
            subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 10)
            subscriber.recv_timeout = False
            self.recv_timeout = False
            log_test.info('Joining channel %d for subscriber %s' %(chan, subscriber.name))
            subscriber.channel_join(chan, delay = 0)
        self.test_status = True
    # NOTE(review): when the IGMP service is absent this method implicitly
    # returns the previous self.test_status — confirm intended.
    return self.test_status
def igmp_jump_verify(self, subscriber):
    """Jump across channels subscriber.num times, verifying receive each time.

    Returns self.test_status (True on success).
    """
    if subscriber.has_service('IGMP'):
        for i in xrange(subscriber.num):
            log_test.info('Subscriber %s jumping channel' %subscriber.name)
            chan = subscriber.channel_jump(delay=0)
            subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1)
            log_test.info('Verified receive for channel %d, subscriber %s' %(chan, subscriber.name))
            time.sleep(3)
        log_test.info('Interface %s Jump RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name, subscriber.join_rx_stats))
        self.test_status = True
    # NOTE(review): without the IGMP service this returns the previous
    # self.test_status unchanged — confirm intended.
    return self.test_status
def igmp_next_verify(self, subscriber):
    """Join successive channels (channel_join_next) and verify receive on each.

    The first iteration joins channel 0 directly; later iterations use
    channel_join_next honoring self.leave_flag.  Returns self.test_status.
    """
    if subscriber.has_service('IGMP'):
        for i in xrange(subscriber.num):
            if i:
                chan = subscriber.channel_join_next(delay=0, leave_flag = self.leave_flag)
            else:
                chan = subscriber.channel_join(i, delay=0)
            log_test.info('Joined next channel %d for subscriber %s' %(chan, subscriber.name))
            subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count=1)
            log_test.info('Verified receive for channel %d, subscriber %s' %(chan, subscriber.name))
            time.sleep(3)
        log_test.info('Interface %s Join Next RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name, subscriber.join_rx_stats))
        self.test_status = True
    # NOTE(review): without the IGMP service this returns the previous
    # self.test_status unchanged — confirm intended.
    return self.test_status
def voltha_igmp_next_verify(self, subscriber):
if subscriber.has_service('IGMP'):
for c in xrange(self.VOLTHA_IGMP_ITERATIONS):
for | |
be able
# to have assumptions on P and Q that dP/dy = dQ/dx.
def ode_1st_exact(eq, func, order, match):
    r"""
    Solves 1st order exact ordinary differential equations.

    A 1st order differential equation is called exact if it is the total
    differential of a function. That is, the differential equation

    .. math:: P(x, y) \,\partial{}x + Q(x, y) \,\partial{}y = 0

    is exact if there is some function `F(x, y)` such that `P(x, y) =
    \partial{}F/\partial{}x` and `Q(x, y) = \partial{}F/\partial{}y`. It can
    be shown that a necessary and sufficient condition for a first order ODE
    to be exact is that `\partial{}P/\partial{}y = \partial{}Q/\partial{}x`.
    Then, the solution will be as given below::

        >>> from sympy import Function, Eq, Integral, symbols, pprint
        >>> x, y, t, x0, y0, C1= symbols('x,y,t,x0,y0,C1')
        >>> P, Q, F= map(Function, ['P', 'Q', 'F'])
        >>> pprint(Eq(Eq(F(x, y), Integral(P(t, y), (t, x0, x)) +
        ... Integral(Q(x0, t), (t, y0, y))), C1))
                    x                y
                    /                /
                   |                |
        F(x, y) =  |  P(t, y) dt +  |  Q(x0, t) dt = C1
                   |                |
                  /                /
                  x0               y0

    Where the first partials of `P` and `Q` exist and are continuous in a
    simply connected region.

    A note: SymPy currently has no way to represent inert substitution on an
    expression, so the hint ``1st_exact_Integral`` will return an integral
    with `dy`. This is supposed to represent the function that you are
    solving for.

    Examples
    ========

    >>> from sympy import Function, dsolve, cos, sin
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> dsolve(cos(f(x)) - (x*sin(f(x)) - f(x)**2)*f(x).diff(x),
    ... f(x), hint='1st_exact')
    x*cos(f(x)) + f(x)**3/3 == C1

    References
    ==========

    - http://en.wikipedia.org/wiki/Exact_differential_equation
    - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
      Dover 1963, pp. 73

    # indirect doctest
    """
    x = func.args[0]
    f = func.func
    r = match  # r['d'], r['e'] key the matched parts of d + e*diff(f(x), x)
    e = r[r['e']]
    d = r[r['d']]
    global y  # This is the only way to pass dummy y to _handle_Integral
    y = r['y']
    # Single fresh integration constant for the implicit solution F(x, y) = C1
    C1 = get_numbered_constants(eq, num=1)
    # Refer Joel Moses, "Symbolic Integration - The Stormy Decade",
    # Communications of the ACM, Volume 14, Number 8, August 1971, pp. 558
    # which gives the method to solve an exact differential equation.
    sol = C.Integral(d, x) + C.Integral((e - (C.Integral(d, x).diff(y))), y)
    return Eq(sol, C1)
def ode_1st_homogeneous_coeff_best(eq, func, order, match):
    r"""
    Returns the best solution to an ODE from the two hints
    ``1st_homogeneous_coeff_subs_dep_div_indep`` and
    ``1st_homogeneous_coeff_subs_indep_div_dep``.

    This is as determined by :py:meth:`~sympy.solvers.ode.ode_sol_simplicity`.

    See the
    :py:meth:`~sympy.solvers.ode.ode_1st_homogeneous_coeff_subs_indep_div_dep`
    and
    :py:meth:`~sympy.solvers.ode.ode_1st_homogeneous_coeff_subs_dep_div_indep`
    docstrings for more information on these hints. Note that there is no
    ``ode_1st_homogeneous_coeff_best_Integral`` hint.

    Examples
    ========

    >>> from sympy import Function, dsolve, pprint
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x),
    ... hint='1st_homogeneous_coeff_best', simplify=False))
                             /    2    \
                             | 3*x     |
                          log|----- + 1|
                             | 2       |
                             \f (x)    /
    log(f(x)) = log(C1) - --------------
                                3

    References
    ==========

    - http://en.wikipedia.org/wiki/Homogeneous_differential_equation
    - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
      Dover 1963, pp. 59

    # indirect doctest
    """
    # There are two substitutions that solve the equation, u1=y/x and u2=x/y
    # They produce different integrals, so try them both and see which
    # one is easier.
    sol1 = ode_1st_homogeneous_coeff_subs_indep_div_dep(eq,
        func, order, match)
    sol2 = ode_1st_homogeneous_coeff_subs_dep_div_indep(eq,
        func, order, match)
    simplify = match.get('simplify', True)
    if simplify:
        # why is odesimp called here? Should it be at the usual spot?
        constants = sol1.free_symbols.difference(eq.free_symbols)
        sol1 = odesimp(
            sol1, func, order, constants,
            "1st_homogeneous_coeff_subs_indep_div_dep")
        constants = sol2.free_symbols.difference(eq.free_symbols)
        sol2 = odesimp(
            sol2, func, order, constants,
            "1st_homogeneous_coeff_subs_dep_div_indep")
    # Pick whichever candidate solution is judged simpler
    return min([sol1, sol2], key=lambda x: ode_sol_simplicity(x, func,
        trysolving=not simplify))
def ode_1st_homogeneous_coeff_subs_dep_div_indep(eq, func, order, match):
    r"""
    Solves a 1st order differential equation with homogeneous coefficients
    using the substitution `u_1 = \frac{\text{<dependent
    variable>}}{\text{<independent variable>}}`.

    This is a differential equation

    .. math:: P(x, y) + Q(x, y) dy/dx = 0

    such that `P` and `Q` are homogeneous and of the same order. A function
    `F(x, y)` is homogeneous of order `n` if `F(x t, y t) = t^n F(x, y)`.
    Equivalently, `F(x, y)` can be rewritten as `G(y/x)` or `H(x/y)`. See
    also the docstring of :py:meth:`~sympy.solvers.ode.homogeneous_order`.

    If the coefficients `P` and `Q` in the differential equation above are
    homogeneous functions of the same order, then it can be shown that the
    substitution `y = u_1 x` (i.e. `u_1 = y/x`) will turn the differential
    equation into an equation separable in the variables `x` and `u`. If
    `h(u_1)` is the function that results from making the substitution `u_1 =
    f(x)/x` on `P(x, f(x))` and `g(u_2)` is the function that results from the
    substitution on `Q(x, f(x))` in the differential equation `P(x, f(x)) +
    Q(x, f(x)) f'(x) = 0`, then the general solution is::

        >>> from sympy import Function, dsolve, pprint
        >>> from sympy.abc import x
        >>> f, g, h = map(Function, ['f', 'g', 'h'])
        >>> genform = g(f(x)/x) + h(f(x)/x)*f(x).diff(x)
        >>> pprint(genform)
         /f(x)\    /f(x)\ d
        g|----| + h|----|*--(f(x))
         \ x  /    \ x  / dx
        >>> pprint(dsolve(genform, f(x),
        ... hint='1st_homogeneous_coeff_subs_dep_div_indep_Integral'))
                     f(x)
                     ----
                      x
                       /
                      |
                      |       -h(u1)
        log(x) = C1 + |  ---------------- d(u1)
                      |  u1*h(u1) + g(u1)
                      |
                     /

    Where `u_1 h(u_1) + g(u_1) \ne 0` and `x \ne 0`.

    See also the docstrings of
    :py:meth:`~sympy.solvers.ode.ode_1st_homogeneous_coeff_best` and
    :py:meth:`~sympy.solvers.ode.ode_1st_homogeneous_coeff_subs_indep_div_dep`.

    Examples
    ========

    >>> from sympy import Function, dsolve
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x),
    ... hint='1st_homogeneous_coeff_subs_dep_div_indep', simplify=False))
                          /          3   \
                          |3*f(x)   f (x)|
                       log|------ + -----|
                          |  x         3 |
                          \           x  /
    log(x) = log(C1) - -------------------
                                3

    References
    ==========

    - http://en.wikipedia.org/wiki/Homogeneous_differential_equation
    - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
      Dover 1963, pp. 59

    # indirect doctest
    """
    x = func.args[0]
    f = func.func
    u = Dummy('u')
    u1 = Dummy('u1')  # u1 == f(x)/x
    r = match  # d+e*diff(f(x),x)
    C1 = get_numbered_constants(eq, num=1)
    # Optional shifts of the origin (used by the 'linear coefficients' hint)
    xarg = match.get('xarg', 0)
    yarg = match.get('yarg', 0)
    # NOTE: 'int' shadows the builtin here; kept for byte-compatibility.
    int = C.Integral(
        (-r[r['e']]/(r[r['d']] + u1*r[r['e']])).subs({x: 1, r['y']: u1}),
        (u1, None, f(x)/x))
    sol = logcombine(Eq(log(x), int + log(C1)), force=True)
    # Undo the origin shift and restore f(x) for the final answer
    sol = sol.subs(f(x), u).subs(((u, u - yarg), (x, x - xarg), (u, f(x))))
    return sol
def ode_1st_homogeneous_coeff_subs_indep_div_dep(eq, func, order, match):
r"""
Solves a 1st order differential equation with homogeneous coefficients
using the substitution `u_2 = \frac{\text{<independent
variable>}}{\text{<dependent variable>}}`.
This is a differential equation
.. math:: P(x, y) + Q(x, y) dy/dx = 0
such that `P` and `Q` are homogeneous and of the same order. A function
`F(x, y)` is homogeneous of order `n` if `F(x t, y t) = t^n F(x, y)`.
Equivalently, `F(x, y)` can be rewritten as `G(y/x)` or `H(x/y)`. See
also the docstring of :py:meth:`~sympy.solvers.ode.homogeneous_order`.
If the coefficients `P` and `Q` in the differential equation above are
homogeneous functions of the same order, then it can be shown that the
substitution `x = u_2 y` (i.e. `u_2 = x/y`) will turn the differential
equation into an equation separable in the variables `y` and `u_2`. If
`h(u_2)` is the function that results from making the substitution `u_2 =
x/f(x)` on `P(x, f(x))` and `g(u_2)` is the function that results from the
substitution on `Q(x, f(x))` in the differential equation `P(x, f(x)) +
Q(x, f(x)) f'(x) = 0`, then the general solution is:
>>> from sympy import Function, dsolve, pprint
>>> from sympy.abc import x
>>> f, g, h = map(Function, ['f', 'g', 'h'])
>>> genform = g(x/f(x)) + h(x/f(x))*f(x).diff(x)
>>> pprint(genform)
/ x \ / x \ d
g|----| + h|----|*--(f(x))
\f(x)/ \f(x)/ dx
>>> pprint(dsolve(genform, f(x),
... hint='1st_homogeneous_coeff_subs_indep_div_dep_Integral'))
x
----
f(x)
/
|
| -g(u2)
| ---------------- d(u2)
| u2*g(u2) + h(u2)
|
/
<BLANKLINE>
f(x) = C1*e
Where `u_2 g(u_2) + h(u_2) \ne 0` and `f(x) \ne 0`.
See also the docstrings of
:py:meth:`~sympy.solvers.ode.ode_1st_homogeneous_coeff_best` and
:py:meth:`~sympy.solvers.ode.ode_1st_homogeneous_coeff_subs_dep_div_indep`.
Examples
========
>>> from sympy import Function, pprint, dsolve
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x),
... hint='1st_homogeneous_coeff_subs_indep_div_dep',
... simplify=False))
/ 2 \
| 3*x |
log|----- + 1|
| 2 |
\f (x) /
log(f(x)) = log(C1) - | |
<reponame>gfokkema/vnet-manager
from ipaddress import IPv4Interface, IPv6Interface, ip_interface, ip_network, ip_address
from re import fullmatch
from logging import getLogger
from os.path import isdir, isfile, join
from copy import deepcopy
from vnet_manager.utils.mac import random_mac_generator
from vnet_manager.conf import settings
logger = getLogger(__name__)
class ValidateConfig:
"""
Validates the config generated by get_config() and updates some values if missing
"""
def __init__(self, config: dict):
    """
    :param dict config: The config generated by get_config()
    """
    # True until any validator reports an unrecoverable error
    self._all_ok = True
    # Count of validator methods executed so far
    self._validators_ran = 0
    # Deep copy so that fix-ups do not mutate the caller's original config
    self._new_config = deepcopy(config)
    # Suffix appended to every user-facing error message
    self.default_message = ". Please check your settings"
    self.config = config
def __str__(self) -> str:
    """Human-readable summary of the validator's current state."""
    state = "OK" if self._all_ok else "NOT OK"
    return "VNet config validator, current_state: {}, amount of validators run: {}".format(
        state, self._validators_ran
    )
@property
def config_validation_successful(self) -> bool:
    """
    This property can be called to see if any unrecoverable errors in the config have been found

    :return bool: True when no validator reported an unrecoverable error
    """
    return self._all_ok
@property
def updated_config(self) -> dict:
    """
    This property contains a updated config dict, with all values that have been fixed by this validator

    :return dict: deep-copied config including validator fix-ups
    """
    return self._new_config
@property
def validators_ran(self) -> int:
    """
    Return the amount of validators that have been run

    :return int: validator invocation count
    """
    return self._validators_ran
def validate(self):
    """
    Run all validation functions
    """
    # Reset the overall status before a fresh run.
    # NOTE(review): _validators_ran is NOT reset, so repeated validate()
    # calls keep accumulating the counter — confirm intended.
    self._all_ok = True
    self.validate_switch_config()
    self.validate_machine_config()
    # veths are optional, only validated when present
    if "veths" in self.config:
        self.validate_veth_config()
def validate_switch_config(self):
    """
    Validates the switch part of the config

    Requires the 'switches' key to exist and be an integer; flags the
    overall validation as failed otherwise.
    """
    self._validators_ran += 1
    if "switches" not in self.config:
        logger.error("Config item 'switches' missing{}".format(self.default_message))
        self._all_ok = False
        return
    if not isinstance(self.config["switches"], int):
        logger.error(
            "Config item 'switches: {}' does not seem to be an integer{}".format(self.config["switches"], self.default_message)
        )
        self._all_ok = False
def validate_machine_config(self):
    # TODO: Refactor
    # pylint: disable=too-many-branches
    """
    Validates the machines part of the config

    Checks that 'machines' is a dict and that every machine has a
    supported type and an interfaces dict; files, vlans and bridges are
    optional sections that are validated when present.
    """
    self._validators_ran += 1
    if "machines" not in self.config:
        logger.error("Config item 'machines' missing{}".format(self.default_message))
        self._all_ok = False
    elif not isinstance(self.config["machines"], dict):
        logger.error("Machines config is not a dict, this means the user config is incorrect{}".format(self.default_message))
        self._all_ok = False
    else:
        for name, values in self.config["machines"].items():
            # Machine type is mandatory and must be a supported one
            if "type" not in values:
                logger.error("Type not found for machine {}{}".format(name, self.default_message))
                self._all_ok = False
            elif values["type"] not in settings.SUPPORTED_MACHINE_TYPES:
                logger.error(
                    "Type {} for machine {} unsupported. I only support the following types: {}{}".format(
                        values["type"], name, settings.SUPPORTED_MACHINE_TYPES, self.default_message
                    )
                )
                self._all_ok = False
            # Files
            if "files" in values:
                if not isinstance(values["files"], dict):
                    logger.error("Files directive for machine {} is not a dict{}".format(name, self.default_message))
                    self._all_ok = False
                else:
                    # Check the files
                    self.validate_machine_files_parameters(name)
            # Interfaces
            if "interfaces" not in values:
                logger.error("Machine {} does not appear to have any interfaces{}".format(name, self.default_message))
                self._all_ok = False
            elif not isinstance(values["interfaces"], dict):
                logger.error(
                    "The interfaces for machine {} are not given as a dict, this usually means a typo in the config{}".format(
                        name, self.default_message
                    )
                )
                self._all_ok = False
            else:
                self.validate_interface_config(name)
            # VLANs?
            if "vlans" not in values:
                logger.debug("Machine {} does not appear to have any VLAN interfaces, that's okay".format(name))
            elif not isinstance(values["vlans"], dict):
                logger.error(
                    "Machine {} has a VLAN config but it does not "
                    "appear to be a dict, this usually means a typo in the config{}".format(name, self.default_message)
                )
                self._all_ok = False
            else:
                self.validate_vlan_config(name)
            # Bridges?
            if "bridges" not in values:
                logger.debug("Machine {} does not appear to have any Bridge interfaces, that's okay".format(name))
            elif not isinstance(values["bridges"], dict):
                logger.error(
                    "Machine {} has a bridge config defined, but it is not a dictionary, "
                    "this usally means a typo in the config{}".format(name, self.default_message)
                )
                self._all_ok = False
            else:
                self.validate_machine_bridge_config(name)
def validate_vlan_config(self, machine):
    """
    Validates the VLAN config of a particular machine

    :param machine: str: the machine to validate the VLAN config for
    """
    vlans = self.config["machines"][machine]["vlans"]
    for name, values in vlans.items():
        # 'id' is mandatory and must be castable to int; the cast result
        # is written back into the updated config
        if "id" not in values:
            logger.error("VLAN {} on machine {} is missing it's vlan id{}".format(name, machine, self.default_message))
            self._all_ok = False
        else:
            try:
                self._new_config["machines"][machine]["vlans"][name]["id"] = int(values["id"])
            except ValueError:
                logger.error(
                    "Unable to cast VLAN {} with ID {} from machine {} to a integer{}".format(
                        name, values["id"], machine, self.default_message
                    )
                )
                self._all_ok = False
        # 'link' must name an existing interface on the same machine
        if "link" not in values:
            logger.error("VLAN {} on machine {} is missing it's link attribute{}".format(name, machine, self.default_message))
            self._all_ok = False
        elif not isinstance(values["link"], str):
            logger.error(
                "Link {} for VLAN {} on machine {}, does not seem to be a string{}".format(
                    values["link"], name, machine, self.default_message
                )
            )
            self._all_ok = False
        # This check requires a valid interface config, so we only do it if the previous checks have been successful
        elif self._all_ok and values["link"] not in self.config["machines"][machine]["interfaces"]:
            logger.error(
                "Link {} for VLAN {} on machine {} does not correspond to any interfaces on the same machine{}".format(
                    values["link"], name, machine, self.default_message
                )
            )
            self._all_ok = False
        # 'addresses' is optional; when present each entry must parse
        # as an IP interface (address with optional prefix)
        if "addresses" not in values:
            logger.debug("VLAN {} on machine {} does not have any addresses, that's okay".format(name, machine))
        elif not isinstance(values["addresses"], list):
            logger.error(
                "Addresses on VLAN {} for machine {}, does not seem to be a list{}".format(name, machine, self.default_message)
            )
            self._all_ok = False
        else:
            for address in values["addresses"]:
                try:
                    ip_interface(address)
                except ValueError as e:
                    logger.error(
                        "Address {} for VLAN {} on machine {} does not seem to be a valid address, got parse error {}".format(
                            address, name, machine, e
                        )
                    )
                    self._all_ok = False
def validate_machine_files_parameters(self, machine: str):
    """
    Validates the files config of a particular machine
    Assumes the files dict exists for that machine

    Relative paths are resolved against config_dir and rewritten in the
    updated config; absolute paths must exist as a file or directory.

    :param str machine: The machine to validates the files config for
    """
    files = self.config["machines"][machine]["files"]
    for host_file in files.keys():
        # First check if the user gave a relative dir from the config dir
        if isdir(join(self.config["config_dir"], host_file)) or isfile(join(self.config["config_dir"], host_file)):
            logger.debug(
                "Updating relative host_file path {} to full path {}".format(host_file, join(self.config["config_dir"], host_file))
            )
            self._new_config["machines"][machine]["files"][join(self.config["config_dir"], host_file)] = self._new_config["machines"][
                machine
            ]["files"].pop(host_file)
        # Check for absolute paths.
        # BUGFIX: was `not isdir(...) or not isfile(...)`, which is true for
        # every existing path (nothing is both a dir and a file) and thus
        # rejected valid absolute paths. Error only when the path is neither.
        elif not isdir(host_file) and not isfile(host_file):
            logger.error(
                "Host file {} for machine {} does not seem to be a dir or a file{}".format(host_file, machine, self.default_message)
            )
            self._all_ok = False
def validate_interface_config(self, machine: str):
    # TODO: Refactor
    # pylint: disable=too-many-branches
    """
    Validates the interface config of a particular machine
    Assumes the interfaces dict exists for that machine

    ipv4/ipv6 are optional but must parse when given; a missing MAC is
    auto-generated into the updated config; 'bridge' is mandatory and must
    reference an existing vnet bridge; 'routes' is optional.

    :param str machine: the machine to validate the interfaces config for
    """
    interfaces = self.config["machines"][machine]["interfaces"]
    for int_name, int_vals in interfaces.items():
        if "ipv4" not in int_vals:
            logger.debug(
                "No IPv4 found for interface {} on machine {}. That's okay, no IPv4 will be configured".format(int_name, machine)
            )
        else:
            # Validate the given IP
            try:
                IPv4Interface(int_vals["ipv4"])
            except ValueError as e:
                logger.error("Unable to parse IPv4 address {} for machine {}. Parse error: {}".format(int_vals["ipv4"], machine, e))
                self._all_ok = False
        if "ipv6" not in int_vals:
            logger.debug(
                "No IPv6 found for interface {} on machine {}, that's okay no IPv6 address will be configured".format(int_name, machine)
            )
        else:
            # Validate the given IP
            try:
                IPv6Interface(int_vals["ipv6"])
            except ValueError as e:
                logger.error("Unable to parse IPv6 address {} for machine {}. Parse error: {}".format(int_vals["ipv6"], machine, e))
                self._all_ok = False
        if "mac" not in int_vals:
            logger.debug("MAC not found for interface {} on machine {}, generating a random one".format(int_name, machine))
            self._new_config["machines"][machine]["interfaces"][int_name]["mac"] = random_mac_generator()
        # From: https://stackoverflow.com/a/7629690/8632038
        elif not fullmatch(r"^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$", int_vals["mac"]):
            logger.error(
                "MAC {} for interface {} on machine {}, does not seem to be valid{}".format(
                    int_vals["mac"], int_name, machine, self.default_message
                )
            )
            self._all_ok = False
        # 'bridge' indexes the vnet bridge interfaces, 0-based, so it must
        # be < number of switches (assumes validate_switch_config passed)
        if "bridge" not in int_vals:
            logger.error("bridge keyword missing on interface {} for machine {}{}".format(int_name, machine, self.default_message))
            self._all_ok = False
        elif not isinstance(int_vals["bridge"], int) or int_vals["bridge"] > self.config["switches"] - 1:
            logger.error(
                "Invalid bridge number detected for interface {} on machine {}. "
                "The bridge keyword should correspond to the interface number of the vnet bridge to connect to "
                "(starting at iface number 0)".format(int_name, machine)
            )
            self._all_ok = False
        if "routes" in int_vals:
            if not isinstance(int_vals["routes"], list):
                logger.error(
                    "routes passed to interface {} for machine {}, found type {}, expected type 'list'{}".format(
                        int_name, machine, type(int_vals["routes"]).__name__, self.default_message
                    )
                )
                self._all_ok = False
            else:
                self.validate_interface_routes(int_vals["routes"], int_name, machine)
def validate_interface_routes(self, routes: list, int_name: str, machine: str):
for idx, route in enumerate(routes):
if "to" not in route:
logger.error(
"'to' keyword missing from route {} on interface {} for machine {}{}".format(
idx + 1, int_name, machine, self.default_message
)
)
self._all_ok = False
else:
try:
ip_network(route["to"])
except ValueError:
if route["to"] == "default":
logger.debug(
"Updating 'default' to destination for route | |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions to calculate frequency spectra."""
import copy
import warnings
import contextlib
import os
from stingray.gti import cross_gtis
from stingray.crossspectrum import AveragedCrossspectrum
from stingray.powerspectrum import AveragedPowerspectrum
from stingray.utils import show_progress
from stingray.gti import time_intervals_from_gtis
from stingray.events import EventList
import numpy as np
from astropy import log
from astropy.logger import AstropyUserWarning
from .base import (
hen_root,
common_name,
_assign_value_if_none,
interpret_bintime,
)
from .io import sort_files, save_pds, load_data
from .io import HEN_FILE_EXTENSION, get_file_type
def average_periodograms(fspec_iterable, total=None):
    """Sum a list (or iterable) of power density spectra.

    Spectra are averaged weighted by their number of chunks ``m``; errors
    are combined in quadrature. All inputs must share the same frequency
    grid, rebin factor and normalization.

    Examples
    --------
    >>> pds = AveragedPowerspectrum()
    >>> pds.freq = np.asarray([1, 2, 3])
    >>> pds.power = np.asarray([3, 3, 3])
    >>> pds.power_err = np.asarray([0.1, 0.1, 0.1])
    >>> pds.m = 1
    >>> pds.fftlen = 128
    >>> pds1 = copy.deepcopy(pds)
    >>> pds1.m = 2
    >>> tot_pds = average_periodograms([pds, pds1])
    >>> np.allclose(tot_pds.power, pds.power)
    True
    >>> np.allclose(tot_pds.power_err, pds.power_err / np.sqrt(3))
    True
    >>> tot_pds.m
    3
    """
    for i, contents in enumerate(show_progress(fspec_iterable, total=total)):
        freq = contents.freq
        pds = contents.power
        epds = contents.power_err
        nchunks = contents.m
        # NOTE(review): rebin is hard-coded to 1, so the consistency assert
        # below is currently vacuous — confirm intended.
        rebin = 1
        norm = contents.norm
        fftlen = contents.fftlen
        if i == 0:
            # First spectrum: initialize the weighted accumulators and keep
            # a shallow copy as the template for the returned object
            rebin0, norm0, freq0 = rebin, norm, freq
            tot_pds = pds * nchunks
            tot_epds = epds ** 2 * nchunks
            tot_npds = nchunks
            tot_contents = copy.copy(contents)
        else:
            assert np.all(
                rebin == rebin0
            ), "Files must be rebinned in the same way"
            np.testing.assert_array_almost_equal(
                freq,
                freq0,
                decimal=int(-np.log10(1 / fftlen) + 2),
                err_msg="Frequencies must coincide",
            )
            assert norm == norm0, "Files must have the same normalization"
            tot_pds += pds * nchunks
            tot_epds += epds ** 2 * nchunks
            tot_npds += nchunks
    # m-weighted mean power and quadrature-combined errors
    tot_contents.power = tot_pds / tot_npds
    tot_contents.power_err = np.sqrt(tot_epds) / tot_npds
    tot_contents.m = tot_npds
    return tot_contents
def _wrap_fun_cpds(arglist):
    """Unpack a (file1, file2, outname, kwargs) tuple and run calc_cpds.

    Helper for pool/map-style parallel execution, which passes a single
    argument per task.
    """
    first_file, second_file, out_file, extra_kwargs = arglist
    return calc_cpds(first_file, second_file, outname=out_file, **extra_kwargs)
def _wrap_fun_pds(argdict):
    """Pop 'fname' from the argument dict and run calc_pds with the rest.

    Helper for pool/map-style parallel execution.  Note: mutates argdict
    in place by removing the 'fname' key, as the original did.
    """
    fname = argdict.pop("fname")
    return calc_pds(fname, **argdict)
def sync_gtis(lc1, lc2):
    """Sync gtis between light curves or event lists.

    Has to work with new and old versions of stingray.

    Examples
    --------
    >>> from stingray.events import EventList
    >>> from stingray.lightcurve import Lightcurve
    >>> ev1 = EventList(
    ...     time=np.sort(np.random.uniform(1, 10, 3)), gti=[[1, 10]])
    >>> ev2 = EventList(time=np.sort(np.random.uniform(0, 9, 4)), gti=[[0, 9]])
    >>> e1, e2 = sync_gtis(ev1, ev2)
    >>> np.allclose(e1.gti, [[1, 9]])
    True
    >>> np.allclose(e2.gti, [[1, 9]])
    True
    >>> lc1 = Lightcurve(
    ...     time=[0.5, 1.5, 2.5], counts=[2, 2, 3], dt=1, gti=[[0, 3]])
    >>> lc2 = Lightcurve(
    ...     time=[1.5, 2.5, 3.5, 4.5], counts=[2, 2, 3, 3], dt=1, gti=[[1, 5]])
    >>> lc1._apply_gtis = lc1.apply_gtis
    >>> lc2._apply_gtis = lc2.apply_gtis
    >>> l1, l2 = sync_gtis(lc1, lc2)
    >>> np.allclose(l1.gti, [[1, 3]])
    True
    >>> np.allclose(l2.gti, [[1, 3]])
    True
    """
    # Intersect the two GTI lists and assign the common result to both
    gti = cross_gtis([lc1.gti, lc2.gti])
    lc1.gti = gti
    lc2.gti = gti
    if hasattr(lc1, "_apply_gtis"):
        # Compatibility with old versions of stingray
        lc1.apply_gtis = lc1._apply_gtis
        lc2.apply_gtis = lc2._apply_gtis
    if hasattr(lc1, "apply_gtis"):
        lc1.apply_gtis()
        lc2.apply_gtis()
    # compatibility with old versions of stingray
    if hasattr(lc1, "tseg") and lc1.tseg != lc2.tseg:
        # Both tseg values use lc1.gti on purpose: after the sync above the
        # two gti arrays are identical, so the spans are equal by construction
        lc1.tseg = np.max(lc1.gti) - np.min(lc1.gti)
        lc2.tseg = np.max(lc1.gti) - np.min(lc1.gti)
    return lc1, lc2
def _format_lc_data(data, type, fftlen=512.0, bintime=1.0):
    """Prepare event-list or light-curve data for spectral analysis.

    For events: drop GTIs shorter than one FFT segment and bin the events
    into a list of light curves.  For light curves: rebin to ``bintime``
    when coarser than the native resolution.
    """
    if type == "events":
        events = data
        # Discard GTIs too short to fit a single FFT segment
        gtilength = events.gti[:, 1] - events.gti[:, 0]
        events.gti = events.gti[gtilength >= fftlen]
        lc_data = list(events.to_lc_list(dt=bintime))
    else:
        lc = data
        if bintime > lc.dt:
            lcrebin = np.rint(bintime / lc.dt)
            log.info("Rebinning lcs by a factor %d" % lcrebin)
            lc = lc.rebin(bintime)
        # To fix problem with float128
        # NOTE(review): placed at branch level (applies also without rebin)
        # since indentation was ambiguous in the reviewed copy — confirm.
        lc.counts = lc.counts.astype(float)
        lc_data = lc
    return lc_data
def _distribute_events(events, chunk_length):
    """Split event list in chunks.

    Yields one EventList per ``chunk_length``-sized interval inside the
    input GTIs, copying any per-event attributes (e.g. ``pi``) and all
    scalar metadata (e.g. ``mjdref``).

    Examples
    --------
    >>> ev = EventList([1, 2, 3, 4, 5, 6], gti=[[0.5, 6.5]])
    >>> ev.pi = np.ones_like(ev.time)
    >>> ev.mjdref = 56780.
    >>> ev_lists = list(_distribute_events(ev, 2))
    >>> np.allclose(ev_lists[0].time, [1, 2])
    True
    >>> np.allclose(ev_lists[1].time, [3, 4])
    True
    >>> np.allclose(ev_lists[2].time, [5, 6])
    True
    >>> np.allclose(ev_lists[0].gti, [[0.5, 2.5]])
    True
    >>> ev_lists[0].mjdref == ev.mjdref
    True
    >>> ev_lists[2].mjdref == ev.mjdref
    True
    >>> np.allclose(ev_lists[1].pi, [1, 1])
    True
    """
    gti = events.gti
    start_times, stop_times = time_intervals_from_gtis(gti, chunk_length)
    for start, end in zip(start_times, stop_times):
        # Events are time-ordered, so a binary search finds the slice
        first, last = np.searchsorted(events.time, [start, end])
        new_ev = EventList(
            events.time[first:last], gti=np.asarray([[start, end]])
        )
        for attr in events.__dict__.keys():
            if attr == "gti":
                continue
            val = getattr(events, attr)
            # Per-event arrays are sliced; scalar metadata copied as-is
            if np.size(val) == np.size(events.time):
                val = val[first:last]
            setattr(new_ev, attr, val)
        yield new_ev
def _provide_periodograms(events, fftlen, dt, norm):
    """Yield one AveragedPowerspectrum per fftlen-sized chunk of events.

    Used for the "split analysis" of long observations, feeding
    average_periodograms one spectrum at a time.
    """
    for new_ev in _distribute_events(events, fftlen):
        # Hack: epsilon slightly below zero, to allow for a GTI to be recognized as such
        new_ev.gti[:, 1] += dt / 10
        pds = AveragedPowerspectrum(
            new_ev, dt=dt, segment_size=fftlen, norm=norm, silent=True
        )
        # Tag the segment length; average_periodograms reads this attribute
        pds.fftlen = fftlen
        yield pds
def _provide_cross_periodograms(events1, events2, fftlen, dt, norm):
    """Yield one AveragedCrossspectrum per fftlen-sized chunk of event pairs.

    Used for the "split analysis" of long observations, feeding
    average_periodograms one cross spectrum at a time.

    Note: the unused ``length``/``total`` locals of the original were
    removed — the expected chunk count is computed by the caller when
    needed for progress reporting.
    """
    ev1_iter = _distribute_events(events1, fftlen)
    ev2_iter = _distribute_events(events2, fftlen)
    for new_ev in zip(ev1_iter, ev2_iter):
        new_ev1, new_ev2 = new_ev
        # Hack: nudge GTI ends so each chunk is recognized as a full GTI
        new_ev1.gti[:, 1] += dt / 10
        new_ev2.gti[:, 1] += dt / 10
        # Silence stray prints from stingray while building the spectrum
        with contextlib.redirect_stdout(open(os.devnull, "w")):
            pds = AveragedCrossspectrum(
                new_ev1,
                new_ev2,
                dt=dt,
                segment_size=fftlen,
                norm=norm,
                silent=True,
            )
        # Tag the segment length; average_periodograms reads this attribute
        pds.fftlen = fftlen
        yield pds
def calc_pds(
    lcfile,
    fftlen,
    save_dyn=False,
    bintime=1,
    pdsrebin=1,
    normalization="leahy",
    back_ctrate=0.0,
    noclobber=False,
    outname=None,
    save_all=False,
    test=False,
):
    """Calculate the PDS from an input light curve file.

    Parameters
    ----------
    lcfile : str
        The light curve file
    fftlen : float
        The length of the chunks over which FFTs will be calculated, in seconds

    Other Parameters
    ----------------
    save_dyn : bool
        If True, save the dynamical power spectrum
    bintime : float
        The bin time. If different from that of the light curve, a rebinning is
        performed
    pdsrebin : int
        Rebin the PDS of this factor.
    normalization: str
        'Leahy', 'frac', 'rms', or any normalization accepted by ``stingray``.
        Default 'Leahy'
    back_ctrate : float
        The non-source count rate
    noclobber : bool
        If True, do not overwrite existing files
    outname : str
        If speficied, output file name. If not specified or None, the new file
        will have the same root as the input light curve and the '_pds' suffix

    Returns
    -------
    str or None
        The output file name, or None when skipped due to ``noclobber``.
    """
    root = hen_root(lcfile)
    if outname is None:
        outname = root + "_pds" + HEN_FILE_EXTENSION
    if noclobber and os.path.exists(outname):
        warnings.warn("File exists, and noclobber option used. Skipping")
        return
    ftype, data = get_file_type(lcfile)
    mjdref = data.mjdref
    instr = data.instr
    length = data.gti[-1, 1] - data.gti[0, 0]
    if hasattr(data, "dt"):
        # Never bin finer than the native resolution of the data
        bintime = max(data.dt, bintime)
    nbins = int(length / bintime)
    # Very long event observations (>1e7 bins) are processed chunk by chunk
    # to bound memory usage; ``test=True`` forces this path for testing
    if ftype == "events" and (test or nbins > 10 ** 7):
        print("Long observation. Using split analysis")
        length = data.gti[-1, 1] - data.gti[0, 0]
        total = int(length / fftlen)
        pds = average_periodograms(
            _provide_periodograms(
                data, fftlen, bintime, norm=normalization.lower()
            ),
            total=total,
        )
    else:
        lc_data = _format_lc_data(data, ftype, bintime=bintime, fftlen=fftlen)
        pds = AveragedPowerspectrum(
            lc_data, segment_size=fftlen, norm=normalization.lower()
        )
    if pdsrebin is not None and pdsrebin != 1:
        pds = pds.rebin(pdsrebin)
    # Attach metadata used by downstream tools before saving
    pds.instr = instr
    pds.fftlen = fftlen
    pds.back_phots = back_ctrate * fftlen
    pds.mjdref = mjdref
    log.info("Saving PDS to %s" % outname)
    save_pds(pds, outname, save_all=save_all)
    return outname
def calc_cpds(
lcfile1,
lcfile2,
fftlen,
save_dyn=False,
bintime=1,
pdsrebin=1,
outname="cpds" + HEN_FILE_EXTENSION,
normalization="leahy",
back_ctrate=0.0,
noclobber=False,
save_all=False,
test=False,
):
"""Calculate the CPDS from a pair of input light curve files.
Parameters
----------
lcfile1 : str
The first light curve file
lcfile2 : str
The second light curve file
fftlen : float
The length of the chunks over which FFTs will be calculated, in seconds
Other Parameters
----------------
save_dyn : bool
If True, save the dynamical power spectrum
bintime : float
The bin time. If different from that of the light curve, a rebinning is
performed
pdsrebin : int
Rebin the PDS of this factor.
normalization : str
'Leahy', 'frac', 'rms', or any normalization accepted by ``stingray``.
Default 'Leahy'
back_ctrate : float
The non-source count rate
noclobber : bool
If True, do not overwrite existing files
outname : str
Output file name for the cpds. Default: cpds.[nc|p]
"""
if noclobber and os.path.exists(outname):
warnings.warn("File exists, and noclobber option used. Skipping")
return
log.info("Loading file %s..." % lcfile1)
ftype1, lc1 = get_file_type(lcfile1)
log.info("Loading file %s..." % lcfile2)
ftype2, lc2 = get_file_type(lcfile2)
instr1 = lc1.instr
instr2 = lc2.instr
if ftype1 != ftype2:
raise ValueError(
"Please use similar data files for the two time "
"series (e.g. both events or both light curves)"
)
if hasattr(lc1, "dt"):
assert lc1.dt == lc2.dt, "Light curves are sampled differently"
lc1, lc2 = sync_gtis(lc1, lc2)
if lc1.mjdref != lc2.mjdref:
lc2 = lc2.change_mjdref(lc1.mjdref)
mjdref = lc1.mjdref
length = lc1.gti[-1, 1] - lc1.gti[0, 0]
if | |
LISTS#
#====================================================================#
#Variables and lists for basic processing
# Variables and lists for basic processing
Dir = []                  # Every datafile path found inside the scanned folders
Dirlist = []              # Every folder path (not including filenames)
IEDFVariablelist = []     # Variable names in pcmc.prof, in header order
Geometrylist = []         # Commonly used geometries [LEGACY: NOT USED]
Globalvarlist = []        # Variable names shared between all folders
Globalnumvars = []        # Number of shared variables between all folders

# Variables for mesh_size lists and SI conversion
ISYMlist = []             # ISYM values, one per folder in Dirlist order
IXZlist = []              # IXZ values, one per folder in Dirlist order
R_mesh = []               # Radial mesh cell counts from initmesh.out, per folder
Z_mesh = []               # Axial mesh cell counts from initmesh.out, per folder
Raxis = []                # Radial SI [cm] axis for plotting
Zaxis = []                # Axial SI [cm] axis for plotting
Depth = []                # icp.nam Depth input [cm], per folder
Radius = []               # icp.nam Radius input [cm], per folder
Height = []               # icp.nam Height input [cm], per folder
dr = []                   # Radial mesh resolution [cm/cell], per folder
dz = []                   # Axial mesh resolution [cm/cell], per folder
dy = []                   # Depth mesh resolution [cm/cell], per folder

# Variables and lists for icp.nam parameters
VRFM, VRFM2 = [], []          # Arrays of reals (in material order)
FREQM, FREQM2 = [], []        # Arrays of reals (in material order)
FREQC = []                    # Array of reals (in material order)
FREQMAX, FREQMIN = [], []     # Arrays of reals (in material order)
FREQGLOB, FREQALL = [], []    # Reals
IRFPOW = []                   # Real
PRESOUT = []                  # Real
IMOVIE_FRAMES = []            # Real
NUMMETALS = 0                 # int
CMETALS, IETRODEM = [], []    # Array of strings / array of ints
NUMCOILS = 0                  # int
CCOILS = []                   # Array of strings
IMATSTATS = 0                 # int
CMATSTATS = []                # Array of strings
IPCMCSPEC = 0                 # int
CPCMCSPEC = []                # Array of strings
IEBINSPCMC = 0                # int
EMAXIPCMC = 0                 # int

# Lists for icp.dat variables
header_icpdat = []        # [SpeciesName, Charge, MolecularWeight, StickingCoeff,
                          #  Transport, ReturnFrac, ReturnName] - array of strings
AtomicSpecies = []        # All species contained within chemistry set - array of strings
FluidSpecies = []         # All neutral fluid species (for fluid dynamics) - array of strings
NeutSpecies = []          # All neutral and metastable species - array of strings
PosSpecies = []           # All positive ion species - array of strings
NegSpecies = []           # All negative ion species - array of strings

# Lists to store raw data
rawdata_2D = []           # ASCII TECPLOT2D.pdt data strings - Variable,Radius,Axis
rawdata_kin = []          # ASCII kin.pdt data strings - Variable,Radius,Axis
rawdata_phasemovie = []   # ASCII movie1.pdt data strings - Variable,Radius,Axis
rawdata_itermovie = []    # ASCII movie_icp.pdt data strings - Variable,Radius,Axis
rawdata_IEDF = []         # ASCII iprofile_tec2d.pdt data strings - Variable,Radius,Axis
rawdata_mcs = []          # ASCII mcs.pdt data strings - Variable,Radius,Axis

Data = []                 # Data[folder][variable][data]; data = 2D (R,Z) of reals
DataKin = []              # DataKin[folder][variable][data]; data = 1D (avg) of reals
DataIEDF = []             # DataIEDF[folder][variable][data]; data = 2D (R,Z) of reals
DataEEDF = []             # DataEEDF[folder][variable][data]; data = 2D (R,Z) of reals
IterMovieData = []        # IterMovieData[folder][timestep][variable][data]; 2D (R,Z) of reals
PhaseMovieData = []       # PhaseMovieData[folder][timestep][variable][data]; 2D (R,Z) of reals
Moviephaselist = []       # 'CYCL = n' - int
MovieIterlist = []        # 'ITER = n' - int
EEDF_TDlist = []          # '???' - ???

header_itermovie = []     # Header row counts for movie_icp.pdt - 1D array of ints
header_phasemovie = []    # Header row counts for movie1.pdt - 1D array of ints
header_IEDFlist = []      # Header row counts for iprofile_tec2d.pdt - 1D array of ints
header_kinlist = []       # Header row counts for kin.pdt - 1D array of ints
header_2Dlist = []        # Header row counts for TECPLOT2D.pdt - 1D array of ints
#====================================================================#
#WELCOME SPLASH AND INFORMATION#
#====================================================================#
#Print the welcome banner, then list which diagnostics were requested by the
#switch variables (savefig_*/print_*) defined elsewhere in this script.
print( '')
print( '--------------------------------------------------------------------')
print( ' __ __ _______ __ _______ __ __ ___ ')
print( ' | | | | | ____|| | | ____|| \ | | / \ ')
print( ' | |__| | | |__ | | | |__ | \| | / ^ \ ')
print( ' | __ | | __| | | | __| | . ` | / /_\ \ ')
print( ' | | | | | |____ | `----.| |____ | |\ | / _____ \ ')
print( ' |__| |__| |_______||_______||_______||__| \__|/__/ \__\ ')
print( ' v3.1.4')
print( '--------------------------------------------------------------------')
print( '')
print( 'The following diagnostics were requested:')
print( '-----------------------------------------')
#Each category below is reported when at least one of its switches is enabled.
if savefig_plot2D == True:
    print('# 2D Steady-State Image Processing')
if savefig_convergence == True:
    print('# 2D Convergence Movie Processing')
if True in [savefig_phaseresolve2D,savefig_PROES]:
    print('# 2D Phase-Resolved Movie Processing')
if True in [savefig_phaseresolve1D]:
    print('# 1D Phase-Resolved Profile Processing')
if True in [savefig_monoprofiles,savefig_multiprofiles,savefig_compareprofiles,savefig_temporalprofiles]:
    print('# 1D Steady-State Profile Processing')
if True in [print_generaltrends,print_Knudsennumber,print_Reynolds, print_totalpower,print_DCbias,print_thrust]:
    print('# 1D Specific Trend Analysis')
if savefig_trendphaseaveraged == True:
    print('# 1D Steady-State Trend Processing')
if savefig_sheathdynamics == True:
    print('# 1D Phase-Resolved Trend Processing')
if True in [savefig_IEDFangular,savefig_IEDFtrends,savefig_EEDF]:
    print('# Angular Energy Distribution Processing')
print( '-----------------------------------------')
print( '')
#====================================================================#
#OBTAINING FILE DIRECTORIES#
#====================================================================#
#Obtain system RAM via POSIX sysconf (page size * number of physical pages).
mem_bytes = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')
mem_gib = mem_bytes/(1024.**3)
ext = image_extension

#Define recognized output file data extensions that will be retained in "Dir"
FileExtensions = ['.PDT','.pdt','.nam','.dat','.out']

#Create directory lists and initialise numfolders to zero.
Dirlist = list()    #List containing all simulation folder directories relative to HELENA
Dir = list()        #List containing all output file in each Dirlist folder relative to HELENA
numfolders = 0      #Initiate folder number to zero

#Obtain home directory and contents
HomeDir = list()    #List of all folders in home.
HomeDirContents = os.listdir( os.path.abspath(".") )

#Determine folders within home directory and add correct 'grammar'.
for entry in HomeDirContents:
    if os.path.isdir(entry):
        HomeDir.append('./'+entry+'/')
    #endif
#endfor

#Determine number of folders containing accepted file extensions (i.e. simulation folders)
#Extract directories of each sub-folder within home directory
for i in range(0,len(HomeDir)):
    previousnumfolders = numfolders
    CurrentDir = HomeDir[i]
    DirContents = os.listdir(CurrentDir)

    #For each file contained within the subfolders, keep only recognised datafiles.
    for Filename in DirContents:
        #Save datafiles (with root) to working directory (Dir) and count datafolders.
        if any(x in Filename for x in FileExtensions):
            Dir.append(CurrentDir+Filename)
            #Register this folder once, on the first datafile found inside it.
            if (numfolders - previousnumfolders) == 0:
                Dirlist.append(CurrentDir)
                numfolders += 1
            #endif
        #endif
    #endfor
#endfor

#Maintain alphanumerical foldername structure (Dirlist) in-sync with dataname structure (Dir)
Dir,Dirlist = sorted(Dir),sorted(Dirlist)

#If no folders detected, end analysis script.
if numfolders == 0:
    print( '-------------------------------------------')
    print( 'No Output Files Detected, Aborting Analysis.')
    print( '-------------------------------------------')
    print( '')
    exit()
#endif

#Extract directories for all required data I/O files
#These directories are relative to the HELENA.py directory
icpnam = list(filter(lambda x: 'icp.nam' in x, Dir))
icpdat = list(filter(lambda x: 'icp.dat' in x, Dir))
icpout = list(filter(lambda x: 'icp.out' in x, Dir))
mesh = list(filter(lambda x: 'initmesh.out' in x, Dir))
TEC2D = list(filter(lambda x: 'TECPLOT2D.PDT' in x, Dir))
#Loop over all folders and retrieve mesh sizes and SI sizes.
for l in range(0,numfolders):
#==========##===== INITMESH.OUT READIN =====##==========#
#==========##===============================##==========#
#Attempt automated retrieval of mesh sizes.
try:
#Identify mesh size from TECPLOT2D file. (Data Array Always Correct Size)
meshdata = open(TEC2D[l]).readlines()
#Zone line holds data, split at comma, R&Z values are given by "I=,J=" respectively.
# R_py3_object = list(filter(lambda x: x.isdigit(), R))
# Z_py3_object = list(filter(lambda x: x.isdigit(), Z))
R = list(filter(lambda x: 'ZONE' in x, meshdata)) #String: 'ZONE I=xxx, J=xxx, F=BLOCK"
Z = list(filter(lambda x: 'ZONE' in x, meshdata)) #String: 'ZONE I=xxx, J=xxx, F=BLOCK"
R = R[0].split(",")[0].strip(' \t\n\r,=ZONE I') #Split at commas, [0] gives "I=xxx"
Z = Z[0].split(",")[1].strip(' \t\n\r,=ZONE J') #Split at commas, [1] gives "J=xxx"
R_mesh.append( int(R) ) #R_mesh (Cells) [int]
Z_mesh.append( int(Z) ) #Z_mesh (Cells) [int]
#If extraction from TECPLOT2D file fails, attempt to extract from initmesh.out header
#This is an old method and causes issues with Q-VT meshes and magnified meshes
except ValueError:
#Identify mesh size from initmesh.out header:
meshdata = open(mesh[l]).readline()
R_mesh.append([int(i) for i in meshdata.split()][1])
if Magmesh == 1: Z_mesh.append([int(i)+1 for i in meshdata.split()][3])
elif Magmesh == 2: Z_mesh.append([int(i)+3 for i in meshdata.split()][3])
elif Magmesh == 3: Z_mesh.append([int(i)+5 for i in meshdata.split()][3])
#endif
#If all else fails, request manual input of mesh resolution
except:
#If data for current file exists:
if l <= len(TEC2D)-1:
#If the initmesh.out file cannot be found, manual input is required.
print( 'ERR: ICP.NAM GEOMETRY READIN, USING MANUAL MESH CELL INPUT:')
print( Dirlist[l])
r_mesh = int(input("DEFINE NUM RADIAL CELLS:"))
z_mesh = int(input("DEFINE NUM AXIAL CELLS:"))
print ('')
R_mesh.append(r_mesh)
Z_mesh.append(z_mesh)
#endif
#endtry
#Retrieve entire mesh for plotting if requested. #MESH PLOTTING NOT WORKING#
if image_plotmesh == True: #MESH PLOTTING NOT WORKING#
image_plotmesh = False
print( 'WARNING: AUTOMESH PLOTTING IS NOT CURRENTLY SUPPORTED')
print( 'SETTING image_plotmesh = False')
print( '')
#Extract mesh data from initmesh.out #MESH PLOTTING NOT WORKING#
# mesh = open(mesh[l]).readlines() #MESH PLOTTING NOT WORKING#
#endif
#==========##===== ICP.NAM READIN =====##==========#
#==========##==========================##==========#
#Attempt automated retrieval of SI conversion units.
NamelistData = open(icpnam[l]).readlines()
#Mesh Geometry Namelist Inputs
try:
RADIUS = list(filter(lambda x:'RADIUS=' in x, NamelistData))
RADIUS = RADIUS[0].split('!!!')[0]
RADIUS = float(RADIUS.strip(' \t\n\r,=RADIUS'))
RADIUST = list(filter(lambda x:'RADIUST=' in x, NamelistData))
RADIUST = RADIUST[0].split('!!!')[0]
RADIUST = float(RADIUST.strip(' \t\n\r,=RADIUST'))
HEIGHT = list(filter(lambda x:'HEIGHT=' in x, NamelistData))
HEIGHT = HEIGHT[0].split('!!!')[0]
HEIGHT = float(HEIGHT.strip(' \t\n\r,=HEIGHT'))
HEIGHTT = list(filter(lambda x:'HEIGHTT=' in x, NamelistData))
HEIGHTT = HEIGHTT[0].split('!!!')[0]
HEIGHTT = float(HEIGHTT.strip(' \t\n\r,=HEIGHTT'))
DEPTH = list(filter(lambda x:'DEPTH=' in x, NamelistData))
DEPTH = DEPTH[0].split('!!!')[0]
DEPTH = float(DEPTH.strip(' \t\n\r,=DEPTH'))
IXZ = list(filter(lambda x:'IXZ=' in x, NamelistData))
IXZ = IXZ[0].split('!!!')[0]
IXZ = int(IXZ.strip(' \t\n\r,=IXZ'))
ISYM = list(filter(lambda x:'ISYM=' in x, NamelistData))
ISYM = ISYM[0].split('!!!')[0]
ISYM = int(ISYM.strip(' \t\n\r,=ISYM'))
#ISYMlist[l] = 1 if mesh uses radial symmetry, = 0 if not
if image_plotsymmetry == True: ISYMlist= np.append(ISYMlist,ISYM)
else: ISYMlist.append(0)
#IXZlist[l] = 1 if mesh uses cartesian coordinates, = 0 if cylindrical
IXZlist = np.append(IXZlist,IXZ)
#Determine if mesh RADIUS or RADIUST was used, save 'Radius' used | |
a klass'es bases
Args:
klass (t.Union[type, object]): The type or object
name (str): The name of the property
Returns:
bool: True if has a property with the given name.
"""
candidates = find_attributes(klass, name)
if not candidates:
return False
def is_property(c):
return not isinstance(getattr(klass, str(c), None), property)
return all(is_property(f) for f in candidates)
def has_type(klass: t.Union[type, object]) -> bool:
    """Check if a type or instance has a ``Type`` member that derives from Enum.

    Args:
        klass (t.Union[type, object]): The type or object

    Returns:
        bool: True if ``klass`` exposes a ``Type`` attribute that is an Enum subclass.
    """
    # NOTE: the original guard `isinstance(klass, (type, object))` was removed
    # because every Python value is an instance of `object`, so its TypeError
    # branch was unreachable.
    # When no "Type" attribute exists, `type(None)` is used as a stand-in,
    # which is not an Enum subclass and therefore yields False.
    return issubclass(getattr(klass, "Type", type(None)), Enum)
def has_variable(klass: t.Union[type, object], name: str) -> bool:
    """Check whether ``name`` resolves to a non-callable attribute on ``klass``.

    Searches the klass'es bases for attributes matching ``name`` and reports
    True only when none of the matches resolves to a callable (i.e. the name
    denotes a plain variable rather than a method).

    Args:
        klass (t.Union[type, object]): The type or object
        name (str): The name of the variable

    Returns:
        bool: True if has a variable with the given name.
    """
    matches = find_attributes(klass, name)
    if not matches:
        return False

    # A match counts as a variable only when its resolved value is not callable.
    return all(
        not isinstance(getattr(klass, str(match), None), t.Callable)
        for match in matches
    )
def is_abstract(klass: t.Union[type, object]) -> bool:
    """Check if a type or instance is abstract.

    Args:
        klass (t.Union[type, object]): The type or object

    Returns:
        bool: True if the type/instance is abstract.
    """
    # NOTE: the original guard `isinstance(klass, (type, object))` was removed
    # because every Python value is an instance of `object`, so its TypeError
    # branch was unreachable.
    if hasattr(klass, "__abstractmethods__"):
        # ABCs expose the set of still-abstract methods; non-empty means
        # the class cannot be instantiated.
        return 0 != len(getattr(klass, "__abstractmethods__"))
    # Fall back to inspect for objects without __abstractmethods__.
    from inspect import isabstract

    return isabstract(klass)
def is_scalar_numeric(value: t.Any) -> bool:
    """Check whether a value is a plain or NumPy scalar number.

    Accepted types are ``int``, ``float``, and their NumPy counterparts
    (``np.integer``, ``np.floating``). Note that ``bool`` is a subclass of
    ``int`` in Python, so booleans also test True here.

    Args:
        value (t.Any): The value to inspect

    Returns:
        bool: True if scalar and numeric.
    """
    numeric_types = (int, float, np.integer, np.floating)
    return isinstance(value, numeric_types)
def leaves(obj: Map) -> t.Generator:
    """Get leaves of a mapping

    A leaf is a value at the end of a full access path through the mapping
    (paths are enumerated with `_leaf_only=True`; `_use_lists=False`
    presumably keeps list elements from being descended into — confirm in
    get_valid_access_paths).

    Args:
        obj (Map): The dict-like object

    Returns:
        t.Generator: A generator that yields the leaf elements of the mapping.
    """
    # Paths are computed eagerly here; the values are resolved lazily by the
    # generator expression below.
    paths = get_valid_access_paths(obj, _leaf_only=True, _use_lists=False)
    return (getitem(obj, path) for path in paths)
def list2cmdline(seq: t.Iterable) -> str:
    """Translates a sequence of arguments into a command line string with "None" removal

    Args:
        seq (t.Iterable): The sequence of arguments

    Returns:
        str: The command-line string
    """
    # Drop None entries before delegating to the wrapped list2cmdline.
    filtered = [arg for arg in seq if arg is not None]
    return _list2cmdline(filtered)
def map_to_leaves(function: t.Callable[[t.Any], t.Any], obj: t.T, _seq: bool = True) -> t.Any:
    """Map a function to leaves of an object

    A leaf is any object that is not a Mapping (and, when `_seq` is set, also
    not a list or set). Strings are treated as leaves.

    Args:
        function (t.Callable[[t.Any], t.Any]): a function of signature "a -> a"
        obj (t.T): a dict-like, list-like, or plain object
        _seq (bool, optional): map on elements of lists?

    Returns:
        t.T: the obj with transformed elements
    """

    def _descend(node: t.T) -> t.Any:
        # Recurse into mappings, preserving the concrete mapping type.
        if isinstance(node, Map):
            return type(node)({key: _descend(value) for key, value in node.items()})
        # Optionally recurse into lists/sets, again preserving the type.
        if _seq and isinstance(node, (t.List, t.Set)):
            return type(node)(_descend(item) for item in node)
        # Anything else is a leaf: apply the mapped function.
        return function(node)

    return _descend(obj)
def mro_getattr(cls: type, attr: str, *args: t.Any) -> t.Any:
    """Get an attribute from a type's class hierarchy, skipping the type itself

    Args:
        cls (type): The type
        attr (str): The attribute
        *args (t.Any): The default value (like in Python's default getattr)

    Returns:
        t.Any: The attribute, or when not found the default value (if provided)

    Raises:
        TypeError: Not called on a type
        TypeError: Wrong number of arguments
        AttributeError: Attribute not found and no default value provided
    """
    if not isinstance(cls, type):
        raise TypeError(f"mro_getattr can only be used on types, got {type(cls)}")
    if len(args) > 1:
        raise TypeError(f"mro_getattr expected at most 3 arguments, got {2 + len(args)}")
    # Walk the MRO from index 1, i.e. excluding `cls` itself.
    for klass in cls.mro()[1:]:
        if hasattr(klass, attr):
            # return first matching attribute
            return getattr(klass, attr)
    if args:
        # if provided, return args[0], i.e. the default value
        return args[0]
    # Fixed wording: previously read "has not attribute".
    raise AttributeError(f"type object {cls.__name__!r} has no attribute {attr!r}")
def mro_hasattr(cls: type, attr: str) -> bool:
    """Check if an attribute exists in a type's class hierarchy, skipping the type itself

    Args:
        cls (type): The type
        attr (str): The attribute

    Returns:
        bool: True if has the attribute.

    Raises:
        TypeError: Not called on a type
    """
    if not isinstance(cls, type):
        # Fixed: the message previously named "mro_getattr" (copy-paste error).
        raise TypeError(f"mro_hasattr can only be used on types, got {type(cls)}")
    # Check every base class in the MRO, excluding `cls` itself.
    return any(hasattr(klass, attr) for klass in cls.mro()[1:])
def random_string(length: int) -> str:
    """Make a random string of specified length

    Characters are drawn (with replacement) from the ASCII letters.

    Args:
        length (int): The desired random string length

    Returns:
        str: The random string
    """
    assert isinstance(length, int), f"'length' must be an int, got: {type(length)}"
    picked = random.choices(ascii_letters, k=length)
    return "".join(picked)
def timestamp() -> str:
    """Make a timestamp with current time

    Returns:
        str: The timestamp in ISO format (second resolution), with ':'
        replaced by '-' so it is safe to use in file names.
    """
    now = datetime.now()
    iso = now.isoformat(sep="_", timespec="seconds")
    return iso.replace(":", "-")
def safe_eval(
    to_eval: str, *, expect: t.Tuple[type] = (list, np.ndarray), timeout: int = 10
) -> object:
    """Evaluate a restricted subset of Python (and numpy) from a string

    Args:
        to_eval (str): The string to evaluate
        expect (t.Tuple[type]): The list of expected resulting types. Defaults to list, ndarray.
        timeout (int): The timeout after which the call fails in seconds. Defaults to 10.

    The `safe_eval` function allows using a subset of commands, listed in `_globals` and
    `_locals`, which includes a few numpy functions: linspace, arange, array, rand, and
    randint. Examples:

    >>> safe_eval("linspace(1, 10, 10, dtype=int).tolist()")
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    >>> safe_eval("__import__('os').getcwd()")
    NameError Traceback (most recent call last)
    ...
    NameError: name '__import__' is not defined
    >>> safe_eval("range(5)")
    TypeError Traceback (most recent call last)
    ...
    TypeError: eval produced a <class 'range'>, expected: (<class 'list'>, <class 'numpy.ndarray'>)
    >>> safe_eval("list(round(rand(), 2) for _ in range(5))")
    [0.96, 0.41, 0.9, 0.98, 0.02]

    Raises:
        TimeoutError: evaluation took longer than `timeout` seconds
        EvalTypeError: the result is not an instance of any type in `expect`

    Note: the timeout uses SIGALRM, so this is POSIX-only and must run on the
    main thread.
    """
    assert isinstance(to_eval, str), "'to_eval' must be a str"
    assert isinstance(expect, tuple), "'expect' must be a tuple"
    assert all(isinstance(_, type) for _ in expect), "'expect' must contain only types"
    _locals = {}
    # Empty __builtins__ blocks escapes like __import__; only the whitelisted
    # names below are reachable from the evaluated expression.
    _globals = {
        "__builtins__": {},
        "list": list,
        "range": range,
        "len": len,
        "int": int,
        "float": float,
        "min": min,
        "max": max,
        "round": round,
        "linspace": np.linspace,
        "geomspace": np.geomspace,
        "logspace": np.logspace,
        "hstack": np.hstack,
        "vstack": np.vstack,
        "split": np.split,
        "arange": np.arange,
        "array": np.array,
        "rand": np.random.rand,
        "randint": np.random.randint,
    }

    class AlarmException(Exception):
        pass

    def signal_handler(number: int, frame):
        assert number == signal.SIGALRM.value
        raise AlarmException()

    # Remember the previous handler so it can be restored no matter how the
    # evaluation exits. The original code cancelled the alarm only on the
    # success path, leaving a pending SIGALRM and a raising handler installed
    # whenever eval() raised (e.g. a sandbox NameError).
    previous_handler = signal.signal(signal.SIGALRM, signal_handler)
    signal.alarm(timeout)
    try:
        result = eval(to_eval, _globals, _locals)
    except AlarmException:
        raise TimeoutError(f"safe_eval took longer than {timeout} seconds")
    finally:
        # Always cancel the alarm and restore the previous handler.
        signal.alarm(0)
        signal.signal(signal.SIGALRM, previous_handler)
    if not isinstance(result, expect):
        raise EvalTypeError(f"eval produced a {type(result)}, expected: {expect}")
    return result
def sanitise_ansi(value: t.Union[t.List[str], str]) -> t.Union[t.List[str], str]:
    """Remove all ANSI escape characters from a str or a list of str

    Note the asymmetry (preserved behaviour): list input additionally strips
    surrounding whitespace from each element, while plain str input is
    returned without stripping.

    Args:
        value (t.Union[t.List[str], str]): The string or list of strings

    Returns:
        t.Union[t.List[str], str]: The sanitised string or a list of sanitised strings

    Raises:
        TypeError: input is neither a str nor a list of str
    """
    ansi_pattern = re.compile(r"(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]")
    if isinstance(value, t.List):
        return [ansi_pattern.sub("", item).strip() for item in value]
    if isinstance(value, str):
        return ansi_pattern.sub("", value)
    raise TypeError("sanitise_ansi accepts only str or lists of str")
def setgetattr(klass: t.Union[type, object], attr: str, default: t.Any) -> None:
    """Combines `setattr` and `getattr` to set attributes

    Sets ``klass.attr`` to its current value when present, otherwise to
    ``default`` — i.e. a "set if missing" helper.

    Args:
        klass (t.Union[type, object]): The type or object
        attr (str): The attribute
        default (t.Any): The default value

    Raises:
        TypeError: 'attr' is not a str
        ValueError: 'attr' is empty
    """
    # NOTE: the original `any([isinstance(klass, type), isinstance(klass, object)])`
    # guard was removed: every Python value is an instance of `object`, so the
    # TypeError branch was unreachable.
    if not isinstance(attr, str):
        raise TypeError("'attr' should be a str")
    if not attr:
        raise ValueError("'attr' should not be empty")
    setattr(klass, attr, getattr(klass, attr, default))
def setitem(obj: t.MutableMapping, query: t.Tuple, value: t.Any, force: bool = False) -> None:
    """Set a value in a dict-like object using a tuple-path query

    Args:
        obj (t.MutableMapping): a mutable mapping
        query (t.Tuple): a query path as a tuple
        value (t.Any): value to set
        force (bool, optional): create missing intermediate mappings instead
            of raising when the query path does not exist

    Raises:
        TypeError: if obj is not a mutable mapping
        KeyError: if the query path does not exist and force is False
    """
    if not isinstance(obj, t.MutableMapping):
        raise TypeError("'obj' needs to be a mutable mapping", type(obj))
    _obj = obj
    _valid = get_valid_access_paths(obj)
    if query not in _valid:
        if not force:
            raise KeyError(f"query-path {query!r} not found")
        # Walk the path, creating missing intermediate dicts in place.
        # Fixed: the original rebound the local (`_obj = dict()`) instead of
        # inserting the new dict into the parent, so it raised KeyError on the
        # next line and the created dict was never attached to `obj`.
        for node in query[:-1]:
            if node not in _obj:
                _obj[node] = dict()
            _obj = _obj[node]
    else:
        for node in query[:-1]:
            _obj = _obj[node]
    _obj[query[-1]] = value
def stub_recursively(
obj: t.T, stub: t.Any = None, _stub_list_elements: bool = True
) -> t.Optional[t.T]:
"""Produce a copy with all leaf values recursively set to a 'stub' value
Args:
obj (t.T): the object to stub
| |
"""
# For the moment, bypass the local queue and put the task in the global scheduler queue
self.scheduler.enqueue_task(task)
# Allowing local resource of tasks (probably only when it comes to the front of the queue) would allow threads
# to make progress even if the global scheduler is blocked by other assignment tasks. However, it would also
# require that the workers do some degree of resource assignment which complicates things and could break
# correctness or efficiency guarantees. That said a local, "fast assignment" algorithm to supplement the
# out-of-band assignment of the scheduler would probably allow Parla to efficiently run programs with
# significantly finer-grained tasks.
# For tasks that are already assigned it may be as simple as:
# self.scheduler._unassigned_resources.allocate_resources(task.assigned_device, task.assigned_amount)
# self._push_task(task)
# This would need to fail over to the scheduler level enqueue if the resources is not available for assignment.
    def _enqueue_task_local(self, task):
        """Push `task` onto the front of this worker's local queue and wake the worker."""
        with self._monitor:
            self._queue.appendleft(task)
            # Wake a thread waiting on this monitor for new work.
            self._monitor.notify()
    def run(self) -> None:
        """Worker main loop: initialize per-thread component state, then
        repeatedly pop and run tasks until stopped or a falsy task is popped.

        Any unexpected exception is logged and stops the whole scheduler.
        """
        try:
            with self:
                # Let each scheduler component set up its per-thread state
                # before any task runs on this thread.
                for component in self.scheduler.components:
                    component.initialize_thread()
                while self._should_run:
                    self._status = "Getting Task"
                    task: Task = self._pop_task()
                    # A falsy task signals shutdown.
                    if not task:
                        break
                    self._status = "Running Task {}".format(task)
                    task.run()
        except Exception as e:
            logger.exception("Unexpected exception in Task handling")
            # Escalate: shut down the scheduler rather than dying silently.
            self.scheduler.stop()
    def dump_status(self, lg=logger):
        """Log this worker and its current local queue (debugging aid)."""
        lg.info("%r:\n%r", self, self._queue)
def __repr__(self):
return "<{} {} {}>".format(type(self).__name__, self.index, self._status)
class ResourcePool:
    """Thread-safe tracker of per-device resource amounts.

    Each device starts with its declared resources scaled by `_multiplier`;
    allocations subtract and deallocations add back, all under `_monitor`.
    """
    # Scale factor applied to every device's declared resource amounts.
    _multiplier: float
    # Guards all access to _devices; also used to wait for resources to free up.
    _monitor: Condition
    # Remaining amount of each named resource, per device.
    _devices: Dict[Device, Dict[str, float]]
    # Resource pools track device resources. Environments are a separate issue and are not tracked here. Instead,
    # tasks will consume resources based on their devices even though those devices are bundled into an environment.
    def __init__(self, multiplier):
        self._multiplier = multiplier
        self._monitor = threading.Condition(threading.Lock())
        self._devices = self._initial_resources(multiplier)
    @staticmethod
    def _initial_resources(multiplier):
        # Snapshot every device's declared resources, scaled by the multiplier.
        return {dev: {name: amt * multiplier for name, amt in dev.resources.items()} for dev in get_all_devices()}
    def allocate_resources(self, d: Device, resources: ResourceDict, *, blocking: bool = False) -> bool:
        """Allocate the resources described by `dd`.

        :param d: The device on which resources exist.
        :param resources: The resources to allocate.
        :param blocking: If True, this call will block until the resource is available and will always return True.

        :return: True iff the allocation was successful.
        """
        return self._atomically_update_resources(d, resources, -1, blocking)
    def deallocate_resources(self, d: Device, resources: ResourceDict) -> None:
        """Deallocate the resources described by `dd`.

        :param d: The device on which resources exist.
        :param resources: The resources to deallocate.
        """
        # Deallocation only increases amounts, so it must always succeed.
        ret = self._atomically_update_resources(d, resources, 1, False)
        assert ret
    def _atomically_update_resources(self, d: Device, resources: ResourceDict, multiplier, block: bool):
        # Apply `v * multiplier` for every named resource, all-or-nothing:
        # on the first failure, roll back the updates already applied.
        with self._monitor:
            to_release = []
            success = True
            for name, v in resources.items():
                if not self._update_resource(d, name, v * multiplier, block):
                    success = False
                    break
                else:
                    # Record applied updates so they can be rolled back on failure.
                    to_release.append((name, v))
            else:
                # for/else: loop completed without break, nothing to roll back.
                to_release.clear()
            logger.info("Attempted to allocate %s * %r (blocking %s) => %s\n%r", multiplier, (d, resources), block, success, self)
            if to_release:
                logger.info("Releasing resources due to failure: %r", to_release)
                for name, v in to_release:
                    # Undo a previously-applied update by applying its negation.
                    ret = self._update_resource(d, name, -v * multiplier, block)
                    assert ret
            assert not success or len(to_release) == 0  # success implies to_release empty
            return success
    def _update_resource(self, dev: Device, res: str, amount: float, block: bool):
        # Adjust a single resource amount; negative `amount` allocates,
        # positive deallocates. Caller must hold self._monitor.
        try:
            while True:  # contains return
                dres = self._devices[dev]
                if -amount <= dres[res]:
                    dres[res] += amount
                    if amount > 0:
                        # Resources were freed: wake any blocked allocators.
                        self._monitor.notify_all()
                    assert dres[res] <= dev.resources[res] * self._multiplier, \
                        "{}.{} was over deallocated".format(dev, res)
                    assert dres[res] >= 0, \
                        "{}.{} was over allocated".format(dev, res)
                    return True
                else:
                    if block:
                        # Not enough available: wait for a deallocation to
                        # notify, then re-check.
                        self._monitor.wait()
                    else:
                        return False
        except KeyError:
            raise ValueError("Resource {}.{} does not exist".format(dev, res))
    def __repr__(self):
        return "ResourcePool(devices={})".format(self._devices)
class AssignmentFailed(Exception):
    """Internal signal that resources could not be allocated for a requirement."""
_T = TypeVar('_T')


def shuffled(lst: Iterable[_T]) -> List[_T]:
    """Return a shuffled copy of `lst`, leaving the input untouched."""
    result = list(lst)
    random.shuffle(result)
    return result
class Scheduler(ControllableThread, SchedulerContext):
_environments: TaskEnvironmentRegistry
_worker_threads: List[WorkerThread]
_unassigned_resources: ResourcePool
_available_resources: ResourcePool
period: float
max_worker_queue_depth: int
    def __init__(self, environments: Collection[TaskEnvironment], n_threads: int = None, period: float = 0.01,
                 max_worker_queue_depth: int = 2):
        """Create the scheduler, spawn its worker threads, and start it.

        :param environments: The task environments available for placement.
        :param n_threads: Number of worker threads; defaults to the total
            "vcus" across all devices of all environments.
        :param period: Sleep period (seconds) used when waiting for resources.
        :param max_worker_queue_depth: Max tasks queued per worker thread.
        """
        super().__init__()
        # Default thread count: one thread per "vcu" over all placement devices.
        n_threads = n_threads or sum(d.resources.get("vcus", 1) for e in environments for d in e.placement)
        self._environments = TaskEnvironmentRegistry(*environments)
        self._exceptions = []
        self._active_task_count = 1  # Start with one count that is removed when the scheduler is "exited"
        self.max_worker_queue_depth = max_worker_queue_depth
        self.period = period
        self._monitor = threading.Condition(threading.Lock())
        self._allocation_queue = deque()
        self._available_resources = ResourcePool(multiplier=1.0)
        # Assignment pool is scaled by max_worker_queue_depth — presumably so
        # tasks can be assigned ahead of actual availability to keep the
        # worker queues fed.
        self._unassigned_resources = ResourcePool(multiplier=max_worker_queue_depth*1.0)
        self._worker_threads = [WorkerThread(self, i) for i in range(n_threads)]
        for t in self._worker_threads:
            t.start()
        self.start()
    @property
    def components(self) -> List["EnvironmentComponentInstance"]:
        """All component instances across every registered environment."""
        return [i for e in self._environments for i in e.components.values()]
    @property
    def scheduler(self):
        """SchedulerContext interface: the scheduler is itself."""
        return self
    def __enter__(self):
        """Enter the scheduler scope; only a single scope is allowed."""
        # _active_task_count starts at 1 (see __init__); any other value means
        # a scope was already entered.
        if self._active_task_count != 1:
            raise InvalidSchedulerAccessException("Schedulers can only have a single scope.")
        return super().__enter__()
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Leave the scheduler scope: wait for shutdown, join workers, re-raise errors."""
        super().__exit__(exc_type, exc_val, exc_tb)
        # Drop the initial count taken in __init__; when all tasks have
        # finished, decr_active_tasks triggers stop().
        self.decr_active_tasks()
        with self._monitor:
            # Wait for the scheduler to stop running (NOTE(review): assumes
            # stop() clears _should_run and notifies this monitor — confirm
            # in ControllableThread).
            while self._should_run:
                self._monitor.wait()
        for t in self._worker_threads:
            t.join()
        if self._exceptions:
            # TODO: Should combine all of them into a single exception.
            raise self._exceptions[0]
    def incr_active_tasks(self):
        """Record that one more task is live (keeps the scheduler running)."""
        with self._monitor:
            self._active_task_count += 1
    def decr_active_tasks(self):
        """Record that a task finished; stop the scheduler when none remain."""
        done = False
        with self._monitor:
            self._active_task_count -= 1
            if self._active_task_count == 0:
                done = True
        # stop() is deliberately called outside the monitor block
        # (NOTE(review): presumably to avoid re-acquiring this lock inside
        # stop() — confirm in ControllableThread.stop).
        if done:
            self.stop()
    def enqueue_task(self, task: Task):
        """Enqueue a task on the resource allocation queue.
        """
        with self._monitor:
            self._allocation_queue.appendleft(task)
            # Wake the scheduler loop blocked in _dequeue_task.
            self._monitor.notify_all()
    def _dequeue_task(self, timeout=None) -> Optional[Task]:
        """Dequeue a task from the resource allocation queue.

        :param timeout: Max time (seconds) to wait after finding the queue
            empty; None waits indefinitely.
        :return: The next task, or None when stopping (or after a timed-out
            wait finds the queue still empty).
        """
        with self._monitor:
            while True:
                try:
                    if self._should_run:
                        # Tasks are appended on the left; pop() takes the oldest.
                        return self._allocation_queue.pop()
                    else:
                        return None
                except IndexError:
                    # Queue empty: wait for enqueue_task (or stop) to notify.
                    self._monitor.wait(timeout)
                    if timeout is not None:
                        # With a timeout, retry once and give up on empty.
                        try:
                            return self._allocation_queue.pop()
                        except IndexError:
                            return None
    def _try_assignment(self, req: EnvironmentRequirements) -> bool:
        """Try to reserve `req.resources` on each of `req`'s devices.

        All-or-nothing: on the first device that cannot be allocated, any
        resources already reserved are released again.

        :return: True iff every device could be allocated.
        """
        # Allocate available resources
        allocated_devices: List[Device] = []
        try:
            # Devices are visited in random order (presumably to spread load
            # across equivalent devices).
            for d in shuffled(req.devices):
                assert len(allocated_devices) < req.ndevices
                assert isinstance(d, Device)
                if self._unassigned_resources.allocate_resources(d, req.resources):
                    allocated_devices.append(d)
                else:
                    raise AssignmentFailed()
            # Select an environment the matches the allocated resources.
            return True
        except AssignmentFailed:
            # Free any resources we already assigned
            for d in allocated_devices:
                self._unassigned_resources.deallocate_resources(d, req.resources)
            return False
    def _assignment_policy(self, task: Task):
        """
        Attempt to assign resources to `task`.

        If this function returns true, `task.req` should have type EnvironmentRequirements.

        :return: True if the assignment succeeded, False otherwise.
        """
        # Build a list of environments with "qualities" assigned based on how well they match a possible
        # option for the task
        env_match_quality = defaultdict(lambda: 0)
        for opt in shuffled(task.req.possibilities):
            if isinstance(opt, DeviceSetRequirements):
                for e in self._environments.find_all(placement=opt.devices, tags=opt.tags, exact=False):
                    # Quality = fraction of the environment's devices that the
                    # option's device set covers.
                    intersection = e.placement & opt.devices
                    match_quality = len(intersection) / len(e.placement)
                    env_match_quality[e] = max(env_match_quality[e], match_quality)
            elif isinstance(opt, EnvironmentRequirements):
                # An explicitly requested environment is a perfect match.
                env_match_quality[opt.environment] = max(env_match_quality[opt.environment], 1)
        # Try environments best-match first.
        environments_to_try = list(env_match_quality.keys())
        environments_to_try.sort(key=env_match_quality.__getitem__, reverse=True)
        # print(task, ":", env_match_quality, " ", environments_to_try)
        # Try the environments in order
        specific_requirements = None
        for env in environments_to_try:
            specific_requirements = EnvironmentRequirements(task.req.resources, env, task.req.tags)
            if self._try_assignment(specific_requirements):
                # Commit: the task now carries the concrete environment choice.
                task.req = specific_requirements
                return True
        return False
    def run(self) -> None:
        """Scheduler main loop: assign resources to queued tasks and hand
        assigned tasks to the least-loaded worker thread."""
        # noinspection PyBroadException
        try:  # Catch all exception to report them usefully
            while self._should_run:
                task: Optional[Task] = self._dequeue_task()
                if not task:
                    # Exit if the dequeue fails. This implies a failure or shutdown.
                    break
                if not task.assigned:
                    is_assigned = self._assignment_policy(task)
                    assert isinstance(is_assigned, bool)
                    task.assigned = is_assigned
                # assert task.req.exact == task.assigned
                assert not task.assigned or isinstance(task.req, EnvironmentRequirements)
                if not task.assigned:
                    # Track repeated failures (used by the disabled warning below).
                    task._assignment_tries = getattr(task, "_assignment_tries", 0) + 1
                    # if task._assignment_tries > _ASSIGNMENT_FAILURE_WARNING_LIMIT:
                    #     logger.warning("Task %r: Failed to assign devices. The required resources may not be "
                    #                    "available on this machine at all.\n"
                    #                    "Available resources: %r\n"
                    #                    "Unallocated resources: %r",
                    #                    task, self._available_resources, self._unassigned_resources)
                    # Put task we cannot assign resources to at the back of the queue
                    logger.debug("Task %r: Failed to assign", task)
                    self.enqueue_task(task)
                    # Avoid spinning when no tasks are schedulable.
                    time.sleep(self.period)
                    # TODO: There is almost certainly a better way to handle this. Add a dependency on
                    #  a task holding the needed resources?
                else:
                    # Place task in shortest worker queue if it's not too long
                    while True:  # contains break
                        worker = min(self._worker_threads, key=lambda w: w.estimated_queue_depth())
                        if worker.estimated_queue_depth() < self.max_worker_queue_depth:
                            logger.debug("Task %r: Enqueued on worker %r", task, worker)
                            worker._enqueue_task_local(task)
                            break
                        else:
                            # Delay a bit waiting for a workers queue to shorten; This is not an issue since
                            # definitionally there is plenty of work in the queues.
                            time.sleep(self.period)
        except Exception:
            logger.exception("Unexpected exception in Scheduler")
            self.stop()
def stop(self):
    """Stop the scheduler itself, then shut down every worker thread it owns."""
    super().stop()
    for worker_thread in self._worker_threads:
        worker_thread.stop()
def report_exception(self, e: BaseException):
    """Record an exception on the scheduler's exception list, holding the monitor lock."""
    self._monitor.acquire()
    try:
        self._exceptions.append(e)
    finally:
        self._monitor.release()
def dump_status(self, lg=logger):
lg.info("%r:\n%r\nunassigned: %r\navailable: %r", self,
self._allocation_queue, self._unassigned_resources, self._available_resources)
w: WorkerThread
for w | |
that this creates a "Set-like" region, not the "Surface-like" regions used in the bonding functions(below) in api.py. See Abaqus Scripting Reference Guide section 45.3 for the distinction"""
return self.DM.regionToolset.Region(edges=self.GetInstanceEdge_point_tangent(edgepointtangent,pointtolerance,tangenttolerance))
def GetInstanceEdge_point_tangent(self, edgepointtangent, pointtolerance, tangenttolerance):
    """Get an instance edge based on a point and tangent and tolerances.

    edgepointtangent: a (point, tangent) pair identifying the edge
    pointtolerance: tolerance for matching the point
    tangenttolerance: tolerance for matching the tangent

    BUGFIX: the parameter was previously named `edgepointtangents` while the
    body referenced the undefined name `edgepointtangent`, so every call
    raised NameError.  The parameter is now named to match the body, the
    singular docstring wording, and the sibling caller
    GetInstanceEdgeRegion_point_tangent.
    """
    return self.DM.globals["GetEdge_point_tangent"](self.fe_inst.edges, self.fe_inst.vertices, edgepointtangent, pointtolerance, tangenttolerance)
def GetInstanceEdgeRegion(self, edgepoints, pointtolerance):
    """Get an instance edge region based on a point and tolerance. Note that this creates a "Set-like" region, not the "Surface-like" regions used in the bonding functions(below) in api.py. See Abaqus Scripting Reference Guide section 45.3 for the distinction"""
    # "Set-like": the edges are passed via the edges= keyword.
    matching_edges = self.GetInstanceEdge(edgepoints, pointtolerance)
    return self.DM.regionToolset.Region(edges=matching_edges)
def GetInstanceEdgeRegionSurface(self, edgepoints, pointtolerance):
    """Get an instance edge region based on a point and tolerance. Note that this creates a "Surface-like" region with side1Edges= used in the bonding functions(below) in api.py, not the "Set-like" regions used in some other contexts. See Abaqus Scripting Reference Guide section 45.3 for the distinction."""
    # "Surface-like": the edges are passed via side1Edges= rather than edges=.
    matching_edges = self.GetInstanceEdge(edgepoints, pointtolerance)
    return self.DM.regionToolset.Region(side1Edges=matching_edges)
def GetInstanceEdge(self, edgepoints, pointtolerance):
    """Get an instance edge based on a point and tolerance."""
    # The actual search is implemented by the GetEdge helper registered in DM.globals.
    edge_finder = self.DM.globals["GetEdge"]
    return edge_finder(self.fe_inst.edges, self.fe_inst.vertices, edgepoints, pointtolerance)
def GetMultipleInstanceEdgesRegion(self, edgepoints, pointtolerance):
    """Get instance edge regions based on a point and tolerance. Note that this creates a "Set-like" region, not the "Surface-like" regions used in the bonding functions(below) in api.py. See Abaqus Scripting Reference Guide section 45.3 for the distinction"""
    matching_edges = self.GetMultipleInstanceEdges(edgepoints, pointtolerance)
    # "Set-like": the edges are passed via the edges= keyword.
    return self.DM.regionToolset.Region(edges=matching_edges)
def GetMultipleInstanceEdges(self, edgepoints, pointtolerance):
    """Get multiple instance edges based on points and tolerance."""
    # Delegate to the GetMultipleEdges helper registered in DM.globals.
    edge_finder = self.DM.globals["GetMultipleEdges"]
    return edge_finder(self.fe_inst.edges, self.fe_inst.vertices, edgepoints, pointtolerance)
#def GetInstanceNodes(self,nodepoints,pointtolerance):
# return GetNodes(self.fe_inst.nodes,nodepoints,pointtolerance)
#def GetInstanceEdges_ThreePoints(self,edgeandinteriorpoints,pointtolerance):
# return self.DM.globals["GetEdges_ThreePoints"](self.fe_inst.edges, self.fe_inst.vertices,edgeandinteriorpoints,pointtolerance)
def SeedPartEdgesByFaces(self, surface_points_and_normals, pointtolerance, normaltolerance, meshsize):
    """Seed the edges around a set of faces with a particular meshing size. This is used for localized mesh refinement.
    The actual implementation is the ABAQUS code in abqfuncs_mesh.py"""
    seed_edges = self.DM.globals["SeedPartEdgesByFaces"]
    seed_edges(self.fe_part, surface_points_and_normals, pointtolerance, normaltolerance, meshsize)
    pass
def MeshSimple(self,ElemTypes,meshsize,ElemShape=None,ElemTechnique=None,refined_edges=[],pointtolerance=None,tangenttolerance=None,refinedmeshsize=None,DeviationFactor=None,MinSizeFactor=None):
    """Perform meshing of this Part. meshsize is nominal meshing size
    ElemShape should be abqC.HEX, abqC.HEX_DOMINATED (default), abqC.TET, etc. for solids or abqC.QUAD or abqC.QUAD_DOMINATED (default), or abqC.TRI for shells
    ElemTechnique can be abqC.SYSTEM_ASSIGN (default), abqC.FREE, or abqC.STRUCTURED
    refined_edges is a list of tuples: ((point1, tangent1),(point2,tangent2)) on each edge to be specially refined
    pointtolerance is tolerance size for finding refined edges
    refinedmeshsize is size of refined mesh regions
    DeviationFactor and MinSizeFactor are scaling factors for seeding (default 0.1)

    NOTE(review): refined_edges uses a mutable default list; it is only read
    here (never mutated), so this is safe as long as callers do not mutate it.
    NOTE(review): tangenttolerance is accepted but not used in this method.
    """
    # Pick the default element shape appropriate to shell vs. solid parts.
    if self.shell:
        if ElemShape is None:
            ElemShape=abqC.QUAD_DOMINATED
            pass
        pass
    else:
        if ElemShape is None:
            ElemShape=abqC.HEX_DOMINATED
            pass
        pass
    if ElemTechnique is None:
        ElemTechnique=abqC.SYSTEM_ASSIGN
        pass
    if DeviationFactor is None:
        DeviationFactor=0.1
        pass
    if MinSizeFactor is None:
        MinSizeFactor=0.1
        pass
    # Apply element types and mesh controls to faces (shells) or cells (solids).
    if self.shell:
        self.fe_part_meshing.setElementType(regions=(self.fe_part_meshing.faces,),elemTypes=ElemTypes)
        self.fe_part_meshing.setMeshControls(regions=self.fe_part_meshing.faces,elemShape=ElemShape,technique=ElemTechnique)
        pass
    else:
        self.fe_part_meshing.setElementType(regions=(self.fe_part_meshing.cells,),elemTypes=ElemTypes)
        self.fe_part_meshing.setMeshControls(regions=self.fe_part_meshing.cells,elemShape=ElemShape,technique=ElemTechnique)
        pass
    # Global (nominal-size) seeding for the whole part.
    self.fe_part_meshing.seedPart(size=meshsize,deviationFactor=DeviationFactor,minSizeFactor=MinSizeFactor)
    # refined_edges is a list of tuples ... endpoint1, interiorpoint, endpoint2
    # representing 3 points on the edge, with two of them being endpoints.
    if len(refined_edges) > 0:
        # edges defined for refinement
        #print("got refined_edges=%s" % (str(refined_edges)))
        #picked=self.GetPartEdges_ThreePoints(refined_edges,pointtolerance)
        picked=self.GetMultiplePartEdges(refined_edges,pointtolerance)
        # Seed the selected edges at the finer refinedmeshsize.
        self.fe_part_meshing.seedEdgeBySize(edges=picked,size=refinedmeshsize,deviationFactor=0.1,minSizeFactor=0.1,constraint=abqC.FINER)
        pass
    self.fe_part_meshing.generateMesh()
    pass
def MeshCohesive(self,meshsize,ElemShape=None,Algorithm=None,ElemLibrary=None,refined_edges=[],pointtolerance=None,refinedmeshsize=None,DeviationFactor=None,MinSizeFactor=None,SweepSense=None):
    """Meshing routine for cohesive layers:
    meshsize is nominal meshing size
    ElemShape should be abqC.HEX or abqC.HEX_DOMINATED (default)
    Algorithm can be abqC.ADVANCING_FRONT (default) or abqC.MEDIAL_AXIS
    ElemLibrary can be abqC.STANDARD (default) or abqC.EXPLICIT
    refined edges is a list of tuples: (endpoint1, interiorpoint, endpoint2) representing 3 points on each edge to be specially refined
    pointtolerance is tolerance size for finding refined edges
    refinedmeshsize is size of refined mesh regions
    DeviationFactor and MinSizeFactor are scaling factors for seeding (default 0.1)
    SweepSense should be abqC.FORWARD (default) or abqC.REVERSE

    NOTE(review): refined_edges, pointtolerance, and refinedmeshsize are
    accepted but never used in this method (unlike MeshSimple) — confirm
    whether edge refinement was intended here.
    """
    # Cohesive meshing only applies to solid (non-shell) parts.
    assert(not self.shell)
    if ElemShape is None:
        ElemShape=abqC.HEX_DOMINATED
        pass
    if Algorithm is None:
        Algorithm=abqC.ADVANCING_FRONT
        pass
    if ElemLibrary is None:
        ElemLibrary=abqC.STANDARD
        pass
    if DeviationFactor is None:
        DeviationFactor=0.1
        pass
    if MinSizeFactor is None:
        MinSizeFactor=0.1
        pass
    if SweepSense is None:
        SweepSense=abqC.FORWARD
        pass
    cells=self.fe_part_meshing.cells # .getSequenceFromMask(...)
    #sys.stderr.write("WARNING: Assuming reference region for stacking direction is faces[0] (FIXME)\n")
    #refregion=self.fe_part_meshing.faces[0]
    # reference region should be the largest face
    refregion=self.DM.globals["GetLargestFace"](self.fe_part_meshing,self.fe_part_meshing.faces)
    self.fe_part_meshing.assignStackDirection(referenceRegion=refregion,cells=cells)
    # Sweep meshing with the chosen shape/algorithm over all cells.
    self.fe_part_meshing.setMeshControls(regions=cells,
                                         elemShape=ElemShape,
                                         technique=abqC.SWEEP,
                                         algorithm=Algorithm)
    sys.stderr.write("WARNING: Assuming region for sweep path is cells[0] (FIXME) and edge is edges[1]\n")
    # !!!*** ALSO need to add contact model for delaminated region across cohesive layer ***!!!
    # Evaluate the cell-count sanity check inside the assembly instruction
    # stream (using its own pre-existing assert/len), not in this process.
    assembly_assert=self.DM.assemblyinstrs.preexisting_variable("assert")
    assembly_len=self.DM.assemblyinstrs.preexisting_variable("len")
    assembly_assert(assembly_len(self.fe_part_meshing.cells)==1) # Given how we construct the cohesive layer it should never have anything but exactly one cell
    #self.fe_part_meshing.NEED_TO_DETERMINE_SWEEP_PATH
    (SweepPathEdgePoint, SweepPathEdgeTangent) = self.gk_layerbody_or_solid.GetOffsetEdge()
    # Get proxied Abaqus edge object.
    SweepPathEdge=self.GetPartEdge_point_tangent((SweepPathEdgePoint,SweepPathEdgeTangent),self.DM.abqpointtolerance,self.DM.tangenttolerance)
    self.fe_part_meshing.setSweepPath(region=self.fe_part_meshing.cells[0],edge=SweepPathEdge,sense=SweepSense)
    # COH3D8 / COH3D6 are the cohesive hex / wedge element codes.
    HexElemType = self.DM.mesh.ElemType(elemCode=abqC.COH3D8,elemLibrary=ElemLibrary)
    WedgeElemType = self.DM.mesh.ElemType(elemCode=abqC.COH3D6,elemLibrary=ElemLibrary)
    self.fe_part_meshing.setElementType(regions=(cells,),elemTypes=(HexElemType,WedgeElemType))
    self.fe_part_meshing.seedPart(size=meshsize,deviationFactor=DeviationFactor,minSizeFactor=MinSizeFactor)
    self.fe_part_meshing.generateMesh()
    pass
def ApplyLayup(self,coordsys,layupdirection,fiberorientation=None):
    """Assign self.fe_datum_csys and self.fe_materialorientation

    coordsys: CoordSys object; consulted only when fiberorientation is None.
    layupdirection: layup direction passed through to coordsys.ApplyLayup.
    fiberorientation: when given, a per-element orientation FIELD named
        '<part name>_orientation' is used instead of delegating to coordsys.
    NOTE(review): fe_datum_csys is presumably assigned inside
    coordsys.ApplyLayup (not visible here) — confirm.
    """
    if fiberorientation is not None:
        # Wrap the Abaqus MaterialOrientation callable so it runs in the
        # fiber-orientation instruction stream (presumably; see DM.fiberinstrs).
        self.fe_materialorientation = self.DM.fiberinstrs.rewrapobj(self.fe_part.MaterialOrientation)
        # Orientation comes from a FIELD named after this part; no local
        # csys and no additional rotation are applied.
        self.fe_materialorientation = self.fe_materialorientation(
            region=self.DM.regionToolset.Region(
                cells=self.fe_part.cells),
            orientationType=abqC.FIELD, axis=abqC.AXIS_3,
            fieldName='%s_orientation' % self.name,
            localCsys=None,
            additionalRotationType=abqC.ROTATION_NONE,
            angle=0.0,
            additionalRotationField='',
            stackDirection=abqC.STACK_3)
    else:
        # Delegate orientation assignment to the coordinate-system object.
        coordsys.ApplyLayup(self, layupdirection)
        pass
    pass
class LayerPart(Part):
    """A LayerPart is a Part that will be used in a Layer"""
    # So far no extra member variables

    def __init__(self, **kwargs):
        """Create a LayerPart; keyword arguments must name existing attributes."""
        self.shell = False  # layer parts are always solids
        for key, value in kwargs.items():
            # Only permit keywords that correspond to declared attributes.
            assert hasattr(self, key)
            setattr(self, key, value)
            pass
        # Lazily create the meshing proxy when a part exists but no proxy was given.
        if self.fe_part_meshing is None and self.fe_part is not None:
            self.fe_part_meshing = self.DM.meshinstrs.rewrapobj(self.fe_part)
            pass
        pass
    pass
class Assembly(object):
    """The assembly class represents an assembly of parts and assemblies.

    Two synthetic (computed) attributes are provided:
      partlist   -- recursive list of the parts in this assembly and all subassemblies
      singlepart -- the sole part; only valid for an assembly containing exactly
                    one part and no subassemblies (raises IndexError otherwise)
    """
    name=None
    """ The name of the assembly"""
    parts=None
    """An ordered dictionary by name of Part objects or Part subclasses"""
    assemblies=None
    """An ordered dictionary by name of Assembly objects or Assembly subclasses"""
    def __init__(self,**kwargs):
        """Create an empty assembly; keyword arguments must name existing
        attributes (e.g. name=...) and are assigned onto the instance."""
        self.parts=collections.OrderedDict()
        self.assemblies=collections.OrderedDict()
        for key in kwargs:
            # Only permit keywords that correspond to declared attributes.
            assert(hasattr(self,key))
            setattr(self,key,kwargs[key])
            pass
        pass
    @property
    def partlist(self):
        """This property gives a list of the parts in this assembly and
        all subassemblies """
        parts = [ self.parts[partname] for partname in self.parts ]
        # also include anything within a subassembly (depth-first recursion)
        for assemblyname in self.assemblies:
            parts.extend(self.assemblies[assemblyname].partlist)
            pass
        return parts
    @property
    def singlepart(self):
        """Assuming this assembly contains only a single part, this property gives that part. Otherwise raises IndexError()."""
        partkeys=list(self.parts.keys())
        assemblykeys=list(self.assemblies.keys())
        if len(partkeys)!=1 or len(assemblykeys) != 0:
            # BUGFIX: message previously referred to the stale name "onlypart";
            # the property is named "singlepart".
            raise IndexError("singlepart attribute is only valid for assemblies that contain a single part")
        return self.parts[partkeys[0]]
    def MeshSimple(self,*args,**kwargs):
        """This method iterates over the MeshSimple methods of each
        part that is a child of this assembly (but not to subassemblies)"""
        # NOTE: Only applies to direct part children, not subassemblies
        for name in self.parts:
            self.parts[name].MeshSimple(*args,**kwargs)
            pass
        pass
    def MeshCohesive(self,*args,**kwargs):
        """This method iterates over the MeshCohesive methods of each
        part that is a child of this assembly (but not to subassemblies)"""
        # NOTE: Only applies to direct part children, not subassemblies
        for name in self.parts:
            self.parts[name].MeshCohesive(*args,**kwargs)
            pass
        pass
    @classmethod
    def FromParts(cls,name,*args):
        """Creates an assembly given the name parameter, and
        additional parameters representing the parts to include
        in the assembly"""
        assem=cls(name=name)
        for part in args:
            assem.parts[part.name]=part
            pass
        return assem
    @classmethod
    def FromAssemblies(cls,name,*args):
        """Creates an assembly given the name parameter, and
        additional parameters representing the subassemblies to include
        in the assembly"""
        assem=cls(name=name)
        for assembly in args:
            assem.assemblies[assembly.name]=assembly
            pass
        return assem
    @classmethod
    def FromPartsAndAssemblies(cls,name,parts,assemblies):
        """Creates an assembly given the name parameter, and
        lists of parts and subassemblies to include
        in the assembly"""
        assem=cls(name=name)
        for part in parts:
            assem.parts[part.name]=part
            pass
        for assembly in assemblies:
            assem.assemblies[assembly.name]=assembly
            pass
        return assem
    pass
class CoordSys(object):
    """ Abstract class representing a coordinate system.
    Each CoordSys subclass should implement the method ApplyLayup(self,layerpart,layerdirection)"""
    # Intentionally empty: this base class only defines the interface contract.
    pass
class SimpleCoordSys(CoordSys):
"""Concrete implementation of a CoordSys representing a fixed Cartesian coordinate frame"""
# fibervec and crossfibervec correspond to a 0 deg ply
fibervec=None
"""Unit vector along the fibers of a 0 degree ply"""
crossfibervec=None
"""Unit vector along the fibers of a 90 degree ply"""
outofplanevec=None
"""Out-of-plane unit vector, i.e. fibervec cross crossfibervec"""
def __init__(self, fibervec, crossfibervec):
    """Build an orthonormal frame from a fiber direction and an approximate
    cross-fiber direction (vectors of a 0 degree ply).

    fibervec: direction of the fibers (need not be unit length)
    crossfibervec: approximate 90-degree direction; any component along
        fibervec is removed (Gram-Schmidt) and the result is normalized.

    BUGFIX: crossfibervec was previously orthogonalized but never normalized,
    and outofplanevec was computed from the raw input vectors; the frame was
    therefore not orthonormal, contradicting the code's own contract that
    xyz_to_oriented_mat is the transpose (= inverse) of oriented_to_xyz_mat.
    """
    # Normalize the fiber direction.
    self.fibervec = fibervec / np.linalg.norm(fibervec)
    # Remove any component of crossfibervec along fibervec (Gram-Schmidt) ...
    crossfiber_orthogonal = crossfibervec - np.inner(self.fibervec, crossfibervec) * self.fibervec
    # ... and normalize, completing the orthonormalization.
    self.crossfibervec = crossfiber_orthogonal / np.linalg.norm(crossfiber_orthogonal)
    # Out-of-plane direction from the orthonormalized in-plane vectors, so the
    # three vectors form a right-handed orthonormal triad.
    self.outofplanevec = np.cross(self.fibervec, self.crossfibervec)
    # multiply oriented_to_xyz_mat on right by coordinates in oriented frame to get coordinates in (x,y,z) space
    self.oriented_to_xyz_mat = np.array((self.fibervec, self.crossfibervec, self.outofplanevec), dtype='d').T
    # Multiply xyz_to_oriented_mat on right by (x,y,z) coordinates to get coordinates in oriented frame.
    # For an orthonormal matrix the transpose is the inverse.
    self.xyz_to_oriented_mat = self.oriented_to_xyz_mat.T
    pass
def ApplyLayup(self,part,layupdirection):
"""Set the material orientation of the specified part or layerpart to layupdirection (in degrees) | |
# get VLANs and IPs for CDU switches
if "sw-cdu" in node_shasta_name:
nodes_by_name = {}
nodes_by_id = {}
destination_rack_list = []
variables["NMN_MTN_VLANS"] = []
variables["HMN_MTN_VLANS"] = []
for node in network_node_list:
node_tmp = node.serialize()
name = node_tmp["common_name"]
nodes_by_name[name] = node_tmp
nodes_by_id[node_tmp["id"]] = node_tmp
for port in nodes_by_name[switch_name]["ports"]:
destination_rack = nodes_by_id[port["destination_node_id"]]["location"][
"rack"
]
destination_rack_list.append(int(re.search(r"\d+", destination_rack)[0]))
for cabinets in (
sls_variables["NMN_MTN_CABINETS"] + sls_variables["HMN_MTN_CABINETS"]
):
ip_address = netaddr.IPNetwork(cabinets["CIDR"])
is_primary = switch_is_primary(switch_name)
sls_rack_int = int(re.search(r"\d+", (cabinets["Name"]))[0])
if sls_rack_int in destination_rack_list:
if cabinets in sls_variables["NMN_MTN_CABINETS"]:
variables["NMN_MTN_VLANS"].append(cabinets)
variables["NMN_MTN_VLANS"][-1][
"PREFIX_LENGTH"
] = ip_address.prefixlen
if is_primary[0]:
ip = str(ip_address[2])
variables["NMN_MTN_VLANS"][-1]["IP"] = ip
else:
ip = str(ip_address[3])
variables["NMN_MTN_VLANS"][-1]["IP"] = ip
if cabinets in sls_variables["HMN_MTN_CABINETS"]:
variables["HMN_MTN_VLANS"].append(cabinets)
variables["HMN_MTN_VLANS"][-1][
"PREFIX_LENGTH"
] = ip_address.prefixlen
if is_primary[0]:
ip = str(ip_address[2])
variables["HMN_MTN_VLANS"][-1]["IP"] = ip
else:
ip = str(ip_address[3])
variables["HMN_MTN_VLANS"][-1]["IP"] = ip
switch_config = template.render(
variables=variables,
cabling=cabling,
)
devices = set()
for node in cabling["nodes"]:
devices.add(node["subtype"])
def hier_options(switch_os):
    """Return the path of the hier_config options YAML file for the given switch OS.

    project_root is taken from the enclosing scope.
    """
    return os.path.join(
        project_root,
        "canu",
        "validate",
        "switch",
        "config",
        f"{switch_os}_options.yaml",
    )
def add_preserve_config(switch_config):
    """Prepend a commented summary of the preserved interface->LAG mappings
    to the generated switch config.

    `preserve` (the parsed old running config) comes from the enclosing scope.
    Ports without a "lag" entry are skipped in the summary.
    """
    header_lines = ["# The interface to LAG mappings below have been preserved in the generated config\n"]
    for port in preserve[0][0]:
        lag = port.get("lag")
        if lag is not None:
            header_lines.append(f"# interface {port.get('interface')} LAG id {lag}\n")
    header_lines.append("\n")
    header_lines.append(switch_config)
    return "".join(header_lines)
def error_check_preserve_config(switch_config):
    """Exit with an error message if any port is missing a LAG/MLAG mapping.

    A failed mapping shows up as the literal text "... None" in the rendered
    config; finding any such marker aborts the program with exit code 1.
    """
    bad_markers = (
        "mlag-channel-group None",
        "lag None",
        "channel-group None",
    )
    if any(marker in switch_config for marker in bad_markers):
        click.secho(
            "Incorrect port > MLAG mapping, please verify that all the ports have a correct MLAG mapping.",
            fg="red",
        )
        sys.exit(1)
if architecture == "network_v1" and node_shasta_name != "sw-edge":
switch_config_v1 = ""
if "sw-cdu" in switch_name or "sw-leaf-bmc" in switch_name:
switch_os = "dellOS10"
options = yaml.load(open(hier_options(switch_os)))
elif "sw-spine" in switch_name:
switch_os = "onyx"
options = yaml.load(open(hier_options(switch_os)))
hier_host = Host(switch_name, switch_os, options)
if custom_config and custom_config.get(switch_name) is not None:
switch_custom_config = custom_config.get(switch_name)
switch_config_v1 = add_custom_config(
switch_custom_config,
switch_config,
hier_host,
switch_os,
custom_config_file,
)
else:
hier_v1 = HConfig(host=hier_host)
hier_v1.load_from_string(switch_config)
hier_v1.set_order_weight()
for line in hier_v1.all_children_sorted():
switch_config_v1 += line.cisco_style_text() + "\n"
if preserve:
preserve_lag_config = add_preserve_config(switch_config_v1)
error_check_preserve_config(preserve_lag_config)
return (preserve_lag_config, devices, unknown)
return switch_config_v1, devices, unknown
# defaults to aruba options file
else:
if custom_config:
switch_custom_config = custom_config.get(switch_name)
if switch_custom_config is not None:
switch_os = "aoscx"
options = yaml.load(open(hier_options(switch_os)))
hier_host = Host(switch_name, switch_os, options)
switch_config = add_custom_config(
switch_custom_config,
switch_config,
hier_host,
switch_os,
custom_config_file,
)
if preserve:
preserve_lag_config = add_preserve_config(switch_config)
error_check_preserve_config(preserve_lag_config)
return (preserve_lag_config, devices, unknown)
if reorder:
switch_os = "aoscx"
options = yaml.load(open(hier_options(switch_os)))
host = Host(switch_name, switch_os, options)
switch_config_hier = HConfig(host=host)
switch_config_hier.load_from_string(switch_config)
switch_config_hier.set_order_weight()
# add ! to the end of the aruba banner.
banner = switch_config_hier.get_child("contains", "banner")
banner.add_child("!")
config = ""
for line in switch_config_hier.all_children_sorted():
# add two spaces to indented config to match aruba formatting.
if (
line.cisco_style_text().startswith(" ")
and "!" not in line.cisco_style_text()
):
config += "\n" + " " + line.cisco_style_text()
else:
config += "\n" + line.cisco_style_text().lstrip()
switch_config = config
return switch_config, devices, unknown
def get_pair_connections(nodes, switch_name):
    """Given a hostname and nodes, return connections to the primary or secondary switch.

    Args:
        nodes: List of nodes connected to the switch
        switch_name: Switch hostname

    Returns:
        Naturally-sorted list of ports connecting to the paired switch
    """
    is_primary, primary, secondary = switch_is_primary(switch_name)
    # Pair with the other member of the primary/secondary couple.
    pair_hostname = secondary if is_primary else primary
    connections = [
        node["config"]["PORT"]
        for node in nodes
        if pair_hostname in node["config"]["DESCRIPTION"]
    ]
    return natsort.natsorted(connections)
def preserve_port(
    preserve,
    source_port,
    mellanox=None,
):
    """Look up the LAG number a switch port had in the preserved running config.

    Args:
        preserve: parsed running config
        source_port: port that is going to be assigned a LAG
        mellanox: if switch is mellanox parse the interface differently. (mellanox = 1/1, aruba/dell = 1/1/1)

    Returns:
        The LAG Number of the old running config, or None when no entry matches.
    """
    wanted = str(source_port)
    for port in preserve[0][0]:
        if "lag" not in port.keys():
            continue
        interface = port["interface"]
        # Mellanox interfaces look like "1/1" (port after 2 chars); Aruba/Dell
        # look like "1/1/1" (port after 4 chars).
        if (mellanox and wanted == interface[2:]) or wanted == interface[4:]:
            return port["lag"]
def get_switch_nodes(
architecture,
switch_name,
network_node_list,
factory,
sls_variables,
preserve,
):
"""Get the nodes connected to the switch ports.
Args:
architecture: CSM architecture
switch_name: Switch hostname
network_node_list: List of nodes from the SHCD / Paddle
factory: Node factory object
sls_variables: Dictionary containing SLS variables.
preserve: Parsed running config.
Returns:
List of nodes connected to the switch
List of unknown nodes
"""
nodes = []
nodes_by_name = {}
nodes_by_id = {}
unknown = []
# Make 2 dictionaries for easy node lookup
for node in network_node_list:
node_tmp = node.serialize()
name = node_tmp["common_name"]
nodes_by_name[name] = node_tmp
nodes_by_id[node_tmp["id"]] = node_tmp
if switch_name not in nodes_by_name.keys():
click.secho(
f"For switch {switch_name}, the type cannot be determined. Please check the switch name and try again.",
fg="red",
)
sys.exit(1)
for port in nodes_by_name[switch_name]["ports"]:
destination_node_id = port["destination_node_id"]
destination_node_name = nodes_by_id[destination_node_id]["common_name"]
destination_rack = nodes_by_id[destination_node_id]["location"]["rack"]
source_port = port["port"]
destination_port = port["destination_port"]
destination_slot = port["destination_slot"]
shasta_name = get_shasta_name(destination_node_name, factory.lookup_mapper())
primary_port = get_primary_port(nodes_by_name, switch_name, destination_node_id)
if shasta_name == "ncn-m":
new_node = {
"subtype": "master",
"slot": destination_slot,
"destination_port": destination_port,
"config": {
"DESCRIPTION": get_description(
switch_name,
destination_node_name,
destination_slot,
destination_port,
),
"PORT": f"{source_port}",
"LAG_NUMBER": primary_port,
},
}
if preserve and architecture == "network_v1":
new_node["config"]["LAG_NUMBER"] = preserve_port(
preserve,
source_port,
mellanox=True,
)
elif preserve:
new_node["config"]["LAG_NUMBER"] = preserve_port(preserve, source_port)
nodes.append(new_node)
elif shasta_name == "ncn-s":
# ncn-s also needs destination_port to find the match
primary_port_ncn_s = get_primary_port(
nodes_by_name,
switch_name,
destination_node_id,
destination_port,
)
new_node = {
"subtype": "storage",
"slot": destination_slot,
"destination_port": destination_port,
"config": {
"DESCRIPTION": get_description(
switch_name,
destination_node_name,
destination_slot,
destination_port,
),
"PORT": f"{source_port}",
"LAG_NUMBER": primary_port_ncn_s,
"LAG_NUMBER_V1": primary_port,
},
}
if preserve and architecture == "network_v1":
new_node["config"]["LAG_NUMBER_V1"] = preserve_port(
preserve,
source_port,
mellanox=True,
)
elif preserve:
new_node["config"]["LAG_NUMBER"] = preserve_port(preserve, source_port)
nodes.append(new_node)
elif shasta_name == "ncn-w":
new_node = {
"subtype": "worker",
"slot": destination_slot,
"destination_port": destination_port,
"config": {
"DESCRIPTION": get_description(
switch_name,
destination_node_name,
destination_slot,
destination_port,
),
"PORT": f"{source_port}",
"LAG_NUMBER": primary_port,
},
}
if preserve and architecture == "network_v1":
new_node["config"]["LAG_NUMBER"] = preserve_port(
preserve,
source_port,
mellanox=True,
)
elif preserve:
new_node["config"]["LAG_NUMBER"] = preserve_port(preserve, source_port)
nodes.append(new_node)
elif shasta_name == "cec":
destination_rack_int = int(re.search(r"\d+", destination_rack)[0])
for cabinets in sls_variables["HMN_MTN_CABINETS"]:
sls_rack_int = int(re.search(r"\d+", (cabinets["Name"]))[0])
if destination_rack_int == sls_rack_int:
hmn_mtn_vlan = cabinets["VlanID"]
new_node = {
"subtype": "cec",
"slot": None,
"config": {
"DESCRIPTION": get_description(
switch_name,
destination_node_name,
None,
destination_port,
),
"INTERFACE_NUMBER": f"{source_port}",
"NATIVE_VLAN": hmn_mtn_vlan,
},
}
nodes.append(new_node)
elif shasta_name == "cmm":
destination_rack_int = int(re.search(r"\d+", destination_rack)[0])
for cabinets in sls_variables["NMN_MTN_CABINETS"]:
sls_rack_int = int(re.search(r"\d+", (cabinets["Name"]))[0])
if destination_rack_int == sls_rack_int:
nmn_mtn_vlan = cabinets["VlanID"]
for cabinets in sls_variables["HMN_MTN_CABINETS"]:
sls_rack_int = int(re.search(r"\d+", (cabinets["Name"]))[0])
if destination_rack_int == sls_rack_int:
hmn_mtn_vlan = cabinets["VlanID"]
new_node = {
"subtype": "cmm",
"slot": None,
"config": {
"DESCRIPTION": get_description(
switch_name,
destination_node_name,
None,
destination_port,
),
"PORT": f"{source_port}",
"LAG_NUMBER": primary_port,
"NATIVE_VLAN": nmn_mtn_vlan,
"TAGGED_VLAN": hmn_mtn_vlan,
},
}
if preserve:
new_node["config"]["LAG_NUMBER"] = preserve_port(preserve, source_port)
nodes.append(new_node)
elif shasta_name in {"uan", "login", "viz", "lmem"}:
primary_port_uan = get_primary_port(
nodes_by_name,
switch_name,
destination_node_id,
destination_port,
)
new_node = {
"subtype": "uan",
"slot": destination_slot,
"destination_port": destination_port,
"config": {
"DESCRIPTION": get_description(
switch_name,
destination_node_name,
destination_slot,
destination_port,
),
"PORT": f"{source_port}",
"LAG_NUMBER": primary_port_uan,
"LAG_NUMBER_V1": primary_port,
},
}
if preserve and architecture == "network_v1":
new_node["config"]["LAG_NUMBER_V1"] = preserve_port(
preserve,
source_port,
mellanox=True,
)
elif preserve:
new_node["config"]["LAG_NUMBER"] = preserve_port(preserve, source_port)
nodes.append(new_node)
elif shasta_name in {"gateway", "ssn", "dvs"}:
new_node = {
"subtype": "river_ncn_node_4_port_1g_ocp",
"slot": destination_slot,
"destination_port": destination_port,
"config": {
"DESCRIPTION": get_description(
switch_name,
destination_node_name,
destination_slot,
destination_port,
),
"PORT": f"{source_port}",
"INTERFACE_NUMBER": f"{source_port}",
},
}
nodes.append(new_node)
elif shasta_name == "cn":
new_node = {
"subtype": "compute",
"slot": destination_slot,
"destination_port": destination_port,
"config": {
"DESCRIPTION": get_description(
switch_name,
destination_node_name,
destination_slot,
destination_port,
),
"PORT": f"{source_port}",
"INTERFACE_NUMBER": f"{source_port}",
},
}
nodes.append(new_node)
elif shasta_name == "sw-hsn":
new_node = {
"subtype": "sw-hsn",
"slot": destination_slot,
"destination_port": destination_port,
"config": {
"DESCRIPTION": get_description(
switch_name,
destination_node_name,
destination_slot,
destination_port,
),
"PORT": f"{source_port}",
"INTERFACE_NUMBER": f"{source_port}",
},
}
nodes.append(new_node)
elif shasta_name == "pdu":
new_node = {
"subtype": "pdu",
"slot": destination_slot,
"destination_port": destination_port,
"config": {
"DESCRIPTION": get_description(
switch_name,
destination_node_name,
destination_slot,
destination_port,
),
"PORT": f"{source_port}",
"INTERFACE_NUMBER": f"{source_port}",
},
}
nodes.append(new_node)
elif shasta_name == "SubRack":
new_node = {
"subtype": "bmc",
"slot": destination_slot,
"destination_port": destination_port,
"config": {
"DESCRIPTION": get_description(
switch_name,
destination_node_name,
destination_slot,
destination_port,
),
"PORT": f"{source_port}",
"INTERFACE_NUMBER": f"{source_port}",
},
}
nodes.append(new_node)
elif shasta_name == "sw-spine":
# sw-leaf ==> sw-spine
if switch_name.startswith("sw-leaf"):
is_primary, primary, secondary = switch_is_primary(switch_name)
digits = re.findall(r"(\d+)", primary)[0]
lag_number = 100 + int(digits)
# sw-cdu ==> sw-spine
elif switch_name.startswith("sw-cdu"):
lag_number = 255
is_primary, primary, secondary = switch_is_primary(switch_name)
# sw-leaf-bmc ==> sw-spine
elif | |
<filename>test/functional/feature_int64_cscriptnum.py
#!/usr/bin/env python3
# Copyright (c) 2021 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
This tests the new May 2022 upgrade8 feature: 64-bit script integers
as well as the re-enabled OP_MUL opcode.
"""
from typing import Tuple
from test_framework.address import (
script_to_p2sh, hash160
)
from test_framework.blocktools import (
create_block,
create_coinbase,
create_tx_with_script,
make_conform_to_ctor,
)
from test_framework.key import ECKey
from test_framework.messages import (
CBlock,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
FromHex,
)
from test_framework.mininode import (
P2PDataStore,
)
from test_framework import schnorr
from test_framework.script import (
CScript,
CScriptNum,
OP_1NEGATE, OP_0, OP_1, OP_2, OP_3, OP_4, OP_6, OP_7, OP_8, OP_10, OP_15, OP_16,
OP_CHECKMULTISIG, OP_EQUALVERIFY, OP_HASH160, OP_EQUAL, OP_NUM2BIN, OP_BIN2NUM, OP_PICK,
OP_ADD,
OP_DIV,
OP_MOD,
OP_MUL,
OP_SUB,
OP_TRUE,
OP_DROP,
SIGHASH_ALL,
SIGHASH_FORKID,
SignatureHashForkId,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
# Expected node reject-reason strings asserted against in the tests below.
# Overflow failure
OVERFLOW_ERROR_BAD_OPERAND = ('mandatory-script-verify-flag-failed '
                              '(Given operand is not a number within the valid range [-2^63 + 1, 2^63 - 1])')
# Overflow if one of the operands coming in from a push off the stack is out-of-range (known issue with interpreter)
OVERFLOW_ERROR_UNK = 'mandatory-script-verify-flag-failed (unknown error)'
# OP_NUM2BIN failure when we try to encode a number that won't fit within the requested size
IMPOSSIBLE_ENCODING_ERROR = 'mandatory-script-verify-flag-failed (The requested encoding is impossible to satisfy)'
# OP_PICK if the index is out of bounds
INVALID_STACK_OPERATION = 'mandatory-script-verify-flag-failed (Operation not valid with the current stack size)'
class Int64CScriptNum(BitcoinTestFramework):
def set_test_params(self):
    """Configure the framework: one node with upgrade8 (64-bit script ints) active."""
    self.num_nodes = 1
    self.block_heights = {}
    # Node0 has bigint64 activated (activates at upgrade8)
    node0_args = ["-acceptnonstdtxn=1", "-upgrade8activationtime=1", "-expire=0"]
    self.extra_args = [node0_args]
def bootstrap_p2p(self, *, num_connections=1):
    """Add a P2P connection to the node.

    Helper to connect and wait for version handshake."""
    node = self.nodes[0]
    for _ in range(num_connections):
        node.add_p2p_connection(P2PDataStore())
    # Wait until each connection has completed the initial handshake.
    for connection in node.p2ps:
        connection.wait_for_getheaders()
def reconnect_p2p(self, **kwargs):
    """Tear down and bootstrap the P2P connection to the node.

    The node gets disconnected several times in this test. This helper
    method reconnects the p2p and restarts the network thread."""
    node = self.nodes[0]
    node.disconnect_p2ps()
    self.bootstrap_p2p(**kwargs)
def getbestblock(self, node):
    """Get the best block. Register its height so we can use build_block."""
    tip_height = node.getblockcount()
    tip_hash = node.getblockhash(tip_height)
    # Fetch the raw block (verbosity 0 -> hex) and deserialize it.
    block = FromHex(CBlock(), node.getblock(tip_hash, 0))
    block.calc_sha256()
    # Remember the height keyed by block hash for later build_block calls.
    self.block_heights[block.sha256] = tip_height
    return block
def build_block(self, parent, transactions=(), nTime=None):
    """Make a new block with an OP_1 coinbase output.

    Requires parent to have its height registered."""
    parent.calc_sha256()
    height = self.block_heights[parent.sha256] + 1
    # Default to one second after the parent unless an explicit time is given.
    if nTime is None:
        block_time = parent.nTime + 1
    else:
        block_time = nTime
    block = create_block(parent.sha256, create_coinbase(height), block_time)
    block.vtx.extend(transactions)
    make_conform_to_ctor(block)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()
    # Register the new block's height for subsequent calls.
    self.block_heights[block.sha256] = height
    return block
def run_test(self):
node = self.nodes[0]
self.bootstrap_p2p()
self.log.info("Create some blocks with OP_1 coinbase for spending.")
tip = self.getbestblock(node)
blocks = []
for _ in range(10):
tip = self.build_block(tip)
blocks.append(tip)
node.p2p.send_blocks_and_test(blocks, node, success=True)
spendable_txns = [block.vtx[0] for block in blocks]
self.log.info("Mature the blocks and get out of IBD.")
node.generatetoaddress(100, node.get_deterministic_priv_key().address)
self.log.info("Setting up spends to test and mining the fundings")
# Generate a key pair
privkeybytes = b"INT64!!!" * 4
private_key = ECKey()
private_key.set(privkeybytes, True)
# get uncompressed public key serialization
public_key = private_key.get_pubkey().get_bytes()
def create_fund_and_spend_tx(scriptsigextra, redeemextra) -> Tuple[CTransaction, CTransaction]:
spendfrom = spendable_txns.pop()
redeem_script = CScript(redeemextra + [OP_1, public_key, OP_1, OP_CHECKMULTISIG])
script_pubkey = CScript([OP_HASH160, hash160(redeem_script), OP_EQUAL])
value = spendfrom.vout[0].nValue
value1 = value - 500
# Fund transaction
txfund = create_tx_with_script(spendfrom, 0, b'', value1, script_pubkey)
txfund.rehash()
p2sh = script_to_p2sh(redeem_script)
self.log.info(f"scriptPubKey {script_pubkey!r}")
self.log.info(f"redeemScript {redeem_script!r} -> p2sh address {p2sh}")
# Spend transaction
value2 = value1 - 500
txspend = CTransaction()
txspend.vout.append(
CTxOut(value2, CScript([OP_TRUE])))
txspend.vin.append(
CTxIn(COutPoint(txfund.sha256, 0), b''))
# Sign the transaction
sighashtype = SIGHASH_ALL | SIGHASH_FORKID
hashbyte = bytes([sighashtype & 0xff])
sighash = SignatureHashForkId(
redeem_script, txspend, 0, sighashtype, value1)
txsig = schnorr.sign(privkeybytes, sighash) + hashbyte
dummy = OP_1 # Required for 1-of-1 schnorr sig
txspend.vin[0].scriptSig = ss = CScript([dummy, txsig] + scriptsigextra + [redeem_script])
self.log.info(f"scriptSig: {ss!r}")
txspend.rehash()
return txfund, txspend
mempool = []
# Basic test of OP_MUL 2 * 3 = 6
tx0, tx = create_fund_and_spend_tx([OP_2, OP_3], [OP_MUL, OP_6, OP_EQUALVERIFY])
node.p2p.send_txs_and_test([tx0, tx], node)
mempool += [tx0.hash, tx.hash]
spendable_txns.insert(0, tx) # Recycle the output from this tx
assert_equal(node.getrawmempool(), mempool)
# Basic test of OP_DIV 6 / 3 = 2
tx0, tx = create_fund_and_spend_tx([OP_6, OP_3], [OP_DIV, OP_2, OP_EQUALVERIFY])
node.p2p.send_txs_and_test([tx0, tx], node)
mempool += [tx0.hash, tx.hash]
spendable_txns.insert(0, tx) # Recycle te output from this tx
assert_equal(node.getrawmempool(), mempool)
# Divide 2^63-1 by 1 -- This should be 100% ok
ssextra = [CScriptNum(int(2**63 - 1)), OP_1]
rsextra = [OP_DIV, CScriptNum(int(2**63 - 1) // 1), OP_EQUALVERIFY]
tx0, tx = create_fund_and_spend_tx(ssextra, rsextra)
node.p2p.send_txs_and_test([tx0, tx], node)
mempool += [tx0.hash, tx.hash]
spendable_txns.insert(0, tx) # Recycle te output from this tx
assert_equal(node.getrawmempool(), mempool)
# Divide -2^63-1 / -2^63-1 -- This should be 100% ok
ssextra = [CScriptNum(-int(2**63 - 1)), CScriptNum(-int(2**63 - 1))]
rsextra = [OP_DIV, OP_1, OP_EQUALVERIFY]
tx0, tx = create_fund_and_spend_tx(ssextra, rsextra)
node.p2p.send_txs_and_test([tx0, tx], node)
mempool += [tx0.hash, tx.hash]
spendable_txns.insert(0, tx) # Recycle te output from this tx
assert_equal(node.getrawmempool(), mempool)
# Divide -2^63-1 / 2^63-1 -- This should be 100% ok
ssextra = [CScriptNum(-int(2**63 - 1)), CScriptNum(int(2**63 - 1))]
rsextra = [OP_DIV, OP_1NEGATE, OP_EQUALVERIFY]
tx0, tx = create_fund_and_spend_tx(ssextra, rsextra)
node.p2p.send_txs_and_test([tx0, tx], node)
mempool += [tx0.hash, tx.hash]
spendable_txns.insert(0, tx) # Recycle te output from this tx
assert_equal(node.getrawmempool(), mempool)
# Divide 2^63-1 / -2^63-1 -- This should be 100% ok
ssextra = [CScriptNum(int(2**63 - 1)), CScriptNum(-int(2**63 - 1))]
rsextra = [OP_DIV, OP_1NEGATE, OP_EQUALVERIFY]
tx0, tx = create_fund_and_spend_tx(ssextra, rsextra)
node.p2p.send_txs_and_test([tx0, tx], node)
mempool += [tx0.hash, tx.hash]
spendable_txns.insert(0, tx) # Recycle te output from this tx
assert_equal(node.getrawmempool(), mempool)
# Divide 2^63-1 / 2^63-1 -- This should be 100% ok
ssextra = [CScriptNum(int(2**63 - 1)), CScriptNum(int(2**63 - 1))]
rsextra = [OP_DIV, OP_1, OP_EQUALVERIFY]
tx0, tx = create_fund_and_spend_tx(ssextra, rsextra)
node.p2p.send_txs_and_test([tx0, tx], node)
mempool += [tx0.hash, tx.hash]
spendable_txns.insert(0, tx) # Recycle te output from this tx
assert_equal(node.getrawmempool(), mempool)
# Multiply past 2^32 -- should work
ssextra = [CScriptNum(int(2**31)), CScriptNum(int(2**31))]
rsextra = [OP_MUL, CScriptNum(int(2**62)), OP_EQUALVERIFY]
tx0, tx = create_fund_and_spend_tx(ssextra, rsextra)
node.p2p.send_txs_and_test([tx0, tx], node)
mempool += [tx0.hash, tx.hash]
spendable_txns.insert(0, tx) # Recycle te output from this tx
assert_equal(node.getrawmempool(), mempool)
# Add past (2^31 - 1) -- should work
ssextra = [CScriptNum(int(2**31)), CScriptNum(int(2**31))]
rsextra = [OP_ADD, CScriptNum(int(2**32)), OP_EQUALVERIFY]
tx0, tx = create_fund_and_spend_tx(ssextra, rsextra)
node.p2p.send_txs_and_test([tx0, tx], node)
mempool += [tx0.hash, tx.hash]
spendable_txns.insert(0, tx) # Recycle te output from this tx
assert_equal(node.getrawmempool(), mempool)
# Sub below -(2^31 - 1) -- should work
ssextra = [CScriptNum(-int(2**31 - 1)), CScriptNum(int(2**31 - 1))]
rsextra = [OP_SUB, CScriptNum(-int(2**32 - 2)), OP_EQUALVERIFY]
tx0, tx = create_fund_and_spend_tx(ssextra, rsextra)
node.p2p.send_txs_and_test([tx0, tx], node)
mempool += [tx0.hash, tx.hash]
spendable_txns.insert(0, tx) # Recycle te output from this tx
assert_equal(node.getrawmempool(), mempool)
# Divide/multiply mixed: -2^60 * 3 / 6 == -2^59
ssextra = [CScriptNum(-int(2**60)), OP_3]
rsextra = [OP_MUL, OP_6, OP_DIV, CScriptNum(-int(2**59)), OP_EQUALVERIFY]
tx0, tx = create_fund_and_spend_tx(ssextra, rsextra)
node.p2p.send_txs_and_test([tx0, tx], node)
mempool += [tx0.hash, tx.hash]
spendable_txns.insert(0, tx) # Recycle te output from this tx
assert_equal(node.getrawmempool(), mempool)
# Divide: -2^31 * 3 / -6 == 2^30 (intermediate value outside of 32-bit range)
ssextra = [CScriptNum(-int(2**31)), OP_3]
rsextra = [OP_MUL, CScriptNum(-6), OP_DIV, CScriptNum(int(2**30)), OP_EQUALVERIFY]
tx0, tx = create_fund_and_spend_tx(ssextra, rsextra)
node.p2p.send_txs_and_test([tx0, tx], node)
mempool += [tx0.hash, tx.hash]
spendable_txns.insert(0, tx) # Recycle te output from this tx
assert_equal(node.getrawmempool(), mempool)
# Divide: -2^32 * 3 / -6 == 2^31 (1 operand & intermediate value > 2^31 - 1)
ssextra = [CScriptNum(-int(2**32)), OP_3]
rsextra = [OP_MUL, CScriptNum(-6), OP_DIV, CScriptNum(int(2**31)), OP_EQUALVERIFY]
tx0, tx = create_fund_and_spend_tx(ssextra, rsextra)
node.p2p.send_txs_and_test([tx0, tx], node)
mempool += [tx0.hash, tx.hash]
spendable_txns.insert(0, tx) # Recycle te output from this tx
assert_equal(node.getrawmempool(), mempool)
# Multiply past 2^63 - 1 -- funding tx is ok, spending should not be accepted due to out-of-range operand
ssextra = [CScriptNum(int((2**63) - 1)), OP_3]
rsextra = [OP_MUL, OP_DROP, OP_1, OP_1, OP_EQUALVERIFY]
tx0, tx = create_fund_and_spend_tx(ssextra, rsextra)
node.p2p.send_txs_and_test([tx0], node)
node.p2p.send_txs_and_test([tx], node, success=False, expect_disconnect=True,
reject_reason=OVERFLOW_ERROR_BAD_OPERAND)
mempool += [tx0.hash]
assert_equal(node.getrawmempool(), mempool)
self.reconnect_p2p() # we lost the connection from above bad tx, reconnect
# Add past 2^63 - 1 -- funding tx is ok, spending should not be accepted due to bad operand
ssextra = [CScriptNum(int((2**63) - 1)), OP_1]
rsextra = [OP_ADD, OP_DROP, OP_1, OP_1, OP_EQUALVERIFY]
tx0, tx = create_fund_and_spend_tx(ssextra, rsextra)
node.p2p.send_txs_and_test([tx0], node)
node.p2p.send_txs_and_test([tx], node, success=False, expect_disconnect=True,
reject_reason=OVERFLOW_ERROR_BAD_OPERAND)
mempool += [tx0.hash]
assert_equal(node.getrawmempool(), mempool)
self.reconnect_p2p() # we lost the connection from | |
West Virginia",381),
("Mount Gay-Shamrock CDP, West Virginia",1178),
("Mount Hope city, West Virginia",1185),
("Mullens city, West Virginia",1564),
("Neibert CDP, West Virginia",89),
("Nettie CDP, West Virginia",609),
("Newburg town, West Virginia",400),
("New Cumberland city, West Virginia",1252),
("Newell CDP, West Virginia",1349),
("New Haven town, West Virginia",1520),
("New Martinsville city, West Virginia",5190),
("New Richmond CDP, West Virginia",374),
("Nitro city, West Virginia",6201),
("Northfork town, West Virginia",291),
("North Hills town, West Virginia",999),
("Nutter Fort town, West Virginia",1431),
("Oak Hill city, West Virginia",8458),
("Oakvale town, West Virginia",127),
("Oceana town, West Virginia",1192),
("Omar CDP, West Virginia",468),
("Paden City city, West Virginia",3132),
("Page CDP, West Virginia",79),
("Pageton CDP, West Virginia",141),
("Parcoal CDP, West Virginia",115),
("Parkersburg city, West Virginia",30328),
("Parsons city, West Virginia",1523),
("Paw Paw town, West Virginia",535),
("Pax town, West Virginia",168),
("Pea Ridge CDP, West Virginia",6215),
("Pennsboro city, West Virginia",1073),
("Pentress CDP, West Virginia",210),
("Petersburg city, West Virginia",2520),
("Peterstown town, West Virginia",654),
("Philippi city, West Virginia",3409),
("Pickens CDP, West Virginia",19),
("Piedmont town, West Virginia",719),
("Pinch CDP, West Virginia",2554),
("Pine Grove town, West Virginia",456),
("Pineville town, West Virginia",736),
("Piney View CDP, West Virginia",1267),
("Pleasant Valley city, West Virginia",3172),
("Poca town, West Virginia",1045),
("Point Pleasant city, West Virginia",4194),
("Powellton CDP, West Virginia",684),
("Pratt town, West Virginia",436),
("Prichard CDP, West Virginia",282),
("Prince CDP, West Virginia",93),
("Princeton city, West Virginia",5907),
("Prosperity CDP, West Virginia",1597),
("Pullman town, West Virginia",151),
("Quinwood town, West Virginia",144),
("Rachel CDP, West Virginia",219),
("Racine CDP, West Virginia",229),
("Rainelle town, West Virginia",1245),
("Rand CDP, West Virginia",1618),
("Ranson corporation, West Virginia",5058),
("Ravenswood city, West Virginia",3740),
("Raysal CDP, West Virginia",598),
("Reader CDP, West Virginia",335),
("Red Jacket CDP, West Virginia",385),
("Reedsville town, West Virginia",407),
("Reedy town, West Virginia",114),
("Rhodell town, West Virginia",111),
("Richwood city, West Virginia",2031),
("Ridgeley town, West Virginia",654),
("Ripley city, West Virginia",3198),
("Rivesville town, West Virginia",1113),
("Robinette CDP, West Virginia",557),
("Roderfield CDP, West Virginia",24),
("Romney city, West Virginia",2176),
("Ronceverte city, West Virginia",1970),
("Rossmore CDP, West Virginia",393),
("Rowlesburg town, West Virginia",461),
("Rupert town, West Virginia",894),
("St. Albans city, West Virginia",10442),
("St. George CDP, West Virginia",161),
("St. Marys city, West Virginia",1994),
("Salem city, West Virginia",1664),
("Salt Rock CDP, West Virginia",340),
("Sand Fork town, West Virginia",223),
("Sarah Ann CDP, West Virginia",88),
("Scarbro CDP, West Virginia",721),
("Shady Spring CDP, West Virginia",3485),
("Shannondale CDP, West Virginia",3404),
("Shenandoah Junction CDP, West Virginia",413),
("Shepherdstown town, West Virginia",1605),
("Shinnston city, West Virginia",2601),
("Shrewsbury CDP, West Virginia",823),
("Sissonville CDP, West Virginia",4951),
("Sistersville city, West Virginia",1311),
("Smithers city, West Virginia",1058),
("Smithfield town, West Virginia",166),
("Sophia town, West Virginia",1220),
("South Charleston city, West Virginia",12686),
("Spelter CDP, West Virginia",370),
("Spencer city, West Virginia",1997),
("Springfield CDP, West Virginia",272),
("Stanaford CDP, West Virginia",1060),
("Star City town, West Virginia",2196),
("Stollings CDP, West Virginia",347),
("Stonewood city, West Virginia",1777),
("Summersville city, West Virginia",3408),
("Sutton town, West Virginia",1358),
("Switzer CDP, West Virginia",557),
("Sylvester town, West Virginia",208),
("Teays Valley CDP, West Virginia",13816),
("Terra Alta town, West Virginia",1781),
("Thomas city, West Virginia",609),
("Thurmond town, West Virginia",4),
("Tioga CDP, West Virginia",101),
("Tornado CDP, West Virginia",1211),
("Triadelphia town, West Virginia",779),
("Tunnelton town, West Virginia",244),
("Twilight CDP, West Virginia",36),
("Union town, West Virginia",440),
("Valley Bend CDP, West Virginia",449),
("Valley Grove village, West Virginia",383),
("Valley Head CDP, West Virginia",248),
("Van CDP, West Virginia",206),
("Verdunville CDP, West Virginia",590),
("Vienna city, West Virginia",10446),
("Vivian CDP, West Virginia",54),
("Wallace CDP, West Virginia",412),
("War city, West Virginia",825),
("Wardensville town, West Virginia",285),
("Washington CDP, West Virginia",856),
("Waverly CDP, West Virginia",402),
("Wayne town, West Virginia",1492),
("Weirton city, West Virginia",18894),
("Welch city, West Virginia",3287),
("Wellsburg city, West Virginia",2624),
("West Hamlin town, West Virginia",713),
("West Liberty town, West Virginia",1728),
("West Logan town, West Virginia",491),
("West Milford town, West Virginia",613),
("Weston city, West Virginia",3993),
("Westover city, West Virginia",4205),
("West Union town, West Virginia",819),
("Wheeling city, West Virginia",27190),
("White Hall town, West Virginia",767),
("White Sulphur Springs city, West Virginia",2745),
("Whitesville town, West Virginia",501),
("Whitmer CDP, West Virginia",12),
("<NAME> CDP, West Virginia",896),
("Williamson city, West Virginia",2883),
("Williamstown city, West Virginia",2927),
("Windsor Heights village, West Virginia",331),
("Winfield town, West Virginia",2636),
("Wolf Summit CDP, West Virginia",157),
("Womelsdorf (Coalton) town, West Virginia",315),
("Worthington town, West Virginia",186),
("Abbotsford city, Wisconsin",2175),
("Abrams CDP, Wisconsin",462),
("Adams city, Wisconsin",1996),
("Adell village, Wisconsin",564),
("Albany village, Wisconsin",1207),
("Algoma city, Wisconsin",3069),
("Allenton CDP, Wisconsin",925),
("Allouez village, Wisconsin",13891),
("Alma city, Wisconsin",735),
("Alma Center village, Wisconsin",444),
("Almena village, Wisconsin",685),
("Almond village, Wisconsin",482),
("Altoona city, Wisconsin",7499),
("Amberg CDP, Wisconsin",200),
("Amery city, Wisconsin",2825),
("Amherst village, Wisconsin",1028),
("Amherst Junction village, Wisconsin",400),
("Angelica CDP, Wisconsin",93),
("Aniwa village, Wisconsin",251),
("Antigo city, Wisconsin",7799),
("Appleton city, Wisconsin",74234),
("Arcadia city, Wisconsin",3013),
("Arena village, Wisconsin",874),
("Argonne CDP, Wisconsin",154),
("Argyle village, Wisconsin",875),
("Arkansaw CDP, Wisconsin",196),
("Arkdale CDP, Wisconsin",119),
("Arlington village, Wisconsin",796),
("Arpin village, Wisconsin",333),
("Ashippun CDP, Wisconsin",1040),
("Ashland city, Wisconsin",7963),
("Ashwaubenon village, Wisconsin",17181),
("Athens village, Wisconsin",1025),
("Auburndale village, Wisconsin",752),
("Augusta city, Wisconsin",1566),
("Avoca village, Wisconsin",725),
("Babcock CDP, Wisconsin",68),
("Bagley village, Wisconsin",333),
("Baileys Harbor CDP, Wisconsin",207),
("Baldwin village, Wisconsin",4000),
("Balsam Lake village, Wisconsin",777),
("Bancroft CDP, Wisconsin",548),
("Bangor village, Wisconsin",1293),
("Baraboo city, Wisconsin",12093),
("Barneveld village, Wisconsin",1276),
("Barron city, Wisconsin",3334),
("Barronett CDP, Wisconsin",148),
("Bay City village, Wisconsin",468),
("Bayfield city, Wisconsin",504),
("Bayside village, Wisconsin",4529),
("Bear Creek village, Wisconsin",383),
("Beaver Dam city, Wisconsin",16366),
("Belgium village, Wisconsin",2210),
("Bell Center village, Wisconsin",120),
("Belleville village, Wisconsin",2459),
("Bellevue village, Wisconsin",15515),
("Belmont village, Wisconsin",981),
("Beloit city, Wisconsin",36813),
("Benton village, Wisconsin",970),
("Berlin city, Wisconsin",5428),
("Big Bend village, Wisconsin",1286),
("Big Falls village, Wisconsin",42),
("Birch Hill CDP, Wisconsin",370),
("Birchwood village, Wisconsin",356),
("Birnamwood village, Wisconsin",802),
("Biron village, Wisconsin",918),
("Black Creek village, Wisconsin",1379),
("Black Earth village, Wisconsin",1395),
("Black River Falls city, Wisconsin",3512),
("Blair city, Wisconsin",1334),
("Blanchardville village, Wisconsin",819),
("Bloomer city, Wisconsin",3503),
("Bloomfield village, Wisconsin",4704),
("Bloomington village, Wisconsin",764),
("Blue Mounds village, Wisconsin",1002),
("Blue River village, Wisconsin",444),
("Bluffview CDP, Wisconsin",555),
("Boaz village, Wisconsin",132),
("Bohners Lake CDP, Wisconsin",2520),
("Bonduel village, Wisconsin",1378),
("Boscobel city, Wisconsin",3150),
("Boulder Junction CDP, Wisconsin",100),
("Bowler village, Wisconsin",405),
("Boyceville village, Wisconsin",985),
("Boyd village, Wisconsin",599),
("Brandon village, Wisconsin",906),
("Brice Prairie CDP, Wisconsin",2173),
("Brillion city, Wisconsin",3133),
("Bristol village, Wisconsin",4993),
("Brodhead city, Wisconsin",3275),
("Brookfield city, Wisconsin",38151),
("Brooklyn village, Wisconsin",1283),
("Brown Deer village, Wisconsin",12039),
("Browns Lake CDP, Wisconsin",2070),
("Brownsville village, Wisconsin",616),
("Browntown village, Wisconsin",263),
("Bruce village, Wisconsin",738),
("Brule CDP, Wisconsin",227),
("Buffalo City city, Wisconsin",893),
("Burlington city, Wisconsin",10758),
("Burnett CDP, Wisconsin",230),
("Butler village, Wisconsin",1835),
("Butte des Morts CDP, Wisconsin",799),
("Butternut village, Wisconsin",426),
("Cable CDP, Wisconsin",216),
("Cadott village, Wisconsin",1423),
("Caledonia village, Wisconsin",24875),
("Cambria village, Wisconsin",714),
("Cambridge village, Wisconsin",1250),
("Cameron village, Wisconsin",1903),
("Campbellsport village, Wisconsin",1911),
("Camp Douglas village, Wisconsin",635),
("Caroline CDP, Wisconsin",217),
("Cascade village, Wisconsin",656),
("Casco village, Wisconsin",556),
("Cashton village, Wisconsin",986),
("Cassville village, Wisconsin",875),
("Cataract CDP, Wisconsin",169),
("Catawba village, Wisconsin",124),
("Cazenovia village, Wisconsin",360),
("Cecil village, Wisconsin",548),
("Cedarburg city, Wisconsin",11501),
("Cedar Grove village, Wisconsin",2111),
("Centuria village, Wisconsin",904),
("Chain O' Lakes CDP, Wisconsin",848),
("Chaseburg village, Wisconsin",320),
("Chelsea CDP, Wisconsin",104),
("Chenequa village, Wisconsin",560),
("Chetek city, Wisconsin",2166),
("Chief Lake CDP, Wisconsin",506),
("Chili CDP, Wisconsin",180),
("Chilton city, Wisconsin",3848),
("Chippewa Falls city, Wisconsin",14017),
("Clam Lake CDP, Wisconsin",29),
("Clayton village, Wisconsin",466),
("Clear Lake village, Wisconsin",996),
("Cleveland village, Wisconsin",1642),
("Clinton village, Wisconsin",2160),
("Clintonville city, Wisconsin",4384),
("Clyman village, Wisconsin",375),
("Cobb village, Wisconsin",490),
("Cochrane village, Wisconsin",451),
("Colby city, Wisconsin",1989),
("Coleman village, Wisconsin",776),
("Colfax village, Wisconsin",1106),
("Collins CDP, Wisconsin",140),
("Coloma village, Wisconsin",413),
("Columbus city, Wisconsin",5022),
("Combined Locks village, Wisconsin",3570),
("Como CDP, Wisconsin",2477),
("Conrath village, Wisconsin",76),
("Coon Valley village, Wisconsin",825),
("Cornell city, Wisconsin",1733),
("Cornucopia CDP, Wisconsin",92),
("Cottage Grove village, Wisconsin",6904),
("Couderay village, Wisconsin",73),
("Crandon city, Wisconsin",1994),
("Crivitz village, Wisconsin",998),
("Cross Plains village, Wisconsin",4106),
("Cuba City city, Wisconsin",2168),
("Cudahy city, Wisconsin",18349),
("Cumberland city, Wisconsin",2458),
("Curtiss village, Wisconsin",299),
("Dale CDP, Wisconsin",586),
("Dallas village, Wisconsin",403),
("Dalton CDP, Wisconsin",230),
("Danbury CDP, Wisconsin",156),
("Dane village, Wisconsin",1098),
("Darien village, Wisconsin",1795),
("Darlington city, Wisconsin",2330),
("Deerfield village, Wisconsin",2528),
("Deer Park village, Wisconsin",283),
("DeForest village, Wisconsin",9936),
("Delafield city, Wisconsin",7390),
("Delavan city, Wisconsin",8338),
("Delavan Lake CDP, Wisconsin",2609),
("Dellwood CDP, Wisconsin",614),
("Denmark village, Wisconsin",2143),
("De Pere city, Wisconsin",24836),
("De Soto village, Wisconsin",331),
("Diamond Bluff CDP, Wisconsin",175),
("Diaperville CDP, Wisconsin",37),
("Dickeyville village, Wisconsin",1227),
("Dodge CDP, Wisconsin",117),
("Dodgeville city, Wisconsin",4734),
("Dorchester village, Wisconsin",910),
("Dousman village, Wisconsin",2411),
("Downing village, Wisconsin",245),
("Downsville CDP, Wisconsin",113),
("Doylestown village, Wisconsin",234),
("Dresser village, Wisconsin",1019),
("Drummond CDP, Wisconsin",128),
("Dunbar CDP, Wisconsin",81),
("Durand city, Wisconsin",1787),
("Dyckesville CDP, Wisconsin",549),
("Eagle village, Wisconsin",2069),
("Eagle Lake CDP, Wisconsin",1084),
("Eagle River city, Wisconsin",1550),
("Eastman village, Wisconsin",419),
("East Troy village, Wisconsin",4330),
("Eau Claire city, Wisconsin",68086),
("Eden village, Wisconsin",758),
("Edgar village, Wisconsin",1572),
("Edgerton city, Wisconsin",5522),
("Edmund CDP, Wisconsin",165),
("Egg Harbor village, Wisconsin",244),
("Eland village, Wisconsin",154),
("Elcho CDP, Wisconsin",278),
("Elderon village, Wisconsin",326),
("Eleva village, Wisconsin",774),
("Elkhart Lake village, Wisconsin",1076),
("Elkhorn city, Wisconsin",9907),
("Elk Mound village, Wisconsin",1085),
("Ellison Bay CDP, Wisconsin",161),
("Ellsworth village, Wisconsin",3257),
("Elm Grove village, Wisconsin",6172),
("Elmwood village, Wisconsin",710),
("Elmwood Park village, Wisconsin",520),
("Elroy city, Wisconsin",1361),
("Embarrass village, Wisconsin",548),
("Emerald CDP, Wisconsin",110),
("Endeavor village, Wisconsin",586),
("Ephraim village, Wisconsin",280),
("Ettrick village, Wisconsin",587),
("Eureka CDP, Wisconsin",594),
("Evansville city, Wisconsin",5260),
("Exeland village, Wisconsin",225),
("Fairchild village, Wisconsin",567),
("Fairwater village, Wisconsin",378),
("Fall Creek village, Wisconsin",1475),
("Fall River village, Wisconsin",1884),
("Fennimore city, Wisconsin",2575),
("Fenwood village, Wisconsin",157),
("Ferryville village, Wisconsin",159),
("Fitchburg city, Wisconsin",28722),
("Florence CDP, Wisconsin",501),
("Fond du Lac city, Wisconsin",42858),
("Fontana-on-Geneva Lake village, Wisconsin",1507),
("Footville village, Wisconsin",872),
("Forest Junction CDP, Wisconsin",735),
("Forestville village, Wisconsin",482),
("Fort Atkinson city, Wisconsin",12429),
("Fountain City city, Wisconsin",859),
("Fox Crossing village, Wisconsin",18788),
("Fox Lake city, Wisconsin",1647),
("Fox Point village, Wisconsin",6688),
("Francis Creek village, Wisconsin",550),
("Franklin city, Wisconsin",36185),
("Franks Field CDP, Wisconsin",163),
("Frederic village, Wisconsin",1037),
("Fredonia village, Wisconsin",2395),
("Fremont village, Wisconsin",593),
("French Island CDP, Wisconsin",4360),
("Friendship village, Wisconsin",628),
("Friesland village, Wisconsin",304),
("Galesville city, Wisconsin",1794),
("Gays Mills village, Wisconsin",488),
("Genoa village, Wisconsin",235),
("Genoa City village, Wisconsin",3009),
("Germantown village, Wisconsin",19997),
("Gibbsville CDP, Wisconsin",509),
("Gillett city, Wisconsin",1104),
("Gilman village, Wisconsin",379),
("Gilmanton CDP, Wisconsin",132),
("Glenbeulah village, Wisconsin",425),
("Glendale city, Wisconsin",13015),
("Glen Flora village, Wisconsin",97),
("Glen Haven CDP, Wisconsin",48),
("Glenwood City city, Wisconsin",1295),
("Glidden CDP, Wisconsin",323),
("Goodman CDP, Wisconsin",232),
("Gordon CDP, Wisconsin",175),
("Gotham CDP, Wisconsin",186),
("Grafton village, Wisconsin",11627),
("Grand Marsh CDP, Wisconsin",168),
("Grand View CDP, Wisconsin",83),
("Granton village, Wisconsin",326),
("Grantsburg village, Wisconsin",1343),
("Gratiot village, Wisconsin",261),
("Green Bay city, Wisconsin",104818),
("Greenbush CDP, Wisconsin",142),
("Greendale village, Wisconsin",14245),
("Greenfield city, Wisconsin",37083),
("Green Lake city, Wisconsin",828),
("Greenleaf CDP, Wisconsin",825),
("Green Valley CDP, Wisconsin",138),
("Greenwood city, Wisconsin",1009),
("Gresham village, Wisconsin",504),
("Hager City CDP, Wisconsin",274),
("Hales Corners village, Wisconsin",7666),
("Hammond village, Wisconsin",2020),
("Hancock village, Wisconsin",385),
("Hanover CDP, Wisconsin",167),
("Harrison village, Wisconsin",11761),
("Hartford city, Wisconsin",14761),
("Hartland village, Wisconsin",9260),
("Hatfield CDP, Wisconsin",199),
("Hatley village, Wisconsin",488),
("Haugen village, Wisconsin",334),
("Hawkins village, Wisconsin",381),
("Hayward city, Wisconsin",2533),
("Hazel Green village, Wisconsin",1002),
("Hebron CDP, Wisconsin",170),
("Helenville CDP, Wisconsin",234),
("Herbster CDP, Wisconsin",81),
("Hewitt village, Wisconsin",942),
("Highland village, Wisconsin",986),
("Hilbert village, Wisconsin",1004),
("Hillsboro city, Wisconsin",1318),
("Hingham CDP, Wisconsin",901),
("Hixton village, Wisconsin",455),
("Hobart village, Wisconsin",8606),
("Holcombe CDP, Wisconsin",198),
("Hollandale village, Wisconsin",261),
("Holmen village, Wisconsin",9909),
("Horicon city, Wisconsin",3623),
("Hortonville village, Wisconsin",2743),
("Houlton CDP, Wisconsin",212),
("Howard village, Wisconsin",19396),
("Howards Grove village, Wisconsin",3254),
("Hudson city, Wisconsin",13650),
("Humbird CDP, Wisconsin",270),
("Hurley city, Wisconsin",1533),
("Hustisford village, Wisconsin",991),
("Hustler village, Wisconsin",214),
("Independence city, Wisconsin",1579),
("Ingram village, Wisconsin",70),
("Iola village, Wisconsin",1179),
("Iron Belt CDP, Wisconsin",187),
("Iron Ridge village, Wisconsin",971),
("Iron River CDP, Wisconsin",661),
("Ironton village, Wisconsin",243),
("Ixonia CDP, Wisconsin",1925),
("Jackson village, Wisconsin",7020),
("Janesville city, Wisconsin",64128),
("Jefferson city, Wisconsin",7964),
("<NAME> CDP, Wisconsin",243),
("Johnson Creek village, Wisconsin",2969),
("Juda CDP, Wisconsin",256),
("Jump River CDP, Wisconsin",24),
("Junction City village, Wisconsin",428),
("Juneau city, Wisconsin",2686),
("Kaukauna city, Wisconsin",15998),
("Kekoskee village, Wisconsin",179),
("Kellnersville village, Wisconsin",346),
("Kendall village, Wisconsin",518),
("Kennan village, Wisconsin",113),
("Kenosha city, Wisconsin",99810),
("Keshena CDP, Wisconsin",1377),
("Kewaskum | |
# Repository: bopopescu/phyG
#!/usr/bin/env python
"""
Export a history to an archive file using attribute files.
usage: %prog history_attrs dataset_attrs job_attrs out_file
-G, --gzip: gzip archive file
"""
from __future__ import with_statement
import optparse, sys, os, tempfile, time, subprocess, shlex, tarfile, shutil
import pkg_resources
pkg_resources.require("simplejson")
import simplejson
class ManagedIndexer():
    def __init__( self, output_file, infile, workingdir, rsync_url, tooldata ):
        """Set up paths, output/log handles, and the indexer dispatch table.

        output_file -- path of the JSON file index locations are written to
        infile      -- reference FASTA to index
        workingdir  -- directory indexes are built in (created if missing)
        rsync_url   -- base URL used to try fetching prebuilt indexes
        tooldata    -- tool-data directory (stored as an absolute path)
        """
        self.tooldatapath = os.path.abspath( tooldata )
        self.workingdir = os.path.abspath( workingdir )
        # Opened now; written and closed by _flush_files() at end of run.
        self.outfile = open( os.path.abspath( output_file ), 'w' )
        self.basedir = os.path.split( self.workingdir )[0]
        self.fasta = os.path.abspath( infile )
        # Collected index locations keyed by sequence space:
        # 'nt' = nucleotide space, 'cs' = color space (SOLiD).
        self.locations = dict( nt=[], cs=[] )
        self.log = []
        self.rsync_opts = '-aclSzq'
        self.rsync_url = rsync_url
        # Maps an indexer name to the method (by name) that implements it.
        self.indexers = {
            'bwa': '_bwa',
            'bowtie': '_bowtie',
            'bowtie2': '_bowtie2',
            '2bit': '_twobit',
            'perm': '_perm',
            'bfast': '_bfast',
            'picard': '_picard',
            'sam': '_sam'
        }
        if not os.path.exists( self.workingdir ):
            os.makedirs( self.workingdir )
        self.logfile = open( os.path.join( self.workingdir, 'ManagedIndexer.log' ), 'w+' )
def run_indexer( self, indexer ):
self.fapath = self.fasta
self.fafile = os.path.basename( self.fapath )
self.genome = os.path.splitext( self.fafile )[0]
with WithChDir( self.basedir ):
if indexer not in self.indexers:
sys.stderr.write( 'The requested indexing function does not exist' )
exit(127)
else:
with WithChDir( self.workingdir ):
self._log( 'Running indexer %s.' % indexer )
result = getattr( self, self.indexers[ indexer ] )()
if result in [ None, False ]:
sys.stderr.write( 'Error running indexer %s, %s' % ( indexer, result ) )
self._flush_files()
exit(1)
else:
self._log( self.locations )
self._log( 'Indexer %s completed successfully.' % indexer )
self._flush_files()
exit(0)
def _check_link( self ):
self._log( 'Checking symlink to %s' % self.fafile )
if not os.path.exists( self.fafile ):
self._log( 'Symlink not found, creating' )
os.symlink( os.path.relpath( self.fapath ), self.fafile )
def _do_rsync( self, idxpath ):
self._log( 'Trying rsync at %s/%s%s' % ( self.rsync_url, self.genome, idxpath ) )
result = subprocess.call( shlex.split( 'rsync %s %s/%s%s .' % ( self.rsync_opts, self.rsync_url, self.genome, idxpath ) ), stderr=self.logfile )
if result != 0:
self._log( 'Rsync failed or index not found. Generating.' )
else:
self._log( 'Rsync succeeded.' )
return result
    def _flush_files( self ):
        """Write the collected index locations as JSON and close open handles."""
        simplejson.dump( self.locations, self.outfile )
        self.outfile.close()
        self.logfile.close()
def _log( self, stuff ):
timestamp = time.strftime('%Y-%m-%d %H:%M:%S %z')
self.logfile.write( "[%s] %s\n" % (timestamp, stuff) )
def _bwa( self ):
result = self._do_rsync( '/bwa_index/' )
if result == 0:
self.locations[ 'nt' ].append( self.fafile )
return self._bwa_cs()
else:
self._check_link()
command = shlex.split( 'bwa index -a bwtsw %s' % self.fafile )
result = subprocess.call( command, stderr=self.logfile, stdout=self.logfile )
if result != 0:
newcommand = shlex.split( 'bwa index -c %s' % self.fafile )
result = call( newcommand, stderr=self.logfile, stdout=self.logfile )
if result == 0:
self.locations[ 'nt' ].append( self.fafile )
os.remove( self.fafile )
return self._bwa_cs()
else:
self._log( 'BWA (base) exited with code %s' % result )
return False
def _bwa_cs( self ):
if not os.path.exists( os.path.join( self.workingdir, 'cs' ) ):
os.makedirs( 'cs' )
with WithChDir( 'cs' ):
self._check_link()
command = shlex.split( 'bwa index -a bwtsw -c %s' % self.fafile )
result = subprocess.call( command, stderr=self.logfile, stdout=self.logfile )
if result != 0:
newcommand = shlex.split( 'bwa index -c %s' % self.fafile )
result = call( newcommand, stderr=self.logfile, stdout=self.logfile )
if result == 0:
self.locations[ 'cs' ].append( self.fafile )
os.remove( self.fafile )
else:
self._log( 'BWA (color) exited with code %s' % result )
return False
else:
self.locations[ 'cs' ].append( self.fafile )
os.remove( self.fafile )
else:
self.locations[ 'cs' ].append( self.fafile )
temptar = tarfile.open( 'cs.tar', 'w' )
temptar.add( 'cs' )
temptar.close()
shutil.rmtree( 'cs' )
return True
def _bowtie( self ):
result = self._do_rsync( '/bowtie_index/' )
if result == 0:
self.locations[ 'nt' ].append( self.genome )
return self._bowtie_cs()
else:
self._check_link()
command = shlex.split( 'bowtie-build -f %s %s' % ( self.fafile, self.genome ) )
result = subprocess.call( command, stderr=self.logfile, stdout=self.logfile )
if result == 0:
self.locations[ 'nt' ].append( self.genome )
os.remove( self.fafile )
return self._bowtie_cs()
else:
self._log( 'Bowtie (base) exited with code %s' % result )
return False
    def _bowtie_cs( self ):
        """Build the color-space bowtie index in ./cs, tar it, and clean up.

        If the 'cs' directory already exists the index is assumed present
        and only its location is recorded.  Returns True on success,
        False when bowtie-build fails.
        """
        indexdir = os.path.join( os.getcwd(), 'cs' )
        if not ( os.path.exists( indexdir ) ):
            os.makedirs( indexdir )
            with WithChDir( indexdir ):
                self._check_link()
                # -C selects color-space index construction.
                command = shlex.split( 'bowtie-build -C -f %s %s' % ( self.fafile, self.genome ) )
                result = subprocess.call( command, stderr=self.logfile, stdout=self.logfile )
                if result == 0:
                    self.locations[ 'cs' ].append( self.genome )
                else:
                    self._log( 'Bowtie (color) exited with code %s' % result )
                    return False
                os.remove( os.path.join( indexdir, self.fafile ) )
        else:
            self.locations[ 'cs' ].append( self.genome )
        # Archive the color-space index and remove the working directory.
        temptar = tarfile.open( 'cs.tar', 'w' )
        temptar.add( 'cs' )
        temptar.close()
        shutil.rmtree( 'cs' )
        return True
def _bowtie2( self ):
result = self._do_rsync( '/bowtie2_index/' )
if result == 0:
self.locations[ 'nt' ].append( self.fafile )
return True
ref_base = os.path.splitext(self.fafile)[0]
self._check_link()
command = shlex.split( 'bowtie2-build %s %s' % ( self.fafile, ref_base ) )
result = subprocess.call( command, stderr=self.logfile, stdout=self.logfile )
if result == 0:
self.locations[ 'nt' ].append( ref_base )
os.remove( self.fafile )
return True
else:
self._log( 'Bowtie2 exited with code %s' % result )
return False
def _twobit( self ):
    """Index reference files using 2bit for random access.

    Fetches a prebuilt <genome>.2bit via rsync when available,
    otherwise runs faToTwoBit on the local FASTA.
    Returns True on success, False if faToTwoBit fails.
    """
    result = self._do_rsync( '/seq/%s.2bit' % self.genome )
    if result == 0:
        self.locations['nt'].append( "%s.2bit" % self.genome )
        return True
    else:
        out_file = "%s.2bit" % self.genome
        self._check_link()
        command = shlex.split( 'faToTwoBit %s %s' % ( self.fafile, out_file ) )
        result = subprocess.call( command, stderr=self.logfile, stdout=self.logfile )
        if result == 0:
            self.locations['nt'].append( out_file )
            # Source FASTA is no longer needed once the 2bit exists.
            os.remove( self.fafile )
            return True
        else:
            self._log( 'faToTwoBit exited with code %s' % result )
            return False
def _perm( self ):
    """Build base-space PerM indexes (seeds F3 and F4, read length 50),
    then delegate to _perm_cs for the color-space indexes.

    Returns False if any PerM run fails, otherwise the result of
    _perm_cs().
    """
    # NOTE(review): unlike the other indexers, the rsync result is not
    # checked here -- confirm whether a successful rsync should skip
    # the local build.
    result = self._do_rsync( '/perm_index/' )
    self._check_link()
    genome = self.genome
    read_length = 50
    for seed in [ 'F3', 'F4' ]:
        key = '%s_%s_%s' % (self.genome, seed, read_length)
        desc = '%s: seed=%s, read length=%s' % (self.genome, seed, read_length)
        index = "%s_base_%s_%s.index" % (self.genome, seed, read_length)
        if not os.path.exists( index ):
            # NOTE(review): output is not redirected to self.logfile
            # here, unlike _perm_cs -- confirm intentional.
            command = shlex.split("PerM %s %s --readFormat fastq --seed %s -m -s %s" % (self.fafile, read_length, seed, index))
            result = subprocess.call( command )
            if result != 0:
                self._log( 'PerM (base) exited with code %s' % result )
                return False
        # Record [key, description, index file] for this seed.
        self.locations[ 'nt' ].append( [ key, desc, index ] )
    os.remove( self.fafile )
    return self._perm_cs()
def _perm_cs( self ):
    """Build color-space PerM indexes (seeds F3 and F4, read length 50)
    inside cs/, then tar the directory into cs.tar and remove it.

    Returns True on success, False if any PerM run fails.
    """
    genome = self.genome
    read_length = 50
    if not os.path.exists( 'cs' ):
        os.makedirs( 'cs' )
    with WithChDir( 'cs' ):
        self._check_link()
        for seed in [ 'F3', 'F4' ]:
            key = '%s_%s_%s' % (genome, seed, read_length)
            desc = '%s: seed=%s, read length=%s' % (genome, seed, read_length)
            index = "%s_color_%s_%s.index" % (genome, seed, read_length)
            if not os.path.exists( index ):
                # csfastq selects color-space input format.
                command = shlex.split("PerM %s %s --readFormat csfastq --seed %s -m -s %s" % (self.fafile, read_length, seed, index))
                result = subprocess.call( command, stderr=self.logfile, stdout=self.logfile )
                if result != 0:
                    self._log( 'PerM (color) exited with code %s' % result )
                    return False
            # Record [key, description, index file] for this seed.
            self.locations[ 'cs' ].append( [ key, desc, index ] )
        # Remove the FASTA link/copy inside the cs directory.
        os.remove( self.fafile )
    temptar = tarfile.open( 'cs.tar', 'w' )
    temptar.add( 'cs' )
    temptar.close()
    shutil.rmtree( 'cs' )
    return True
def _picard( self ):
    """Create a Picard sequence dictionary (<genome>.dict) and a
    samtools FASTA index for the reference.

    Returns True on success, False if CreateSequenceDictionary fails.
    """
    result = self._do_rsync( '/srma_index/' )
    if result == 0 and os.path.exists( '%s.dict' % self.genome):
        self.locations[ 'nt' ].append( self.fafile )
        return True
    local_ref = self.fafile
    srma = os.path.abspath( os.path.join( self.tooldatapath, 'shared/jars/picard/CreateSequenceDictionary.jar' ) )
    genome = os.path.splitext( self.fafile )[0]
    self._check_link()
    # Build the .fai index only if neither candidate name exists yet.
    if not os.path.exists( '%s.fai' % self.fafile ) and not os.path.exists( '%s.fai' % self.genome ):
        command = shlex.split( 'samtools faidx %s' % self.fafile )
        subprocess.call( command, stderr=self.logfile )
    command = shlex.split( "java -jar %s R=%s O=%s.dict URI=%s" \
        % ( srma, local_ref, genome, local_ref ) )
    if not os.path.exists( '%s.dict' % self.genome ):
        result = subprocess.call( command, stderr=self.logfile, stdout=self.logfile )
        self._log( ' '.join( command ) )
    # NOTE(review): if the .dict already exists, `result` here is the
    # rsync exit code, so a failed rsync with an existing .dict logs a
    # misleading "Picard exited" error -- confirm intended.
    if result != 0:
        self._log( 'Picard exited with code %s' % result )
        return False
    self.locations[ 'nt' ].append( self.fafile )
    os.remove( self.fafile )
    return True
def _sam( self ):
local_ref = self.fafile
local_file = os.path.splitext( self.fafile )[ 0 ]
print 'Trying rsync'
result = self._do_rsync( '/sam_index/' )
if result == 0 and ( os.path.exists( '%s.fai' % self.fafile ) or os.path.exists( '%s.fai' % self.genome ) ):
self.locations[ 'nt' ].append( '%s.fai' % local_ref )
return True
self._check_link()
print 'Trying indexer'
command = shlex.split("samtools faidx %s" | |
plt.gca().add_artist(sim_legend)
fig.subplots_adjust(wspace=0.3)
ffig = os.path.join(fig_dir, 'abc_slope_AV.%s.png' % gal_type)
fig.savefig(ffig, bbox_inches='tight')
fig.savefig(fig_tex(ffig, pdf=True), bbox_inches='tight')
plt.close()
return None
def ABC_slope_AV_subpop():
    ''' comparison of slope to A_V relation marking the different
    subpopulations

    For each simulation (TNG, EAGLE) plots S = A_1500/A_V vs A_V (left
    column) and the DEM slope offset delta vs A_V (right column),
    color-coding galaxies by sSFR bin and marker-coding by stellar mass.
    Saves abc_slope_AV_subpop.png to fig_dir.
    '''
    # wavelength grid; i1500/i3000/i5500 index 1500A, 3000A, 5500A on it
    wave = np.linspace(1000, 10000, 451)
    i1500 = 25
    i3000 = 100
    i5500 = 225
    fig = plt.figure(figsize=(12,10))
    for i, sim in enumerate(['TNG', 'EAGLE']):
        # one row per simulation: left = S vs A_V, right = delta vs A_V
        sub1 = fig.add_subplot(2,2,2*i+1)
        sub2 = fig.add_subplot(2,2,2*i+2)
        sub1.text(0.05, 0.95, sim, transform=sub1.transAxes, fontsize=20, ha='left', va='top')
        # read sim
        # get abc posterior (median of the last ABC iteration)
        theta_T = np.loadtxt(os.path.join(os.environ['GALPOPFM_DIR'], 'abc',
            abc_run(sim.lower()), 'theta.t%i.dat' % nabc[sim.lower()]))
        theta_median = np.median(theta_T, axis=0)
        x_sim, _sim, _sfr0 = _sim_observables(sim.lower(), theta_median)
        # galaxies with low M* and high SFR
        # sSFR bins (log sSFR = logsfr.inst - logmstar), highest to lowest
        veryhighSFR = (_sim['logsfr.inst'] - _sim['logmstar'] > -9.75)
        highSFR = ((_sim['logsfr.inst'] - _sim['logmstar'] < -9.75) &
                (_sim['logsfr.inst'] - _sim['logmstar'] > -10.5))
        lowSFR = ((_sim['logsfr.inst'] - _sim['logmstar'] < -10.5) &
                (_sim['logsfr.inst'] - _sim['logmstar'] > -11.))
        verylowSFR = ((_sim['logsfr.inst'] - _sim['logmstar'] < -11.) &
                (_sim['logsfr.inst'] - _sim['logmstar'] > -12.))
        veryverylowSFR = (_sim['logsfr.inst'] - _sim['logmstar'] < -12.)
        lowmass = (_sim['logmstar'] < 10.5)
        highmass = (_sim['logmstar'] >= 10.5)
        # reversed so the lowest-sSFR population is drawn first (underneath)
        subpops = [veryhighSFR, highSFR, lowSFR, verylowSFR, veryverylowSFR][::-1]
        subclrs = ['C0', 'C2', 'C1', 'r', 'C4'][::-1]
        # get attenuation curve (flat input spectrum -> A_lambda in mag)
        _A_lambda = dem_attenuate(
                theta_median,
                wave,
                np.ones(len(wave)),
                _sim['logmstar'],
                _sim['logsfr.inst'])#, mstar[subpop], sfr[subpop], nebular=False)
        A_lambda = -2.5 * np.log10(_A_lambda)
        A_V = A_lambda[:,i5500]
        S = A_lambda[:,i1500]/A_V
        # median slope offset from the DEM parameterization (theta[3:6])
        delta_median = theta_median[3] * (_sim['logmstar'] - 10.) +\
                theta_median[4] * _sim['logsfr.inst'] + theta_median[5]
        # background 68/95% contours of the full population
        DFM.hist2d(A_V, S, range=[(0., 1.4), (0., 14.4)],
                levels=[0.68, 0.95],
                bins=10, color='k',
                plot_datapoints=False, fill_contours=False, plot_density=False,
                ax=sub1)
        DFM.hist2d(A_V, delta_median, range=[(0., 1.4), (-1.5, 1.)],
                levels=[0.68, 0.95],
                bins=10, color='k', #contour_kwargs={'linewidths': 0},
                plot_datapoints=False, fill_contours=False, plot_density=False,
                ax=sub2)
        # scatter each sSFR subpopulation; high-mass points get larger
        # edged markers
        for subpop, clr in zip(subpops, subclrs):
            sub1.scatter(A_V[subpop & lowmass], S[subpop & lowmass], c=clr, s=3)
            sub1.scatter(A_V[subpop & highmass], S[subpop & highmass], c=clr,
                    s=10, marker='o', edgecolors='k')
            sub2.scatter(A_V[subpop & lowmass], delta_median[subpop & lowmass], c=clr, s=3)
            sub2.scatter(A_V[subpop & highmass], delta_median[subpop & highmass],
                    c=clr, s=10, marker='o', edgecolors='k')
        # Salim & Narayanan (2020) S-A_V relation band
        sub1.fill_between(np.linspace(0., 1.4, 100),
                10**(-0.68 * np.log10(np.linspace(0., 1.4, 100))+0.424-0.12),
                10**(-0.68 * np.log10(np.linspace(0., 1.4, 100))+0.424+0.12),
                color='k', alpha=0.25, linewidth=0, label='Salim\&Narayanan(2020)')
        # Salim+(2018) delta-A_V band (tabulated envelope)
        sub2.fill_between(
                [0.100, 0.200, 0.300, 0.400, 0.500, 0.600, 0.700, 0.800, 0.900,
                    1.000, 1.100, 1.200, 1.300],
                [-1.030, -0.830, -0.636, -0.519, -0.440, -0.381, -0.341, -0.286,
                    -0.198, -0.103, -0.035, 0.054, 0.118],
                [-0.499, -0.270, -0.111, -0.007, 0.054, 0.112, 0.134, 0.144, 0.169,
                    0.196, 0.236, 0.282, 0.283],
                facecolor='k', alpha=0.1, hatch='X', edgecolor='k', linewidth=0., label='Salim+(2018)')
        sub1.set_xlim(0.1, 1.4)
        sub1.set_ylabel('$S = A_{1500}/A_V$', fontsize=25)
        sub1.set_ylim(0., 14.4)
        sub2.set_xlim(0.1, 1.4)
        sub2.set_ylabel('$\delta$', fontsize=25)
        sub2.set_ylim(-1.6, 1.5)
        sub1.set_xlabel(r'$A_V$', fontsize=25)
        sub2.set_xlabel(r'$A_V$', fontsize=25)
        #sub1.legend(loc='upper right', handletextpad=0.1, fontsize=18)
    fig.subplots_adjust(wspace=0.3)
    ffig = os.path.join(fig_dir, 'abc_slope_AV_subpop.png')
    fig.savefig(ffig, bbox_inches='tight')
    return None
def _ABC_slope_AV_quiescent():
    ''' comparison of slope to A_V

    Plots S = A_1500/A_V vs A_V and the DEM slope offset delta vs A_V
    for each simulation, highlighting quiescent galaxies
    (log sSFR < -11.5, excluding the zero-SFR sample) in red.
    Saves _abc_slope_AV_quiescent.png to fig_dir.
    '''
    # wavelength grid; i1500/i3000/i5500 index 1500A, 3000A, 5500A on it
    wave = np.linspace(1000, 10000, 451)
    i1500 = 25
    i3000 = 100
    i5500 = 225
    fig = plt.figure(figsize=(12,5))
    sub1 = fig.add_subplot(121)
    sub2 = fig.add_subplot(122)
    # NOTE(review): here nabc is sliced like a sequence (nabc[1:]) while
    # ABC_slope_AV_subpop indexes it by name (nabc[sim.lower()]) --
    # confirm these globals are consistent.
    for isim, sim, iabc in zip(range(len(sims))[1:], sims[1:], nabc[1:]):
        # read sim
        # get abc posterior (median of ABC iteration iabc)
        theta_T = np.loadtxt(os.path.join(os.environ['GALPOPFM_DIR'], 'abc',
            abc_run(sim), 'theta.t%i.dat' % iabc))
        theta_median = np.median(theta_T, axis=0)
        x_sim, sfr0_sim, _sim = _sim_observables(sim.lower(), theta_median,
                zero_sfr_sample=True, return_sim=True)
        # get attenuation curve (flat input spectrum -> A_lambda in mag)
        _A_lambda = dem_attenuate(
                theta_median,
                wave,
                np.ones(len(wave)),
                _sim['logmstar'],
                _sim['logsfr.inst'], # mstar[subpop], sfr[subpop],
                nebular=False)
        A_lambda = -2.5 * np.log10(_A_lambda)
        A_V = A_lambda[:,i5500]
        S = A_lambda[:,i1500]/A_V
        # quiescent = very low sSFR but not in the zero-SFR sample
        quiescent = (_sim['logsfr.inst'] - _sim['logmstar'] < -11.5) & ~sfr0_sim
        print('%i quiescent galaxies' % np.sum(quiescent))
        # median slope offset from the DEM parameterization (theta[3:6])
        delta_median = theta_median[3] * (_sim['logmstar'] - 10.) +\
                theta_median[4] * _sim['logsfr.inst'] + theta_median[5]
        DFM.hist2d(A_V, S, levels=[0.68, 0.95],
                range=[(0., 2.), (0., 14.4)], bins=10, color=clrs[isim], #contour_kwargs={'linewidths': 0},
                plot_datapoints=False, fill_contours=True, plot_density=False, ax=sub1)
        sub1.scatter(A_V[quiescent], S[quiescent], c='r')
        A_V = A_lambda[:,i5500]
        DFM.hist2d(A_V, delta_median, levels=[0.68, 0.95],
                range=[(0., 2.), (-1.5, 1.)], bins=10, color=clrs[isim], #contour_kwargs={'linewidths': 0},
                plot_datapoints=False, fill_contours=True, plot_density=False, ax=sub2)
        sub2.scatter(A_V[quiescent], delta_median[quiescent], c='r')
    sub1.set_xlabel(r'$A_V$', fontsize=25)
    sub1.set_xlim(0.1, 2.)
    sub1.set_ylabel('$S = A_{1500}/A_V$', fontsize=25)
    sub1.set_ylim(0., 14.4)
    sub2.set_xlabel(r'$A_V$', fontsize=25)
    sub2.set_xlim(0.1, 2.)
    sub2.set_ylabel('$\delta$', fontsize=25)
    sub2.set_ylim(-1.5, 1.)
    # sim legends (empty fills just to get colored legend patches)
    _plt_sims = []
    for i in range(1,3):
        _plt_sim = sub2.fill_between([], [], [], color=clrs[i], alpha=0.25,
                linewidth=0)
        _plt_sims.append(_plt_sim)
    sim_legend = sub2.legend(_plt_sims, sims[1:], loc='lower right',
            handletextpad=0.1, prop={'size': 20})
    plt.gca().add_artist(sim_legend)
    fig.subplots_adjust(wspace=0.3)
    ffig = os.path.join(fig_dir, '_abc_slope_AV_quiescent.png')
    fig.savefig(ffig, bbox_inches='tight')
    plt.close()
    return None
def ABC_slope_MSFR():
    ''' comparison of slope on the M*-SFR plane

    Scatter of galaxies in the log M* vs log sSFR plane, colored by the
    DEM median slope offset delta, one panel per simulation.
    Saves abc_slope_msfr.png to fig_dir.
    '''
    # wavelength grid; indices are unused here but kept for parity with
    # the other ABC_slope_* functions
    wave = np.linspace(1000, 10000, 451)
    i1500 = 25
    i3000 = 100
    i5500 = 225
    fig = plt.figure(figsize=(10,5))
    for isim, sim, iabc in zip(range(len(sims))[1:], sims[1:], nabc[1:]):
        sub = fig.add_subplot(1,2,isim)
        # read sim
        # get abc posterior (median of ABC iteration iabc)
        theta_T = np.loadtxt(os.path.join(os.environ['GALPOPFM_DIR'], 'abc',
            abc_run(sim), 'theta.t%i.dat' % iabc))
        theta_median = np.median(theta_T, axis=0)
        x_sim, sfr0_sim, _sim = _sim_observables(sim.lower(), theta_median,
                zero_sfr_sample=True, return_sim=True)
        # median slope offset from the DEM parameterization (theta[3:6])
        delta_median = theta_median[3] * (_sim['logmstar'] - 10.) +\
                theta_median[4] * _sim['logsfr.inst'] + theta_median[5]
        # exclude the zero-SFR sample from the scatter
        sc = sub.scatter(_sim['logmstar'][~sfr0_sim],
                _sim['logsfr.inst'][~sfr0_sim]-_sim['logmstar'][~sfr0_sim],
                c=delta_median[~sfr0_sim], vmin=-1.5, vmax=1.)
        sub.set_xlim([9., 12.])
        sub.set_ylim([-14., -8.])
        if isim != 1: sub.set_yticklabels([])
        sub.set_xticklabels([9., '', 10., '', 11.])
        sub.text(0.05, 0.95, sim, transform=sub.transAxes, fontsize=20, ha='left', va='top')
    # invisible background axes used only for shared axis labels
    bkgd = fig.add_subplot(111, frameon=False)
    bkgd.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
    bkgd.set_xlabel(r'log ( $M_* \;\;[M_\odot]$ )', labelpad=15, fontsize=25)
    bkgd.set_ylabel(r'log ( sSFR $[yr^{-1}]$ )', labelpad=15, fontsize=25)
    fig.subplots_adjust(wspace=0.1, right=0.8)
    cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
    fig.colorbar(sc, cax=cbar_ax)
    ffig = os.path.join(fig_dir, 'abc_slope_msfr.png')
    fig.savefig(ffig, bbox_inches='tight')
    plt.close()
    return None
def ABC_attenuation():
''' comparison of attenuation curves of DEM models to standard attenuation curves in
the literature.
todo:
* compile the following attenuation curves:
* Cardelli+(1989) MW
* Wild+(2011)
* Kriek & Conroy (2013)
* Reddy+(2015)
'''
def _salim2018(_lam, logm, logsfr):
# Salim(2018) table 1 and Eq. 10
logssfr = logsfr - logm
lam = _lam/10000.
if logssfr < -11: # quiescent
RV = 2.61
B = 2.21
a0 = -3.72
a1 = 2.20
a2 = -0.062
a3 = 0.0080
elif logm > 9.5 and logm < 10.5:
RV = 2.99
B = 1.73
a0 = -4.13
a1 = 2.56
a2 = -0.153
a3 = 0.0105
elif logm > 10.5:
RV = 3.47
B = 1.09
a0 = -4.66
a1 = 3.03
a2 = -0.271
a3 = 0.0147
Dl = B * lam**2 * 0.035**2 / ((lam**2 - 0.2175**2)**2 + lam**2 * 0.035**2)
kl = a0 + a1/lam + a2 / lam**2 + a3/lam**3 + Dl + RV
return kl / RV
def _calzetti(lam):
return dustFM.calzetti_absorption(lam)
# Battisti+(2017) Eq. 9
def _battisti2017(_lam):
lam = np.atleast_1d(_lam/1e4)
x = 1./lam
lowlam = (lam < 0.63)
highlam = (lam >= 0.63)
kl = np.zeros(len(lam))
kl[lowlam] = 2.40 * (-2.488 + 1.803 * x[lowlam] - 0.261 * x[lowlam]**2 + 0.0145 *
x[lowlam]**3) + 3.67
kl[highlam] = 2.30 * (-1.996 + 1.135 * x[highlam] - 0.0124 * x[highlam]**2) + 3.67
return kl
# read Narayanan+(2018) attenuation curves
fnara = os.path.join(dat_dir, 'obs', 'narayanan_median_Alambda.dat.txt')
_wave_n2018, av_n2018 = np.loadtxt(fnara, skiprows=1, unpack=True, usecols=[0, 1])
wave_n2018 = 1e4/_wave_n2018
## read SMC from Pei(1992)
#fsmc = os.path.join(dat_dir, 'obs', 'pei1992_smc.txt')
#_1_lam, E_ratio = np.loadtxt(fsmc, skiprows=1, unpack=True, usecols=[0, 1])
#wave_smc = 1e4/_1_lam
#RV_smc = 2.93
#Asmc = (E_ratio + RV_smc)/(1+RV_smc)
## normalize at 3000
#Asmc3000 = np.interp([3000.], wave_smc, Asmc)[0]
#Asmc /= Asmc3000
wave = np.linspace(1000, 10000, 2251)
i3000 = 500
theta_meds, sim_seds = [], []
for sim in ['TNG', 'EAGLE']:
# get abc posterior
theta_T = np.loadtxt(os.path.join(os.environ['GALPOPFM_DIR'], 'abc',
abc_run(sim.lower()), 'theta.t%i.dat' % nabc[sim.lower()]))
theta_median = np.median(theta_T, axis=0)
theta_meds.append(theta_median)
# get sims posterior
_, _sim_sed, _ = _sim_observables(sim.lower(), theta_median)
sim_seds.append(_sim_sed)
fig = plt.figure(figsize=(11,8))
# SF or Q
for isfq, _sfq in enumerate(['star-forming', 'quiescent']):
# low or high mass
for im, _m in enumerate(['low mass', 'high mass']):
sub = fig.add_subplot(2,2, 2 * im + isfq + 1)
for i, sim in enumerate(['TNG', 'EAGLE']):
# get abc posterior
theta_median = theta_meds[i]
_sim_sed = sim_seds[i]
mstar = _sim_sed['logmstar']
sfr = | |
limit = localLoanToken.transactionLimit(SUSD)
print("DOC limit, ",limit)
limit = localLoanToken.transactionLimit(USDT)
print("USDT limit, ",limit)
limit = localLoanToken.transactionLimit(BPro)
print("BPro limit, ",limit)
def readLiquidity():
    """Print the free liquidity of the main lending pools and the
    USDT/rBTC reserve balances of the USDT AMM converter."""
    # free liquidity = total supplied - total borrowed, in whole tokens
    for name in ('iRBTC', 'iDOC', 'iUSDT'):
        pool = Contract.from_abi("loanToken", address=contracts[name],
                                 abi=LoanTokenLogicStandard.abi, owner=acct)
        free = pool.totalAssetSupply() - pool.totalAssetBorrow()
        print("liquidity on %s" % name, free / 1e18)
    # reserve balances held by the USDT converter
    for label, key in (("USDT", 'USDT'), ("rBTC", 'WRBTC')):
        erc20 = Contract.from_abi("Token", address=contracts[key],
                                  abi=TestToken.abi, owner=acct)
        bal = erc20.balanceOf(contracts['ConverterUSDT'])
        print("supply of %s on swap" % label, bal / 1e18)
def hasApproval(tokenContractAddr, sender, receiver):
    """Print (in whole tokens) and return (in wei) the ERC20 allowance
    `sender` has granted to `receiver`."""
    token = Contract.from_abi("Token", address=tokenContractAddr,
                              abi=TestToken.abi, owner=sender)
    granted = token.allowance(sender, receiver)
    print("allowance: ", granted / 1e18)
    return granted
def checkIfUserHasToken(EAT, user):
    """Print `user`'s raw balance of the token at address `EAT`."""
    token = Contract.from_abi("Token", address=EAT, abi=TestToken.abi, owner=user)
    print("balance: ", token.balanceOf(user))
def readLendingBalanceForUser(loanTokenAddress, userAddress):
    """Print a user's iToken balance and underlying asset balance on a
    lending pool."""
    pool = Contract.from_abi("loanToken", address=loanTokenAddress,
                             abi=LoanTokenLogicStandard.abi, owner=userAddress)
    print('iToken balance', pool.balanceOf(userAddress))
    print('underlying token balance', pool.assetBalanceOf(userAddress))
# def replaceLoanTokenLogic(loanTokenAddress, logicAddress):
# loanToken = Contract.from_abi("loanToken", address=loanTokenAddress, abi=LoanToken.abi, owner=acct)
# loanToken.setTarget(logicAddress)
def readOwner(contractAddress):
    """Print the owner of the Ownable contract at `contractAddress`."""
    instance = Contract.from_abi("loanToken", address=contractAddress,
                                 abi=LoanToken.abi, owner=acct)
    print('owner:', instance.owner())
def checkOwnerIsAddress(contractAddress, expectedOwner):
    """Print whether the contract's owner matches `expectedOwner`."""
    instance = Contract.from_abi("loanToken", address=contractAddress,
                                 abi=LoanToken.abi, owner=acct)
    print("owner == expectedOwner?", instance.owner() == expectedOwner)
'''
sets a collateral token address as collateral for margin trading
'''
def setupMarginLoanParams(collateralTokenAddress, loanTokenAddress):
    """Register margin-trading loan parameters on a loan token for one
    collateral token (20% min initial margin, 15% maintenance)."""
    loanToken = Contract.from_abi("loanToken", address=loanTokenAddress,
                                  abi=LoanTokenLogicStandard.abi, owner=acct)
    setup = [
        b"0x0",                                        # id
        False,                                         # active
        acct,                                          # owner
        "0x0000000000000000000000000000000000000000",  # loanToken -> overwritten
        collateralTokenAddress,                        # collateralToken
        Wei("20 ether"),                               # minInitialMargin
        Wei("15 ether"),                               # maintenanceMargin
        0                                              # fixedLoanTerm -> overwritten
    ]
    tx = loanToken.setupLoanParams([setup], False)
    print(tx.info())
def swapTokens(amount, minReturn, swapNetworkAddress, sourceTokenAddress, destTokenAddress):
    """Swap `amount` of the source token for at least `minReturn` of the
    destination token through the Sovryn swap network.

    NOTE(review): this always wraps `amount` rBTC into WRBTC first, even
    when the source token is not WRBTC -- confirm callers either use
    WRBTC as source or intend the extra deposit.
    """
    # Load the swap-network ABI; the original leaked the file handle.
    with open('./scripts/contractInteraction/SovrynSwapNetwork.json') as abiFile:
        abi = json.load(abiFile)
    swapNetwork = Contract.from_abi("SovrynSwapNetwork", address=swapNetworkAddress, abi=abi, owner=acct)
    sourceToken = Contract.from_abi("Token", address=sourceTokenAddress, abi=TestToken.abi, owner=acct)
    contract = Contract.from_abi("WRBTC", address=contracts["WRBTC"], abi=WRBTC.abi, owner=acct)
    tx = contract.deposit({'value':amount})
    tx.info()
    # Approve only when the current allowance is insufficient.
    if(sourceToken.allowance(acct, swapNetworkAddress) < amount):
        sourceToken.approve(swapNetworkAddress,amount)
    path = swapNetwork.conversionPath(sourceTokenAddress,destTokenAddress)
    print("path", path)
    expectedReturn = swapNetwork.getReturnByPath(path, amount)
    print("expected return ", expectedReturn)
    tx = swapNetwork.convertByPath(
        path,
        amount,
        minReturn,
        "0x0000000000000000000000000000000000000000",  # beneficiary: sender
        "0x0000000000000000000000000000000000000000",  # affiliate: none
        0                                              # affiliate fee
    )
    tx.info()
def readFromMedianizer():
    """Print the (price, valid) tuple returned by the MoC medianizer's peek()."""
    feed = Contract.from_abi("Medianizer", address=contracts['medianizer'],
                             abi=PriceFeedsMoCMockup.abi, owner=acct)
    print(feed.peek())
def updateOracleAddress(newAddress):
    """Point the hard-coded PriceFeedsMoC instance at a new MoC oracle address."""
    print("set oracle address to", newAddress)
    feed = Contract.from_abi("PriceFeedsMoC",
                             address='0x066ba9453e230a260c2a753d9935d91187178C29',
                             abi=PriceFeedsMoC.abi, owner=acct)
    feed.setMoCOracleAddress(newAddress)
def addLiquidity(converter, reserve, amount):
    """Add `amount` of `reserve` to a V2 liquidity pool converter
    (minimum pool tokens returned: 1)."""
    # Load the converter ABI; the original leaked the file handle.
    with open('./scripts/contractInteraction/LiquidityPoolV2Converter.json') as abiFile:
        abi = json.load(abiFile)
    converter = Contract.from_abi("LiquidityPoolV2Converter", address=converter, abi=abi, owner=acct)
    print("is active? ", converter.isActive())
    print("price oracle", converter.priceOracle())
    tx = converter.addLiquidity(reserve, amount, 1)
    print(tx)
def deployMultisig(owners, requiredConf):
    """Deploy a MultiSigWallet with `owners` and a confirmation threshold."""
    wallet = acct.deploy(MultiSigWallet, owners, requiredConf)
    print("multisig:", wallet)
def setupLoanParamsForCollaterals(loanTokenAddress, collateralAddresses):
    """Queue two multisig txs registering loan parameters on a loan
    token for each collateral address: one set for margin trading and
    one for torque (fixed-term borrowing)."""
    loanToken = Contract.from_abi("loanToken", address=loanTokenAddress, abi=LoanTokenLogicStandard.abi, owner=acct)
    marginParams = []
    torqueParams = []
    for collateralAddress in collateralAddresses:
        marginData = [
            b"0x0", ## id
            False, ## active
            str(acct), ## owner
            "0x0000000000000000000000000000000000000000", ## loanToken -> will be overwritten
            collateralAddress, ## collateralToken.
            Wei("20 ether"), ## minInitialMargin -> 20% (allows up to 5x leverage)
            Wei("15 ether"), ## maintenanceMargin -> 15%, below liquidation
            0 ## fixedLoanTerm -> will be overwritten with 28 days
        ]
        # torque loans share the margin params except a 50% initial margin
        torqueData = copy.deepcopy(marginData)
        torqueData[5] = Wei("50 ether")
        print(torqueData)
        marginParams.append(marginData)
        torqueParams.append(torqueData)
    #configure the token settings, and set the setting contract address at the loan token logic contract
    dataM = loanToken.setupLoanParams.encode_input(marginParams, False)
    dataT = loanToken.setupLoanParams.encode_input(torqueParams, True)
    multisig = Contract.from_abi("MultiSig", address=contracts['multisig'], abi=MultiSigWallet.abi, owner=acct)
    tx = multisig.submitTransaction(loanToken.address,0,dataM)
    txId = tx.events["Submission"]["transactionId"]
    print("txid",txId);
    tx = multisig.submitTransaction(loanToken.address,0,dataT)
    txId = tx.events["Submission"]["transactionId"]
    print("txid",txId);
def updatePriceFeedToRSKOracle():
    """Deploy a PriceFeedRSKOracle and register it as the WRBTC price feed."""
    feed = acct.deploy(PriceFeedRSKOracle, contracts['RSKOracle'])
    print("new price feed: ", feed)
    registry = Contract.from_abi("PriceFeeds", address=contracts['PriceFeeds'],
                                 abi=PriceFeeds.abi, owner=acct)
    registry.setPriceFeed([contracts['WRBTC']], [feed.address])
def updatePriceFeedToMOCOracle():
    """Deploy a PriceFeedsMoC feed and queue a multisig tx registering it
    as the WRBTC price feed."""
    feed = acct.deploy(PriceFeedsMoC, contracts['medianizer'], contracts['RSKOracle'])
    print("new price feed: ", feed)
    registry = Contract.from_abi("PriceFeeds", address=contracts['PriceFeeds'],
                                 abi=PriceFeeds.abi, owner=acct)
    payload = registry.setPriceFeed.encode_input([contracts['WRBTC']], [feed.address])
    wallet = Contract.from_abi("MultiSig", address=contracts['multisig'],
                               abi=MultiSigWallet.abi, owner=acct)
    submission = wallet.submitTransaction(registry.address, 0, payload)
    print("txid", submission.events["Submission"]["transactionId"])
def readPrice(source, destination):
    """Print the protocol price-feed rate tuple and return its first element."""
    registry = Contract.from_abi("PriceFeeds", address=contracts['PriceFeeds'],
                                 abi=PriceFeeds.abi, owner=acct)
    rate = registry.queryRate(source, destination)
    print('rate is ', rate)
    return rate[0]
def readSwapRate(source, destination):
    """Print the AMM return for 1e18 units of `source` in `destination`
    and return its first element."""
    # Load the swap-network ABI; the original leaked the file handle.
    with open('./scripts/contractInteraction/SovrynSwapNetwork.json') as abiFile:
        abi = json.load(abiFile)
    swapNetwork = Contract.from_abi("SovrynSwapNetwork", address=contracts['swapNetwork'], abi=abi, owner=acct)
    path = swapNetwork.conversionPath(source,destination)
    #print("path:", path)
    # NOTE(review): 1e18 is a float; 10**18 would avoid any precision
    # concern, but the argument is kept as-is to preserve behavior.
    expectedReturn = swapNetwork.getReturnByPath(path, 1e18)
    print('rate is ', expectedReturn)
    return expectedReturn[0]
def readConversionFee(converterAddress):
    """Print the conversion fee of a V1 liquidity pool converter."""
    # Load the converter ABI; the original leaked the file handle.
    with open('./scripts/contractInteraction/LiquidityPoolV1Converter.json') as abiFile:
        abi = json.load(abiFile)
    converter = Contract.from_abi("Converter", address=converterAddress, abi=abi, owner=acct)
    fee = converter.conversionFee()
    print('fee is ', fee)
def readPriceFromOracle(oracleAddress):
    """Print the latest answer reported by the oracle at `oracleAddress`."""
    oracle = Contract.from_abi("Oracle", address=oracleAddress,
                               abi=PriceFeedsMoC.abi, owner=acct)
    print('rate is ', oracle.latestAnswer())
def readTargetWeights(converter, reserve):
    """Print the reserve record of a V2 converter and its target weight."""
    # Load the converter ABI; the original leaked the file handle.
    with open('./scripts/contractInteraction/LiquidityPoolV2Converter.json') as abiFile:
        abi = json.load(abiFile)
    converter = Contract.from_abi("LiquidityPoolV2Converter", address=converter, abi=abi, owner=acct)
    res = converter.reserves(reserve).dict()
    print(res)
    print('target weight is ',res['weight'])
def updateContracts():
    """Redeploy and wire the changed protocol modules: swap
    implementation, swap user logic, loan openings, and all loan token
    logic proxies. Each helper queues its own multisig transaction."""
    replaceSwapsImplSovrynSwap()
    replaceSwapsUser()
    replaceLoanOpenings()
    replaceLoanTokenLogicOnAllContracts()
def replaceSwapsExternal():
    """Queue a multisig tx replacing the SwapsExternal module in the
    protocol with the contract at the hard-coded address below.

    NOTE(review): the deploy step is commented out and the address is
    hard-coded -- confirm it matches the intended deployment before use.
    """
    #swapsExternal = acct.deploy(SwapsExternal)
    sovryn = Contract.from_abi("sovryn", address=contracts['sovrynProtocol'], abi=interface.ISovrynBrownie.abi, owner=acct)
    data = sovryn.replaceContract.encode_input('0xAa1dEDE8C097349Dd25C98A0bF79c8D9B6e55caf')
    multisig = Contract.from_abi("MultiSig", address=contracts['multisig'], abi=MultiSigWallet.abi, owner=acct)
    tx = multisig.submitTransaction(sovryn.address,0,data)
    txId = tx.events["Submission"]["transactionId"]
    print(txId);
def replaceLoanOpenings():
    """Deploy LoanOpenings and queue a multisig tx installing it in the protocol."""
    print("replacing loan openings")
    module = acct.deploy(LoanOpenings)
    protocol = Contract.from_abi("sovryn", address=contracts['sovrynProtocol'],
                                 abi=interface.ISovrynBrownie.abi, owner=acct)
    payload = protocol.replaceContract.encode_input(module.address)
    wallet = Contract.from_abi("MultiSig", address=contracts['multisig'],
                               abi=MultiSigWallet.abi, owner=acct)
    submission = wallet.submitTransaction(protocol.address, 0, payload)
    print(submission.events["Submission"]["transactionId"])
def replaceSwapsUser():
    """Deploy SwapsUser and queue a multisig tx installing it in the protocol."""
    print("replacing swaps user")
    module = acct.deploy(SwapsUser)
    protocol = Contract.from_abi("sovryn", address=contracts['sovrynProtocol'],
                                 abi=interface.ISovrynBrownie.abi, owner=acct)
    payload = protocol.replaceContract.encode_input(module.address)
    wallet = Contract.from_abi("MultiSig", address=contracts['multisig'],
                               abi=MultiSigWallet.abi, owner=acct)
    submission = wallet.submitTransaction(protocol.address, 0, payload)
    print(submission.events["Submission"]["transactionId"])
def replaceSwapsImplSovrynSwap():
    """Deploy SwapsImplSovrynSwap and queue a multisig tx setting it as
    the protocol's swaps implementation."""
    print("replacing swaps")
    module = acct.deploy(SwapsImplSovrynSwap)
    protocol = Contract.from_abi("sovryn", address=contracts['sovrynProtocol'],
                                 abi=interface.ISovrynBrownie.abi, owner=acct)
    payload = protocol.setSwapsImplContract.encode_input(module.address)
    wallet = Contract.from_abi("MultiSig", address=contracts['multisig'],
                               abi=MultiSigWallet.abi, owner=acct)
    submission = wallet.submitTransaction(protocol.address, 0, payload)
    print(submission.events["Submission"]["transactionId"])
def replaceLoanTokenLogicOnAllContracts():
    """Deploy fresh loan token logic contracts and queue multisig txs
    retargeting every loan token proxy at them."""
    print("replacing loan token logic")
    # One LoanTokenLogicLM instance is shared by all ERC20 loan tokens.
    logicContract = acct.deploy(LoanTokenLogicLM)
    # Original message named the wrong contract and only iDoC; fixed to
    # describe what is actually deployed and retargeted.
    print('new LoanTokenLogicLM contract for iDOC, iUSDT, iBPro and iXUSD:' + logicContract.address)
    replaceLoanTokenLogic(contracts['iDOC'],logicContract.address)
    replaceLoanTokenLogic(contracts['iUSDT'],logicContract.address)
    replaceLoanTokenLogic(contracts['iBPro'],logicContract.address)
    replaceLoanTokenLogic(contracts['iXUSD'],logicContract.address)
    # iRBTC uses the WRBTC-specific logic contract.
    logicContract = acct.deploy(LoanTokenLogicWrbtc)
    print('new LoanTokenLogicWrbtc contract for iWRBTC:' + logicContract.address)
    replaceLoanTokenLogic(contracts['iRBTC'], logicContract.address)
def replaceLoanTokenLogic(loanTokenAddress, logicAddress):
    """Queue a multisig tx retargeting a loan token proxy to new logic."""
    proxy = Contract.from_abi("loanToken", address=loanTokenAddress,
                              abi=LoanToken.abi, owner=acct)
    payload = proxy.setTarget.encode_input(logicAddress)
    wallet = Contract.from_abi("MultiSig", address=contracts['multisig'],
                               abi=MultiSigWallet.abi, owner=acct)
    submission = wallet.submitTransaction(proxy.address, 0, payload)
    print(submission.events["Submission"]["transactionId"])
def setLiquidityMiningAddressOnAllContracts():
    """Set the liquidity-mining proxy address on every loan token."""
    print("setting LM address")
    for key in ('iDOC', 'iUSDT', 'iBPro', 'iXUSD', 'iRBTC'):
        setLiquidityMiningAddress(contracts[key])
def setLiquidityMiningAddress(loanTokenAddress):
    """Queue a multisig tx setting the LM proxy address on a loan token."""
    pool = Contract.from_abi("loanToken", address=loanTokenAddress,
                             abi=LoanTokenLogicLM.abi, owner=acct)
    payload = pool.setLiquidityMiningAddress.encode_input(contracts['LiquidityMiningProxy'])
    wallet = Contract.from_abi("MultiSig", address=contracts['multisig'],
                               abi=MultiSigWallet.abi, owner=acct)
    submission = wallet.submitTransaction(pool.address, 0, payload)
    print(submission.events["Submission"]["transactionId"])
def getLiquidityMiningAddressOnAllContracts():
    """Print the liquidity-mining address of every loan token.

    Fixes the misleading "setting LM address" message and adds the
    iXUSD token for consistency with
    setLiquidityMiningAddressOnAllContracts.
    """
    print("getting LM address")
    getLiquidityMiningAddress(contracts['iDOC'])
    getLiquidityMiningAddress(contracts['iUSDT'])
    getLiquidityMiningAddress(contracts['iBPro'])
    getLiquidityMiningAddress(contracts['iXUSD'])
    getLiquidityMiningAddress(contracts['iRBTC'])
def getLiquidityMiningAddress(loanTokenAddress):
    """Print a loan token's LM address and its current logic target."""
    pool = Contract.from_abi("loanToken", address=loanTokenAddress,
                             abi=LoanTokenLogicLM.abi, owner=acct)
    print(pool.liquidityMiningAddress())
    print(pool.target_())
def checkRates():
    """Print oracle price-feed rates and AMM swap rates for the main
    token pairs, plus the USDT AMM oracle price and the USDT converter
    reserve weights (hard-coded converter/oracle addresses)."""
    # price-feed (oracle) rates
    print('reading price from WRBTC to DOC')
    readPrice(contracts['WRBTC'], contracts['DoC'])
    print('reading price from WRBTC to USDT')
    readPrice(contracts['WRBTC'], contracts['USDT'])
    print('reading price from WRBTC to BPRO')
    readPrice(contracts['WRBTC'], contracts['BPro'])
    print('read price from USDT to DOC')
    readPrice(contracts['USDT'], contracts['DoC'])
    # AMM swap rates
    print('read swap rate from WRBTC to DOC')
    readSwapRate(contracts['WRBTC'], contracts['DoC'])
    print('read swap rate from WRBTC to USDT')
    readSwapRate(contracts['WRBTC'], contracts['USDT'])
    print('read swap rate from WRBTC to BPRO')
    readSwapRate(contracts['WRBTC'], contracts['BPro'])
    print('read swap rate from USDT to DOC')
    readSwapRate(contracts['USDT'], contracts['DoC'])
    print('read swap rate from BPro to DOC')
    readSwapRate(contracts['BPro'], contracts['DoC'])
    print('read swap rate from BPro to USDT')
    readSwapRate(contracts['BPro'], contracts['USDT'])
    print('read swap rate from USDT to WRBTC')
    readSwapRate(contracts['USDT'], contracts['WRBTC'])
    print('read swap rate from DOC to WRBTC')
    readSwapRate(contracts['DoC'], contracts['WRBTC'])
    print("price from the USDT oracle on AMM:")
    readPriceFromOracle('0x78F0b35Edd78eD564830c45F4A22e4b553d7f042')
    readTargetWeights('0x133eBE9c8bA524C9B1B601E794dF527f390729bF', contracts['USDT'])
    readTargetWeights('0x133eBE9c8bA524C9B1B601E794dF527f390729bF', contracts['WRBTC'])
def addOwnerToMultisig(newOwner):
    """Queue a multisig tx adding `newOwner` as a wallet owner."""
    wallet = Contract.from_abi("MultiSig", address=contracts['multisig'],
                               abi=MultiSigWallet.abi, owner=acct)
    payload = wallet.addOwner.encode_input(newOwner)
    submission = wallet.submitTransaction(wallet.address, 0, payload)
    print("txid", submission.events["Submission"]["transactionId"])
def governorAcceptAdmin(type):
    """Queue a multisig tx calling __acceptAdmin on the governor whose
    contracts-key is `type`."""
    governor = Contract.from_abi("GovernorAlpha", address=contracts[type],
                                 abi=GovernorAlpha.abi, owner=acct)
    payload = governor.__acceptAdmin.encode_input()
    wallet = Contract.from_abi("MultiSig", address=contracts['multisig'],
                               abi=MultiSigWallet.abi, owner=acct)
    submission = wallet.submitTransaction(governor.address, 0, payload)
    print(submission.events["Submission"]["transactionId"])
def setEarlyAccessToken(loanTokenAddress, EATokenAddress):
    """Queue a multisig tx setting the early-access token on a loan token."""
    proxy = Contract.from_abi("loanToken", address=loanTokenAddress,
                              abi=LoanToken.abi, owner=acct)
    payload = proxy.setEarlyAccessToken.encode_input(EATokenAddress)
    wallet = Contract.from_abi("MultiSig", address=contracts['multisig'],
                               abi=MultiSigWallet.abi, owner=acct)
    submission = wallet.submitTransaction(proxy.address, 0, payload)
    print(submission.events["Submission"]["transactionId"])
def queueProposal(id):
    """Move governance proposal `id` into the timelock queue and print the tx."""
    governor = Contract.from_abi("GovernorAlpha", address=contracts['GovernorOwner'],
                                 abi=GovernorAlpha.abi, owner=acct)
    governor.queue(id).info()
def executeProposal(id):
    """Execute queued governance proposal `id` and print the tx."""
    governor = Contract.from_abi("GovernorAlpha", address=contracts['GovernorOwner'],
                                 abi=GovernorAlpha.abi, owner=acct)
    governor.execute(id).info()
def setLendingFee(fee):
    """Queue a multisig tx setting the protocol lending fee percent."""
    protocol = Contract.from_abi("sovryn", address=contracts['sovrynProtocol'],
                                 abi=interface.ISovrynBrownie.abi, owner=acct)
    payload = protocol.setLendingFeePercent.encode_input(fee)
    wallet = Contract.from_abi("MultiSig", address=contracts['multisig'],
                               abi=MultiSigWallet.abi, owner=acct)
    submission = wallet.submitTransaction(protocol.address, 0, payload)
    print(submission.events["Submission"]["transactionId"])
def setTradingFee(fee):
    """Queue a multisig tx setting the protocol trading fee percent."""
    protocol = Contract.from_abi("sovryn", address=contracts['sovrynProtocol'],
                                 abi=interface.ISovrynBrownie.abi, owner=acct)
    payload = protocol.setTradingFeePercent.encode_input(fee)
    wallet = Contract.from_abi("MultiSig", address=contracts['multisig'],
                               abi=MultiSigWallet.abi, owner=acct)
    submission = wallet.submitTransaction(protocol.address, 0, payload)
    print(submission.events["Submission"]["transactionId"])
def setBorrowingFee(fee):
    """Queue a multisig tx setting the protocol borrowing fee percent."""
    protocol = Contract.from_abi("sovryn", address=contracts['sovrynProtocol'],
                                 abi=interface.ISovrynBrownie.abi, owner=acct)
    payload = protocol.setBorrowingFeePercent.encode_input(fee)
    wallet = Contract.from_abi("MultiSig", address=contracts['multisig'],
                               abi=MultiSigWallet.abi, owner=acct)
    submission = wallet.submitTransaction(protocol.address, 0, payload)
    print(submission.events["Submission"]["transactionId"])
def setAffiliateFeePercent(fee):
    '''Submit a multisig transaction calling sovryn.setAffiliateFeePercent(fee).
    Prints a short description and the multisig transaction id.
    '''
    protocol = Contract.from_abi(
        "sovryn",
        address=contracts['sovrynProtocol'],
        abi=interface.ISovrynBrownie.abi,
        owner=acct)
    calldata = protocol.setAffiliateFeePercent.encode_input(fee)
    wallet = Contract.from_abi(
        "MultiSig",
        address=contracts['multisig'],
        abi=MultiSigWallet.abi,
        owner=acct)
    submit_tx = wallet.submitTransaction(protocol.address, 0, calldata)
    print('sovryn.setAffiliateFeePercent for', fee, ' tx:')
    print(submit_tx.events["Submission"]["transactionId"])
def setAffiliateTradingTokenFeePercent(percentFee):
    '''Submit a multisig transaction calling
    sovryn.setAffiliateTradingTokenFeePercent(percentFee).
    Prints a short description and the multisig transaction id.
    '''
    protocol = Contract.from_abi(
        "sovryn",
        address=contracts['sovrynProtocol'],
        abi=interface.ISovrynBrownie.abi,
        owner=acct)
    calldata = protocol.setAffiliateTradingTokenFeePercent.encode_input(percentFee)
    wallet = Contract.from_abi(
        "MultiSig",
        address=contracts['multisig'],
        abi=MultiSigWallet.abi,
        owner=acct)
    submit_tx = wallet.submitTransaction(protocol.address, 0, calldata)
    print('sovryn.setAffiliateTradingTokenFeePercent for ', percentFee, ' tx:')
    print(submit_tx.events["Submission"]["transactionId"])
def setMinReferralsToPayout(minReferrals):
    '''Submit a multisig transaction calling
    sovryn.setMinReferralsToPayoutAffiliates(minReferrals).
    Prints a short description and the multisig transaction id.
    '''
    protocol = Contract.from_abi(
        "sovryn",
        address=contracts['sovrynProtocol'],
        abi=interface.ISovrynBrownie.abi,
        owner=acct)
    calldata = protocol.setMinReferralsToPayoutAffiliates.encode_input(minReferrals)
    wallet = Contract.from_abi(
        "MultiSig",
        address=contracts['multisig'],
        abi=MultiSigWallet.abi,
        owner=acct)
    submit_tx = wallet.submitTransaction(protocol.address, 0, calldata)
    print('setMinReferralsToPayoutAffiliates set to ', minReferrals, ' tx:')
    print(submit_tx.events["Submission"]["transactionId"])
def sendFromMultisigToVesting(amount):
vestingRegistry = Contract.from_abi("VestingRegistry", address=contracts['VestingRegistry'], abi=VestingRegistry.abi, owner=acct)
data = vestingRegistry.deposit.encode_input()
multisig = Contract.from_abi("MultiSig", address=contracts['multisig'], abi=MultiSigWallet.abi, owner=acct)
tx = multisig.submitTransaction(vestingRegistry.address,amount,data)
txId | |
expected_cs, 'Got wrong checksum'
if attempt == 0:
new_ds = None
new_ds = gdal.Open('tmp/tiff_write_77.tif')
new_ds = None
gdaltest.tiff_drv.Delete('tmp/tiff_write_77.tif')
src_ds = None
gdaltest.tiff_drv.Delete('tmp/tiff_write_77_src.tif')
###############################################################################
# Test generating & reading a YCbCr JPEG all-in-one-strip multiband TIFF (#3259)
def test_tiff_write_78():
    '''Generate & read a YCbCr JPEG all-in-one-strip multiband TIFF (#3259).'''
    md = gdaltest.tiff_drv.GetMetadata()
    if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:
        pytest.skip()

    src_ds = gdaltest.tiff_drv.Create('tmp/tiff_write_78_src.tif', 16, 2048, 3)
    src_ds.GetRasterBand(2).Fill(255)

    # BLOCKYSIZE=RasterYSize puts the whole image into a single strip.
    new_ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_78.tif', src_ds,
                                          options=['BLOCKYSIZE=%d' % src_ds.RasterYSize,
                                                   'COMPRESS=JPEG',
                                                   'PHOTOMETRIC=YCBCR'])

    # Make sure the file is flushed so that we re-read from it rather from cached blocks
    new_ds.FlushCache()
    # new_ds = None
    # new_ds = gdal.Open('tmp/tiff_write_78.tif')

    if 'GetBlockSize' in dir(gdal.Band):
        (_, blocky) = new_ds.GetRasterBand(1).GetBlockSize()
        if blocky != 1:
            # blocky == 1 would indicate the split-band optimisation is active.
            print('')
            print('using regular band (libtiff <= 3.9.2 or <= 4.0.0beta5, or SplitBand disabled by config option)')

    # Test reading a few samples to check that random reading works
    band_lines = [(1, 0), (1, 5), (1, 3), (2, 10), (1, 100), (2, 1000), (2, 500),
                  (1, 500), (2, 500), (2, 2047), (2, 2047), (3, 2047), (1, 2047)]
    for band_line in band_lines:
        # 1x1 checksum of a single pixel at (0, line) on the given band.
        cs = new_ds.GetRasterBand(band_line[0]).Checksum(0, band_line[1], 1, 1)
        if band_line[0] == 1:
            expected_cs = 0 % 7
        elif band_line[0] == 2:
            expected_cs = 255 % 7
        else:
            # We should expect 0, but due to JPEG YCbCr compression & decompression,
            # this ends up being 1
            expected_cs = 1 % 7
        if cs != expected_cs:
            print(band_line)
            pytest.fail('Got wrong checksum')

    # Test whole bands
    for i in range(3):
        cs = new_ds.GetRasterBand(i + 1).Checksum()
        expected_cs = src_ds.GetRasterBand(i + 1).Checksum()
        if i == 2:
            # We should expect 0, but due to JPEG YCbCr compression & decompression,
            # this ends up being 32768
            expected_cs = 32768
        assert cs == expected_cs, 'Got wrong checksum'

    new_ds = None
    gdaltest.tiff_drv.Delete('tmp/tiff_write_78.tif')
    src_ds = None
    gdaltest.tiff_drv.Delete('tmp/tiff_write_78_src.tif')
###############################################################################
# Test reading & updating GDALMD_AREA_OR_POINT (#3522)
def test_tiff_write_79():
    '''Test reading & updating GDALMD_AREA_OR_POINT (#3522).

    Exercises every combination of (reading the projection first or not) x
    (re-reading the metadata item immediately after setting it or not), since
    both interactions historically triggered bugs.
    '''
    ds = gdaltest.tiff_drv.Create('tmp/tiff_write_79.tif', 1, 1)
    srs = osr.SpatialReference()
    srs.SetFromUserInput('EPSG:32601')
    ds.SetProjection(srs.ExportToWkt())
    ds = None

    for do_projection_ref in [False, True]:
        for check_just_after in [False, True]:

            # Freshly created file: default value should be 'Area'.
            ds = gdal.Open('tmp/tiff_write_79.tif')
            if do_projection_ref:
                ds.GetProjectionRef()
            mdi = ds.GetMetadataItem('AREA_OR_POINT')
            assert mdi == 'Area', \
                ('(1) did not get expected value. do_projection_ref = %d, check_just_after = %d' % (do_projection_ref, check_just_after))
            ds = None

            # Still read-only.
            ds = gdal.Open('tmp/tiff_write_79.tif')
            if do_projection_ref:
                ds.GetProjectionRef()
            ds.SetMetadataItem('AREA_OR_POINT', 'Point')
            ds = None
            # A read-only SetMetadataItem must not spill into a PAM sidecar.
            assert not os.path.exists('tmp/tiff_write_79.tif.aux.xml')

            # So should get 'Area'
            ds = gdal.Open('tmp/tiff_write_79.tif')
            if do_projection_ref:
                ds.GetProjectionRef()
            mdi = ds.GetMetadataItem('AREA_OR_POINT')
            assert mdi == 'Area', \
                ('(2) did not get expected value. do_projection_ref = %d, check_just_after = %d' % (do_projection_ref, check_just_after))
            ds = None

            # Now update to 'Point'
            ds = gdal.Open('tmp/tiff_write_79.tif', gdal.GA_Update)
            if do_projection_ref:
                ds.GetProjectionRef()
            ds.SetMetadataItem('AREA_OR_POINT', 'Point')
            if check_just_after:
                mdi = ds.GetMetadataItem('AREA_OR_POINT')
                assert mdi == 'Point', \
                    ('(3) did not get expected value. do_projection_ref = %d, check_just_after = %d' % (do_projection_ref, check_just_after))
            ds = None
            # Update mode should write into the TIFF itself, not PAM.
            assert not os.path.exists('tmp/tiff_write_79.tif.aux.xml')

            # Now should get 'Point'
            ds = gdal.Open('tmp/tiff_write_79.tif')
            if do_projection_ref:
                ds.GetProjectionRef()
            mdi = ds.GetMetadataItem('AREA_OR_POINT')
            assert mdi == 'Point', \
                ('(4) did not get expected value. do_projection_ref = %d, check_just_after = %d' % (do_projection_ref, check_just_after))
            ds = None

            # Now update back to 'Area' through SetMetadata()
            ds = gdal.Open('tmp/tiff_write_79.tif', gdal.GA_Update)
            if do_projection_ref:
                ds.GetProjectionRef()
            md = {}
            md['AREA_OR_POINT'] = 'Area'
            ds.SetMetadata(md)
            if check_just_after:
                mdi = ds.GetMetadataItem('AREA_OR_POINT')
                assert mdi == 'Area', \
                    ('(5) did not get expected value. do_projection_ref = %d, check_just_after = %d' % (do_projection_ref, check_just_after))
            ds = None

            # Now should get 'Area'
            ds = gdal.Open('tmp/tiff_write_79.tif')
            if do_projection_ref:
                ds.GetProjectionRef()
            mdi = ds.GetMetadataItem('AREA_OR_POINT')
            assert mdi == 'Area', '(6) did not get expected value'
            ds = None

    gdaltest.tiff_drv.Delete('tmp/tiff_write_79.tif')
###############################################################################
# Test SetOffset() & SetScale()
def test_tiff_write_80():
    '''Test SetOffset() & SetScale() storage, both internal (TIFF tags) and PAM.'''
    # First part : test storing and retrieving scale & offsets from internal metadata
    ds = gdaltest.tiff_drv.Create('tmp/tiff_write_80.tif', 1, 1)
    ds.GetRasterBand(1).SetScale(100)
    ds.GetRasterBand(1).SetOffset(1000)
    ds = None

    # Values must have gone into the TIFF itself, so no PAM sidecar.
    assert not os.path.exists('tmp/tiff_write_80.tif.aux.xml')

    ds = gdal.Open('tmp/tiff_write_80.tif')
    scale = ds.GetRasterBand(1).GetScale()
    offset = ds.GetRasterBand(1).GetOffset()
    assert scale == 100 and offset == 1000, \
        'did not get expected values in internal case (1)'
    ds = None

    # Test CreateCopy()
    src_ds = gdal.Open('tmp/tiff_write_80.tif')
    ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_80_copy.tif', src_ds)
    src_ds = None
    ds = None
    ds = gdal.Open('tmp/tiff_write_80_copy.tif')
    scale = ds.GetRasterBand(1).GetScale()
    offset = ds.GetRasterBand(1).GetOffset()
    assert scale == 100 and offset == 1000, 'did not get expected values in copy'
    ds = None
    gdaltest.tiff_drv.Delete('tmp/tiff_write_80_copy.tif')

    # Second part : test unsetting scale & offsets from internal metadata
    ds = gdal.Open('tmp/tiff_write_80.tif', gdal.GA_Update)
    ds.GetRasterBand(1).SetScale(1)
    ds.GetRasterBand(1).SetOffset(0)
    ds = None
    ds = gdal.Open('tmp/tiff_write_80.tif')
    scale = ds.GetRasterBand(1).GetScale()
    offset = ds.GetRasterBand(1).GetOffset()
    # Neutral scale (1) / offset (0) must read back as unset (falsy).
    assert not scale
    assert not offset
    ds = None
    gdaltest.tiff_drv.Delete('tmp/tiff_write_80.tif')

    # Third part : test storing and retrieving scale & offsets from PAM metadata
    ds = gdaltest.tiff_drv.Create('tmp/tiff_write_80_bis.tif', 1, 1)
    assert ds.GetRasterBand(1).GetScale() is None and ds.GetRasterBand(1).GetOffset() is None, \
        'expected None values'
    ds = None

    # Opened read-only, so the values can only be stored in PAM.
    ds = gdal.Open('tmp/tiff_write_80_bis.tif')
    ds.GetRasterBand(1).SetScale(-100)
    ds.GetRasterBand(1).SetOffset(-1000)
    ds = None

    try:
        # check that it *goes* to PAM
        os.stat('tmp/tiff_write_80_bis.tif.aux.xml')
    except OSError:
        pytest.fail('did not go to PAM as expected')

    ds = gdal.Open('tmp/tiff_write_80_bis.tif')
    scale = ds.GetRasterBand(1).GetScale()
    offset = ds.GetRasterBand(1).GetOffset()
    assert scale == -100 and offset == -1000, \
        'did not get expected values in PAM case (1)'
    ds = None

    # Fourth part : test unsetting scale & offsets from PAM metadata
    ds = gdal.Open('tmp/tiff_write_80_bis.tif')
    ds.GetRasterBand(1).SetScale(1)
    ds.GetRasterBand(1).SetOffset(0)
    ds = None
    # Resetting to neutral values should delete the PAM sidecar.
    assert not os.path.exists('tmp/tiff_write_80_bis.tif.aux.xml')
    ds = gdal.Open('tmp/tiff_write_80_bis.tif')
    scale = ds.GetRasterBand(1).GetScale()
    offset = ds.GetRasterBand(1).GetOffset()
    assert not scale
    assert not offset
    ds = None
    gdaltest.tiff_drv.Delete('tmp/tiff_write_80_bis.tif')
###############################################################################
# Test retrieving GCP from PAM
def test_tiff_write_81():
    '''Test retrieving GCPs from a hand-written PAM .aux.xml sidecar.'''
    shutil.copyfile('data/byte.tif', 'tmp/tiff_write_81.tif')
    # FIX: use a context manager so the sidecar file is always closed,
    # even if the write fails (the original leaked the handle on error).
    with open('tmp/tiff_write_81.tif.aux.xml', 'wt') as f:
        f.write("""
<PAMDataset>
<GCPList Projection="PROJCS[&quot;NAD27 / UTM zone 11N&quot;,GEOGCS[&quot;NAD27&quot;,DATUM[&quot;North_American_Datum_1927&quot;,SPHEROID[&quot;Clarke 1866&quot;,6378206.4,294.9786982139006,AUTHORITY[&quot;EPSG&quot;,&quot;7008&quot;]],AUTHORITY[&quot;EPSG&quot;,&quot;6267&quot;]],PRIMEM[&quot;Greenwich&quot;,0],UNIT[&quot;degree&quot;,0.0174532925199433],AUTHORITY[&quot;EPSG&quot;,&quot;4267&quot;]],PROJECTION[&quot;Transverse_Mercator&quot;],PARAMETER[&quot;latitude_of_origin&quot;,0],PARAMETER[&quot;central_meridian&quot;,-117],PARAMETER[&quot;scale_factor&quot;,0.9996],PARAMETER[&quot;false_easting&quot;,500000],PARAMETER[&quot;false_northing&quot;,0],UNIT[&quot;metre&quot;,1,AUTHORITY[&quot;EPSG&quot;,&quot;9001&quot;]],AUTHORITY[&quot;EPSG&quot;,&quot;26711&quot;]]">
    <GCP Id="" Pixel="0.0000" Line="0.0000" X="4.407200000000E+05" Y="3.751320000000E+06"/>
    <GCP Id="" Pixel="100.0000" Line="0.0000" X="4.467200000000E+05" Y="3.751320000000E+06"/>
    <GCP Id="" Pixel="0.0000" Line="100.0000" X="4.407200000000E+05" Y="3.745320000000E+06"/>
    <GCP Id="" Pixel="100.0000" Line="100.0000" X="4.467200000000E+05" Y="3.745320000000E+06"/>
</GCPList>
</PAMDataset>""")

    ds = gdal.Open('tmp/tiff_write_81.tif')

    assert (ds.GetGCPProjection().find(
        'AUTHORITY["EPSG","26711"]') != -1), 'GCP Projection not set properly.'

    gcps = ds.GetGCPs()
    assert len(gcps) == 4, 'GCP count wrong.'

    ds = None

    gdaltest.tiff_drv.Delete('tmp/tiff_write_81.tif')
###############################################################################
# Test writing & reading a signedbyte 8 bit geotiff
def test_tiff_write_82():
    '''Write & read a SIGNEDBYTE 8-bit GeoTIFF and check the pixel type survives.'''
    source = gdal.Open('data/byte.tif')
    written = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_82.tif', source, options=['PIXELTYPE=SIGNEDBYTE'])
    source = None
    written = None

    reopened = gdal.Open('tmp/tiff_write_82.tif')
    structure = reopened.GetRasterBand(1).GetMetadata('IMAGE_STRUCTURE')
    assert structure['PIXELTYPE'] == 'SIGNEDBYTE', 'did not get SIGNEDBYTE'
    reopened = None

    gdaltest.tiff_drv.Delete('tmp/tiff_write_82.tif')
###############################################################################
# Test writing & reading an indexed GeoTIFF with an extra transparency band (#3547)
def test_tiff_write_83():
    '''Write & read an indexed GeoTIFF with an extra transparency band (#3547).'''
    # Build the source via Create(): band 1 indexed with a palette, band 2 alpha.
    created = gdaltest.tiff_drv.Create('tmp/tiff_write_83.tif', 1, 1, 2)
    palette = gdal.ColorTable()
    palette.SetColorEntry(127, (255, 255, 255, 255))
    created.GetRasterBand(1).SetRasterColorTable(palette)
    created.GetRasterBand(1).Fill(127)
    created.GetRasterBand(2).Fill(255)
    created = None

    # Duplicate through CreateCopy().
    source = gdal.Open('tmp/tiff_write_83.tif')
    duplicate = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_83_2.tif', source)
    source = None
    duplicate = None

    # Verify the palette and both bands survived the copy.
    duplicate = gdal.Open('tmp/tiff_write_83_2.tif')
    table = duplicate.GetRasterBand(1).GetRasterColorTable()
    assert table.GetColorEntry(127) == (255, 255, 255, 255), \
        'did not get expected color table'
    table = None
    assert duplicate.GetRasterBand(1).Checksum() == 127 % 7, \
        'did not get expected checksum for band 1'
    assert duplicate.GetRasterBand(2).Checksum() == 255 % 7, \
        'did not get expected checksum for band 2'
    duplicate = None

    gdaltest.tiff_drv.Delete('tmp/tiff_write_83.tif')
    gdaltest.tiff_drv.Delete('tmp/tiff_write_83_2.tif')
###############################################################################
# Test propagation of non-standard JPEG quality when the current directory
# changes in the midst of encoding of tiles (#3539)
def test_tiff_write_84():
    '''Test propagation of non-standard JPEG quality when the current directory
    changes in the midst of encoding of tiles (#3539).'''
    md = gdaltest.tiff_drv.GetMetadata()
    if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:
        pytest.skip()

    with gdaltest.SetCacheMax(0):
        ds = gdal.GetDriverByName('GTiff').Create('tmp/tiff_write_84.tif', 128, 128, 3)
        ds = None

        try:
            os.remove('tmp/tiff_write_84.tif.ovr')
        except OSError:
            pass

        ds = gdal.Open('tmp/tiff_write_84.tif')
        gdal.SetConfigOption('COMPRESS_OVERVIEW', 'JPEG')
        gdal.SetConfigOption('JPEG_QUALITY_OVERVIEW', '90')
        try:
            ds.BuildOverviews('NEAREST', overviewlist=[2])
            cs = ds.GetRasterBand(2).GetOverview(0).Checksum()
            ds = None
        finally:
            # FIX: always clear the global config options, even when overview
            # generation raises, so a failure here cannot leak JPEG overview
            # settings into subsequent tests.
            gdal.SetConfigOption('COMPRESS_OVERVIEW', None)
            gdal.SetConfigOption('JPEG_QUALITY_OVERVIEW', None)

    gdaltest.tiff_drv.Delete('tmp/tiff_write_84.tif')

    assert cs == 0, 'did not get expected checksum'
###############################################################################
# Test SetUnitType()
def test_tiff_write_85():
# First part : test storing and retrieving unittype from internal metadata
ds = gdaltest.tiff_drv.Create('tmp/tiff_write_85.tif', 1, 1)
ds.GetRasterBand(1).SetUnitType('ft')
ds = None
assert not os.path.exists('tmp/tiff_write_85.tif.aux.xml')
ds = gdal.Open('tmp/tiff_write_85.tif')
unittype = ds.GetRasterBand(1).GetUnitType()
assert unittype == 'ft', 'did not get expected values in internal case (1)'
ds = None
# Test CreateCopy()
src_ds = gdal.Open('tmp/tiff_write_85.tif')
ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_85_copy.tif', src_ds)
src_ds = None
ds = None
ds = gdal.Open('tmp/tiff_write_85_copy.tif')
unittype = ds.GetRasterBand(1).GetUnitType()
assert unittype == 'ft', 'did not get expected values in copy'
ds = None
gdaltest.tiff_drv.Delete('tmp/tiff_write_85_copy.tif')
# Second part : test unsetting unittype from internal metadata
ds = gdal.Open('tmp/tiff_write_85.tif', gdal.GA_Update)
ds.GetRasterBand(1).SetUnitType(None)
ds = None
ds = gdal.Open('tmp/tiff_write_85.tif')
unittype = ds.GetRasterBand(1).GetUnitType()
assert unittype == '', 'did not get expected values in internal case (2)'
ds = None
gdaltest.tiff_drv.Delete('tmp/tiff_write_85.tif')
# Third part : test storing and retrieving unittype from PAM metadata
ds = gdaltest.tiff_drv.Create('tmp/tiff_write_85_bis.tif', 1, 1)
assert not ds.GetRasterBand(1).GetUnitType(), 'expected None | |
import maya.mel as mm
import maya.cmds as mc
import glTools.utils.attribute
import glTools.utils.base
import glTools.utils.layer
import glTools.utils.reference
import glTools.utils.shader
import glTools.utils.shape
import glTools.utils.transform
import re
# ===========
# - Cleanup -
# ===========
def toggleCons(state):
    '''
    Toggle the display state of all joint buffers ('Con') in the scene
    @param state: The display state to set the joint buffers to
    @type state: bool
    '''
    # Collect all Con joints by naming convention.
    conJoints = mc.ls('*Con*_jnt', type='joint')

    for jnt in conJoints:
        # Show as bone, or hide via display override LOD.
        if state:
            glTools.utils.base.displayOverride(jnt, overrideEnable=1, overrideLOD=0)
            mc.setAttr(jnt + '.drawStyle', 0)  # Bone
        else:
            glTools.utils.base.displayOverride(jnt, overrideEnable=1, overrideLOD=1)
            mc.setAttr(jnt + '.drawStyle', 2)  # None

        # Zero the joint radius and take it off the channel box.
        if mc.getAttr(jnt + '.radius', se=True):
            mc.setAttr(jnt + '.radius', 0.0)
            mc.setAttr(jnt + '.radius', cb=False)

        # Hide rotate order from the channel box.
        if mc.getAttr(jnt + '.ro', se=True):
            mc.setAttr(jnt + '.ro', cb=False)

    return conJoints
def toggleEnds(state):
    '''
    Toggle the display state of all end joints ('*End_jnt') in the scene.
    (Docstring corrected: this operates on End joints, not 'Con' buffers.)
    @param state: The display state to set the end joints to
    @type state: bool
    '''
    # Get list of End joints
    endList = mc.ls('*End_jnt',type='joint')
    for endJnt in endList:
        # Toggle state: show as bone, or hide via display override LOD
        if state:
            glTools.utils.base.displayOverride(endJnt,overrideEnable=1,overrideLOD=0)
            mc.setAttr(endJnt+'.drawStyle',0) # Bone
        else:
            glTools.utils.base.displayOverride(endJnt,overrideEnable=1,overrideLOD=1)
            mc.setAttr(endJnt+'.drawStyle',2) # None
        # Set Joint Radius (zero it and hide from channel box, if settable)
        if mc.getAttr(endJnt+'.radius',se=True):
            mc.setAttr(endJnt+'.radius',0.0)
            mc.setAttr(endJnt+'.radius',cb=False)
        # Hide Rotate Order
        if mc.getAttr(endJnt+'.ro',se=True):
            mc.setAttr(endJnt+'.ro',cb=False)
    # Return Result
    return endList
def disableDrawingOverrides(grp):
    '''
    Disable drawing overrides for all DAG descendents of the specified transform node.
    (Docstring corrected: the parameter is the group transform, not a bool state.)
    @param grp: The transform under which all descendent node drawing overrides will be disabled.
    @type grp: str
    '''
    # ==========
    # - Checks -
    # ==========
    if not mc.objExists(grp):
        raise Exception('Transform "'+grp+'" does not exists!')
    if not glTools.utils.transform.isTransform(grp):
        raise Exception('Object "'+grp+'" is not a valid transform!')

    # Get Descendent Node List (guard against None from listRelatives/ls)
    nodeList = mc.ls(mc.listRelatives(grp,ad=True, pa=True) or [],dag=True) or []
    if not nodeList: return []

    # =============================
    # - Disable Drawing Overrides -
    # =============================
    overrideName = 'overrideEnabled'
    for node in nodeList:

        # Check Override Attribute exists on this node
        overrideAttr = node+'.'+overrideName
        if not mc.attributeQuery(overrideName,n=node,ex=True):
            print('Override attribute "'+overrideAttr+'" does not exist! Skipping...')
            continue

        # Break any incoming connection so the attribute can be set
        overrideConn = mc.listConnections(overrideAttr,s=True,d=False) or []
        if overrideConn:
            print('Found incoming connection for override attribute "'+overrideAttr+'"! ('+overrideConn[0]+')')
            print('Disconnecting attribute and disabling drawing overrides...')
            mc.disconnectAttr(overrideConn[0],overrideAttr)

        # Disable Drawing Overrides (best-effort: locked/unsettable attrs are skipped)
        try: mc.setAttr(overrideAttr,0)
        except: pass

    # =================
    # - Return Result -
    # =================
    return nodeList
# ==========
# - Checks -
# ==========
def uniqueNameCheck(objList=[], transformsOnly=False):
    '''
    Return a list of nodes with non unique names
    @param objList: List of scene objects to check. If empty, use all existing scene nodes.
    @type objList: list
    @param transformsOnly: Check transform names only
    @type transformsOnly: bool
    '''
    # Default to every node in the scene.
    if not objList:
        objList = mc.ls()

    if transformsOnly:
        nodeList = mc.ls(objList, transforms=True)
    else:
        nodeList = mc.ls(objList, dag=True)

    # Maya returns a path (containing '|') when a short name is not unique.
    return [node for node in nodeList if '|' in node]
def validNameCheck(objList=[]):
    '''
    Check for valid names in the specified list of nodes
    @param objList: List of objects to check valid names for. If empty use all scene transforms
    @type objList: list
    '''
    # Check object list
    if not objList: objList = mc.ls()
    if not objList: return []

    # Remove Default Nodes
    defNodes = ['dof1','time1','lambert1','postProcessList1','sequenceManager1','lightLinker1','renderGlobalsList1','dynController1','lightList1','particleCloud1','shaderGlow1']
    objList = [obj for obj in objList if not defNodes.count(obj)]
    objList = [obj for obj in objList if not obj.startswith('default')]
    objList = [obj for obj in objList if not mc.nodeType(obj) == 'objectTypeFilter']
    objList = [obj for obj in objList if not mc.nodeType(obj) == 'objectNameFilter']
    objList = [obj for obj in objList if not mc.nodeType(obj) == 'objectScriptFilter']

    # Check valid names
    result = []
    for obj in objList:
        # Check "pasted"
        if obj.count('pasted'): result.append(obj)
        # Check "poly"
        if obj.count('poly'): result.append(obj)
        # Check double underscore "__"
        if obj.count('__'): result.append(obj)
        # Check transform names ending with a digit (0-9).
        # FIX: use a raw string for the pattern so '\d' is not treated as an
        # invalid string escape (DeprecationWarning / future SyntaxError on
        # modern Python). The compiled pattern is unchanged.
        digitSearch = re.search(r'(\d+)$', obj)
        if digitSearch and glTools.utils.transform.isTransform(obj):
            if digitSearch.group(0):
                result.append(obj)

    # Remove Duplicate Entries
    result = list(set(result))

    # Return result
    return result
def shapeNameCheck( objList = [],
                    typeList = ['mesh','nurbsCurve','nurbsSurface'],
                    skipIntermediates = True,
                    skipMultipleShapes = False,
                    strict = True ):
    '''
    Return a list of incorrectly named geometry shape nodes.
    @param objList: List of objects to check for valid shape names. If empty, get all nodes of the specified type.
    @type objList: list
    @param typeList: List of shape types to check for valid names.
    @type typeList: list
    @param skipIntermediates: Skip intermediate shapes.
    @type skipIntermediates: bool
    @param skipMultipleShapes: Skip objects with multiple shape nodes.
    @type skipMultipleShapes: bool
    @param strict: Shape name must match parent+"Shape" to pass.
    @type strict: bool
    '''
    # ==========
    # - Checks -
    # ==========
    if not objList: objList = mc.ls(type=typeList)

    # ====================
    # - Build Shape List -
    # ====================
    shapeList = []
    for obj in objList:
        # Get Shapes from Transform
        if glTools.utils.transform.isTransform(obj):
            # Check Multiple Shapes
            objShapes = mc.listRelatives(obj,s=True,pa=True)
            if not objShapes: continue
            if (len(objShapes) > 1) and skipMultipleShapes: continue
            # FIX: collect the shape nodes themselves. The original appended
            # the transform once per shape, so the name checks below ran on
            # transforms instead of shapes.
            tShapeList = mc.listRelatives(obj,s=True,ni=skipIntermediates,pa=True) or []
            shapeList.extend(tShapeList)
        elif glTools.utils.shape.isShape(obj):
            shapeList.append(obj)
        else:
            print('Unable to determine shape from object "'+obj+'"! Skipping...')

    # =====================
    # - Check Shape Names -
    # =====================
    invalidShapeNameList = []
    for shape in shapeList:
        # Check Type
        if not typeList.count(mc.objectType(shape)): continue
        # Check Intermediate Object
        if skipIntermediates and mc.getAttr(shape+'.intermediateObject'): continue
        # Get transform parent name
        parent = mc.listRelatives(shape,p=True,pa=True)[0]
        # Get Short Names
        shapeSN = mc.ls(shape,sn=True)[0]
        parentSN = mc.ls(parent,sn=True)[0]
        # Check Shape Name.
        # FIX: compare short names (the original compared the shape's full
        # path against parent+'Shape', which almost always mismatched for DAG
        # paths), and use elif chaining so a shape is reported at most once.
        if strict and (shapeSN != parentSN+'Shape'):
            invalidShapeNameList.append(shape)
        elif not shapeSN.startswith(parentSN):
            invalidShapeNameList.append(shape)
        elif not shapeSN.count('Shape'):
            invalidShapeNameList.append(shape)

    # =================
    # - Return Result -
    # =================
    return invalidShapeNameList
def intermediateShapesCheck(objList=[]):
    '''
    Return a list of intermediate shapes.
    @param objList: List of objects to check for intermediate shapes.
    @type objList: list
    '''
    # Resolve the transforms to inspect.
    if objList:
        xformList = mc.ls(objList, transforms=True)
    else:
        xformList = mc.ls(transforms=True)

    result = []
    for xform in xformList:
        # Walk every shape under the transform, guarding against None.
        for shp in mc.listRelatives(xform, s=True, pa=True) or []:
            # Collect shapes flagged as intermediate objects.
            if mc.objExists(shp + '.intermediateObject'):
                if mc.getAttr(shp + '.intermediateObject'):
                    result.append(shp)
    return result
def multipleShapeCheck(objList=[]):
    '''
    Return a list of transforms with multiple shape nodes
    @param objList: List of objects to check for multiple shapes.
    @type objList: list
    '''
    # Resolve the nodes to inspect.
    if objList:
        nodeList = mc.ls(objList, dag=True)
    else:
        nodeList = mc.ls(transforms=True)

    result = []
    for node in nodeList:
        # Step up to the transform when given a shape.
        if not glTools.utils.transform.isTransform(node):
            node = mc.listRelatives(node, p=True)[0]

        # Count only geometry shapes under the transform.
        childShapes = mc.listRelatives(node, s=True)
        if not childShapes:
            continue
        geoShapes = mc.ls(childShapes, type=['mesh', 'nurbsSurface', 'nurbsCurve'])
        if len(geoShapes) > 1:
            result.append(node)

    return result
def constructionHistoryCheck(geoList=[]):
    '''
    Return a list of transforms whose geometry carries construction history.
    @param geoList: List of objects to check for construction history. If empty, use all scene geometry.
    @type geoList: list
    '''
    # Get Scene Geometry
    if not geoList:
        geoList = mc.ls(geometry=True)
    else:
        geoList = mc.listRelatives(geoList,s=True,pa=True)
    # FIX: listRelatives can return None — guard so iteration never raises.
    if not geoList: geoList = []

    # For each geometry shape
    result = []
    for geo in geoList:
        # Check Construction History.
        # FIX: listHistory can return None — guard before list operations.
        hist = mc.listHistory(geo) or []
        # Remove Self
        if hist.count(geo): hist.remove(geo)
        # Ignore Node Types
        ignore = mc.ls(hist,type=['groupId','shadingEngine','transform'])
        hist = list(set(hist)-set(ignore))
        # Check History
        if hist:
            # FIX: listRelatives can return None — guard before extend().
            obj = mc.listRelatives(geo,p=True,pa=True) or []
            result.extend(obj)

    # Remove Duplicate Names
    if result: result = list(set(result))

    # Return Result
    return result
def userAttrCheck(objList=[], includeShapes=False):
    '''
    Return a list of user defined attributes for a specified list of nodes (and shapes).
    @param objList: List of objects to check for user defined attributes.
    @type objList: list
    @param includeShapes: Also check shapes for user defined attributes.
    @type includeShapes: bool
    '''
    # Default to every node in the scene.
    if not objList:
        objList = mc.ls()

    result = []
    for node in objList:
        # User-defined attributes on the node itself.
        for attr in (mc.listAttr(node, ud=True) or []):
            result.append(node + '.' + attr)

        # Optionally include attributes on the node's shapes.
        if includeShapes:
            for shp in (mc.listRelatives(node, s=True) or []):
                for attr in (mc.listAttr(shp, ud=True) or []):
                    result.append(shp + '.' + attr)

    return result
def emptyGroupCheck(objList=[]):
    '''
    List empty groups.
    @param objList: List of transforms to check.
    @type objList: list
    '''
    # Resolve the transforms to inspect.
    if objList:
        grpList = mc.ls(objList, transforms=True)
    else:
        grpList = mc.ls(transforms=True)

    # A group is empty when it has no descendents at all.
    return [grp for grp in grpList if not mc.listRelatives(grp, ad=True)]
def emptySetCheck(setList=[]):
    '''
    Return a list of empty sets
    @param setList: List of sets to check.
    @type setList: list
    '''
    # Default to every set in the scene.
    if not setList:
        setList = mc.ls(sets=True)

    emptySets = []
    for setName in setList:
        # Only consider actual set nodes.
        if not mc.ls(setName, sets=True):
            continue
        # Skip Maya's built-in default/initial sets.
        if setName.startswith('default') or setName.startswith('initial'):
            continue
        # A set with no members queries as empty.
        if not mc.sets(setName, q=True):
            emptySets.append(setName)

    return emptySets
def emptyLayerCheck(layerList=[]):
    '''
    Return a list if empty layers
    @param layerList: List of layers to check. If empty, use all existing layers in current scene.
    @type layerList: list
    '''
    layerTypes = ['displayLayer', 'renderLayer', 'animLayer']

    # Resolve the layers to inspect.
    if layerList:
        layerList = mc.ls(layerList, type=layerTypes)
    else:
        layerList = mc.ls(type=layerTypes)

    emptyLayers = []
    for layer in layerList:
        # Only consider actual layer nodes.
        if not mc.ls(layer, type=layerTypes):
            continue
        # Skip Maya's default layers.
        if layer.startswith('default'):
            continue
        # A layer with no members is empty.
        if not glTools.utils.layer.memberList(layer):
            emptyLayers.append(layer)

    return emptyLayers
def animCurveCheck(curveTypeList=['animCurveTL','animCurveTA','animCurveTT','animCurveTU','animCurveUL','animCurveUA','animCurveUT','animCurveUU']):
    '''
    Return a list of all existing animCurve nodes of the specified types.
    (Docstring corrected: removed a bogus duplicate @param entry.)
    @param curveTypeList: List of animCurve node types to consider.
    @type curveTypeList: list
    '''
    # Initialize Return List
    animCurves = []
    # List AnimCurve Nodes of each requested type
    for curveType in curveTypeList:
        curveList = mc.ls(type=curveType)
        if curveList:
            animCurves.extend(curveList)
    # Return Result
    return animCurves
def unusedShadingNodeCheck():
    '''
    Return a list of unused shading nodes.
    '''
    unusedNodes = glTools.utils.shader.listUnusedShadingNodes()
    return unusedNodes
def noGeometryShaderCheck(geoList=[]):
    '''
    Return a list of non intermediate geometry shapes with no shader assignment.
    @param geoList: List of geometry to check for shader assignments.
    @type geoList: list
    '''
    # Resolve the geometry shapes to inspect.
    if geoList:
        # Include descendent geometry of the given nodes, then filter.
        geoList += mc.ls(mc.listRelatives(geoList,ad=True,pa=True) or [],type=['mesh','nurbsSurface'],ni=True) or []
        geoList = mc.ls(geoList,type=['mesh','nurbsSurface'],ni=True)
    else:
        geoList = mc.ls(type=['mesh','nurbsSurface'],ni=True)

    # A shape without a shading group has no shader assignment.
    return [geo for geo in geoList if not glTools.utils.shader.getSG(geo)]
def unusedReferenceCheck():
    '''
    Return a list of unused reference nodes.
    '''
    unusedRefs = []
    # A reference node with no resolvable file is considered unused.
    for ref in glTools.utils.reference.listReferences():
        try:
            glTools.utils.reference.getReferenceFile(ref)
        except:
            unusedRefs.append(ref)
    return unusedRefs
def unknownNodeCheck():
    '''
    Return a list of unknown nodes.
    '''
    # mc.ls may return None; always hand back a list.
    return mc.ls(type='unknown') or []
def checkTransforms(objList=[],tol=0.0000000001):
'''
Check for non-zero transforms
@param objList: List of transforms to check.
@type objList: list
@param tol: Value tolerance.
@type tol: float
'''
# Check Object List
if not objList: objList = mc.ls(transforms=True)
if not objList: return []
# Check Transforms
transformList = []
for obj in objList:
# Skip Default Transforms
if obj == 'persp': continue
if obj == 'front': continue
if obj == 'side': continue
if obj == 'top': | |
<filename>src/netius/common/http2.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "<NAME> <<EMAIL>>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import struct
import tempfile
import contextlib
import netius
from . import http
from . import parser
# Wire-format sizes: a frame header is always 9 bytes and each entry in a
# SETTINGS frame payload is 6 bytes (2 byte identifier + 4 byte value).
HEADER_SIZE = 9
SETTING_SIZE = 6
# Frame type codes as used in the frame header.
DATA = 0x00
HEADERS = 0x01
PRIORITY = 0x02
RST_STREAM = 0x03
SETTINGS = 0x04
PUSH_PROMISE = 0x05
PING = 0x06
GOAWAY = 0x07
WINDOW_UPDATE = 0x08
CONTINUATION = 0x09
# Error codes carried by RST_STREAM and GOAWAY frames.
PROTOCOL_ERROR = 0x01
INTERNAL_ERROR = 0x02
FLOW_CONTROL_ERROR = 0x03
SETTINGS_TIMEOUT = 0x04
STREAM_CLOSED = 0x05
FRAME_SIZE_ERROR = 0x06
REFUSED_STREAM = 0x07
CANCEL = 0x08
COMPRESSION_ERROR = 0x09
CONNECT_ERROR = 0x0a
ENHANCE_YOUR_CALM = 0x0b
INADEQUATE_SECURITY = 0x0c
HTTP_1_1_REQUIRED = 0x0d
# Identifiers for the entries of a SETTINGS frame.
SETTINGS_HEADER_TABLE_SIZE = 0x01
SETTINGS_ENABLE_PUSH = 0x02
SETTINGS_MAX_CONCURRENT_STREAMS = 0x03
SETTINGS_INITIAL_WINDOW_SIZE = 0x04
SETTINGS_MAX_FRAME_SIZE = 0x05
SETTINGS_MAX_HEADER_LIST_SIZE = 0x06
HTTP_20 = 4
""" The newly created version of the protocol, note that
this constant value should be created in away that its value
is superior to the ones defined for previous versions """
# Parser state machine values (header -> payload -> finish).
HEADER_STATE = 1
""" The initial header state for which the header
of the frame is going to be parsed and loaded """
PAYLOAD_STATE = 2
""" The second state of the frame parsing where the
payload of the frame is going to be loaded """
FINISH_STATE = 3
""" The final finish state to be used when the parsing
of the frame has been finished """
HTTP2_WINDOW = 65535
""" The default/initial size of the window used for the
flow control of both connections and streams """
HTTP2_FRAME_SIZE = 16384
""" The base default value for the maximum size allowed
from the frame, this includes the header value """
HTTP2_PREFACE = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
""" The preface string to be sent by the client upon
the establishment of the connection """
HTTP2_PSEUDO = (":method", ":scheme", ":path", ":authority", ":status")
""" The complete set of HTTP 2 based pseudo-header values
this list should be inclusive and limited """
HTTP2_TUPLES = (
    (SETTINGS_HEADER_TABLE_SIZE, "SETTINGS_HEADER_TABLE_SIZE"),
    (SETTINGS_ENABLE_PUSH, "SETTINGS_ENABLE_PUSH"),
    (SETTINGS_MAX_CONCURRENT_STREAMS, "SETTINGS_MAX_CONCURRENT_STREAMS"),
    (SETTINGS_INITIAL_WINDOW_SIZE, "SETTINGS_INITIAL_WINDOW_SIZE"),
    (SETTINGS_MAX_FRAME_SIZE, "SETTINGS_MAX_FRAME_SIZE"),
    (SETTINGS_MAX_HEADER_LIST_SIZE, "SETTINGS_MAX_HEADER_LIST_SIZE")
)
""" The sequence of tuple that associate the constant value of the
setting with the proper string representation for it """
HTTP2_NAMES = {
    DATA : "DATA",
    HEADERS : "HEADERS",
    PRIORITY : "PRIORITY",
    RST_STREAM : "RST_STREAM",
    SETTINGS : "SETTINGS",
    PUSH_PROMISE : "PUSH_PROMISE",
    PING : "PING",
    GOAWAY : "GOAWAY",
    WINDOW_UPDATE : "WINDOW_UPDATE",
    CONTINUATION : "CONTINUATION"
}
""" The association between the various types of frames
described as integers and their representation as strings """
HTTP2_SETTINGS = {
    SETTINGS_HEADER_TABLE_SIZE : 4096,
    SETTINGS_ENABLE_PUSH : 1,
    SETTINGS_MAX_CONCURRENT_STREAMS : 128,
    SETTINGS_INITIAL_WINDOW_SIZE : 65535,
    SETTINGS_MAX_FRAME_SIZE : 16384,
    SETTINGS_MAX_HEADER_LIST_SIZE : 16384
}
""" The default values to be used for settings of a newly
created connection, this should be defined according to specification """
HTTP2_SETTINGS_OPTIMAL = {
    SETTINGS_HEADER_TABLE_SIZE : 4096,
    SETTINGS_MAX_CONCURRENT_STREAMS : 512,
    SETTINGS_INITIAL_WINDOW_SIZE : 1048576,
    SETTINGS_MAX_FRAME_SIZE : 131072,
    SETTINGS_MAX_HEADER_LIST_SIZE : 16384
}
""" The optimal settings meant to be used by an infra-structure
deployed in a production environment """
HTTP2_SETTINGS_T = netius.legacy.items(HTTP2_SETTINGS)
""" The tuple sequence version of the settings defaults """
HTTP2_SETTINGS_OPTIMAL_T = netius.legacy.items(HTTP2_SETTINGS_OPTIMAL)
""" The tuple sequence version of the settings optimal """
class HTTP2Parser(parser.Parser):
    # names of the instance attributes that are managed by this parser;
    # presumably consumed by the base parser infra-structure for state
    # handling — confirm against parser.Parser
    FIELDS = (
        "_pid",
        "store",
        "file_limit",
        "state",
        "keep_alive",
        "length",
        "type",
        "flags",
        "stream",
        "end_headers",
        "last_type",
        "last_stream",
        "last_end_headers"
    )
def __init__(
    self,
    owner,
    store = False,
    file_limit = http.FILE_LIMIT
):
    """
    Constructor of the HTTP 2 parser.

    :type owner: Connection
    :param owner: The owner of the parser, stored by the base
    class and exposed as the connection by build().
    :type store: bool
    :param store: If the received contents should be stored
    by the parser (forwarded to reset()).
    :type file_limit: int
    :param file_limit: Threshold value forwarded to reset();
    presumably the payload size above which storage moves to a
    temporary file — confirm against the http module.
    """
    # initializes the base parser, then builds the static
    # structures and resets the mutable parsing state
    parser.Parser.__init__(self, owner)
    self.build()
    self.reset(
        store = store,
        file_limit = file_limit
    )
def build(self):
    """
    Builds the initial set of states ordered according to
    their internal integer definitions, this method provides
    a fast and scalable way of parsing data.
    """
    parser.Parser.build(self)
    # the owner of the parser is (by definition) the connection
    # from which the data to be parsed is received
    self.connection = self.owner
    # state handler methods, indexed by (state - 1) in parse()
    self.states = (
        self._parse_header,
        self._parse_payload
    )
    self.state_l = len(self.states)
    # per frame type parser methods, ordered to match the frame
    # type constants (DATA == 0x00 ... CONTINUATION == 0x09)
    self.parsers = (
        self._parse_data,
        self._parse_headers,
        self._parse_priority,
        self._parse_rst_stream,
        self._parse_settings,
        self._parse_push_promise,
        self._parse_ping,
        self._parse_goaway,
        self._parse_window_update,
        self._parse_continuation
    )
    # map associating stream identifiers with their stream
    # objects, plus the largest stream identifier seen so far
    self.streams = {}
    self._max_stream = 0
    # header encoder/decoder objects, left unset here and
    # presumably created lazily elsewhere — confirm
    self._encoder = None
    self._decoder = None
def destroy(self):
    """
    Destroys the current structure for the parser meaning that
    it's restored to the original values, this method should only
    be called on situation where no more parser usage is required.
    """
    parser.Parser.destroy(self)
    # iterates over the complete set of associated streams to close
    # them as the parser is now going to be destroyed and they cannot
    # be reached any longer (invalidated state); a snapshot of the
    # values is taken first so that closing a stream may safely
    # mutate the streams map
    streams = netius.legacy.values(self.streams)
    for stream in streams: stream.close()
    # restores every structure created by build() back to an
    # empty/neutral value, releasing the associated references
    self.connection = None
    self.states = ()
    self.state_l = 0
    self.parsers = ()
    self.streams = {}
    self._max_stream = 0
    self._encoder = None
    self._decoder = None
def info_dict(self):
    """
    Builds the diagnostics dictionary for the parser, extending
    the base parser information with the per-stream information.
    """
    info = parser.Parser.info_dict(self)
    info["streams"] = self.info_streams()
    return info
def info_streams(self):
    """
    Builds the list of per-stream information dictionaries,
    ordered by the stream identifiers.
    """
    identifiers = sorted(netius.legacy.keys(self.streams))
    return [self.streams[identifier].info_dict() for identifier in identifiers]
def reset(
    self,
    store = False,
    file_limit = http.FILE_LIMIT
):
    """
    Resets the mutable parsing state of the parser back to the
    initial values, so that a new frame may be parsed.

    :type store: bool
    :param store: If the received contents should be stored.
    :type file_limit: int
    :param file_limit: Storage threshold value (see __init__).
    """
    # storage related options
    self.store = store
    self.file_limit = file_limit
    # parsing always (re-)starts at the frame header state
    self.state = HEADER_STATE
    # buffer of data chunks pending processing (see parse())
    self.buffer = []
    self.keep_alive = True
    self.payload = None
    # values of the frame header currently being parsed
    # (length, type, flags and stream identifier)
    self.length = 0
    self.type = 0
    self.flags = 0
    self.stream = 0
    self.stream_o = None
    self.end_headers = False
    # values describing the previously parsed frame, populated
    # by the clear() operation when its save flag is set
    self.last_type = 0
    self.last_stream = 0
    self.last_end_headers = False
def clear(self, force = False, save = True):
    """
    Clears the parsing state of the current frame, optionally
    saving the frame's describing values as the "last" ones.

    :type force: bool
    :param force: If the clear should run even while a frame
    header is still being parsed.
    :type save: bool
    :param save: If the current frame values should be exposed
    as the last_* attributes after the reset.
    """
    # clearing in the middle of a header parse is a no-op
    # unless explicitly forced by the caller
    if not force and self.state == HEADER_STATE:
        return
    # snapshot the values describing the frame that was just
    # parsed so they survive the reset operation
    saved_type = self.type
    saved_stream = self.stream
    saved_end_headers = self.end_headers
    self.reset(
        store = self.store,
        file_limit = self.file_limit
    )
    # expose the previous frame's values as the "last" set of
    # attributes, but only when requested via the save flag
    if save:
        self.last_type = saved_type
        self.last_stream = saved_stream
        self.last_end_headers = saved_end_headers
def close(self):
    # intentionally a no-op: the parser holds no resources that
    # require an explicit close; the method is presumably kept
    # for interface compatibility with other parsers — confirm
    pass
def parse(self, data):
    """
    Parses the provided data chunk, changing the current
    state of the parser accordingly and returning the
    number of processed bytes from it.

    :type data: String
    :param data: The string containing the data to be parsed
    in the current parse operation.
    :rtype: int
    :return: The amount of bytes of the data string that have
    been "parsed" in the current parse operation.
    """
    parser.Parser.parse(self, data)
    # in case the current state of the parser is finished, must
    # reset the state to the start position as the parser is
    # re-starting (probably a new data sequence)
    if self.state == FINISH_STATE: self.clear()
    # retrieves the size of the data that has been sent for parsing
    # and saves it under the size original variable
    size = len(data)
    size_o = size
    # iterates continuously to try to process all that
    # data that has been sent for processing
    while size > 0:
        if self.state <= self.state_l:
            # dispatches to the handler of the current state
            # (states are 1-based, the tuple is 0-based)
            method = self.states[self.state - 1]
            count = method(data)
            # a -1 count indicates that not enough data is
            # available for the current state, stops the loop so
            # the remainder is buffered for the next parse call
            if count == -1: break
            # a zero count re-runs the loop immediately without
            # consuming any bytes
            if count == 0: continue
            size -= count
            data = data[count:]
            continue
        elif self.state == FINISH_STATE:
            self.clear()
            continue
        else:
            raise netius.ParserError("Invalid state '%d'" % self.state)
    # in case not all of the data has been processed
    # must add it to the buffer so that it may be used
    # later in the next parsing of the message
    if size > 0: self.buffer.append(data)
    # returns the number of read (processed) bytes of the
    # data that has been sent to the parser
    return size_o - size
def get_type_s(self, type):
    """
    Retrieves the string based representation of the frame
    type according to the HTTP2 specification.

    :type type: int
    :param type: The frame type as an integer that is going
    to be converted to the string representation.
    :rtype: String
    :return: The string based representation of the frame type,
    or an invalid value when the type is not known.
    """
    if type in HTTP2_NAMES:
        return HTTP2_NAMES[type]
    return None
def assert_header(self):
"""
Runs a series of assertion operations related with the
header of the frame, making sure it remains compliant
with the HTTP 2 specification.
"""
if self.length > self.owner.settings[SETTINGS_MAX_FRAME_SIZE]:
raise netius.ParserError(
"Headers are greater than | |
<reponame>nasir733/airbnb-clone
# -*- coding: utf-8 -*-
# Copyright (C) 2004-2016 Mag. <NAME>. All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. <EMAIL>
# ****************************************************************************
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# CAL.Date
#
# Purpose
# Wrapper around `datetime.date`
#
# Revision Dates
# 14-Oct-2004 (CT) Creation
# (derived from MG's CAL.Date_Time and CT's Date_Time)
# 17-Oct-2004 (CT) `__add__` and `__sub__` changed to use `Delta.dt_op`
# 17-Oct-2004 (CT) Doctest for `Month_Delta` added
# 17-Oct-2004 (CT) s/Month_Delta/MY_Delta/
# 19-Oct-2004 (CT) s/MY_Delta/Month_Delta/
# 23-Oct-2004 (CT) `_default_format` added
# 23-Oct-2004 (CT) `_new_object` redefined to handle negative values for
# `day`
# 26-Oct-2004 (CT) `is_weekday` added
# 2-Nov-2004 (CT) `from_string` added
# 10-Nov-2004 (CT) `from_ordinal` added
# 15-Nov-2004 (CT) `wk_ordinal` added
# 6-Jan-2005 (CT) `__main__` script added
# 01-Sep-2005 (MG) Use new decorator syntax for defining classmethod
# 30-Nov-2006 (CT) `__getattr__` changed to delegate to
# `__super.__getattr__`
# 30-Nov-2006 (CT) `CJD`, `MJD`, and `TJD` added
# 10-Dec-2006 (CT) `JD_offset` factored
# 10-Dec-2006 (CT) `from_julian` added
# 12-Dec-2006 (CT) `from_ordinal` changed to use `cls._kind` and
# `cls._Type` instead of `date` and `datetime.date`
# 12-Jan-2007 (CT) Imports fixed
# (import `Command_Line` and `Regexp` from _TFL)
# 11-Aug-2007 (CT) `quarter` added
# 7-Nov-2007 (CT) Use `Getter` instead of `lambda`
# 8-Nov-2007 (CT) `JD2000`, `JC_J2000`, and `julian_epoch` added
# 9-Nov-2007 (CT) Use `Once_Property` instead of `__getattr__`
# 11-Nov-2007 (CT) `sidereal_time` added
# 11-Nov-2007 (CT) `delta_T` added
# 12-Nov-2007 (CT) `JD` added, coverage of `delta_T` extended
# 23-Dec-2007 (CT) Command_Line options `-regexp` and `-xformat` added
# 3-Jan-2008 (CT) `_from_string_match_kw` factored
# 3-Jan-2008 (CT) `date_pattern` changed to make `year` mandatory
# 10-Feb-2008 (CT) `Date_Opt` added (and used for option `delta_to`)
# 15-Feb-2008 (CT) `Date_Opt` corrected (up-call to `__init__`)
# 8-May-2008 (CT) `Date_Opt` changed to use `__super`
# 4-Jan-2010 (CT) `_Date_Arg_` based on `TFL.CAO` added, `Date_Opt` removed
# 28-Feb-2014 (CT) Use future `print_function`
# 4-Mar-2014 (CT) Add subtraction test cases for `Month_Delta`
# 6-May-2015 (CT) Add tests for `jsonified`
# 29-Jan-2016 (CT) Change `_default_format` to "%Y-%m-%d"
# 2-Feb-2016 (CT) Add translation markup `_`
# 3-Feb-2016 (CT) Add `periods`, `inc_month`
# 15-Feb-2016 (CT) Use `CAL.G8R.Months` to support localized month names
# 29-Mar-2016 (CT) Add support for delta to `_Date_Arg_`
# 19-Apr-2016 (CT) DRY `_from_string_match_kw`
# 20-Apr-2016 (CT) Factor `month_from_string`
# 21-Apr-2016 (CT) Add check for tail to `from_string`
# 14-May-2016 (CT) Strip leading `+` from delta arg for `_Date_Arg_`
# 26-Sep-2016 (CT) Move `sidereal_time` to `CAL.Sky`
# 30-Nov-2016 (CT) Use `CAL.G8R.Months.LC`, not `CAL.G8R.Months`
# ««revision-date»»···
#--
from _CAL import CAL
from _TFL import TFL
import _CAL._DTW_
import _CAL.G8R
import _TFL.Accessor
import _TFL.CAO
from _TFL._Meta.Once_Property import Once_Property
from _TFL.I18N import _, _T, _Tn
from _TFL.Math_Func import horner
from _TFL.pyk import pyk
from _TFL.Regexp import *
import datetime
import operator
class Date (CAL._DTW_) :
"""Model a (gregorian) date.
>>> from _CAL.Delta import Date_Delta as Delta
>>> d = Date (2004, 10, 14)
>>> print (d)
2004-10-14
>>> d.year, d.month, d.day, d.date, d.week, d.weekday, d.ordinal
(2004, 10, 14, datetime.date(2004, 10, 14), 42, 3, 731868)
>>> d = d - Delta (3)
>>> d.year, d.month, d.day, d.date, d.week, d.weekday, d.ordinal
(2004, 10, 11, datetime.date(2004, 10, 11), 42, 0, 731865)
>>> d = d - 1
>>> d.year, d.month, d.day, d.date, d.week, d.weekday, d.ordinal
(2004, 10, 10, datetime.date(2004, 10, 10), 41, 6, 731864)
>>> from _CAL.Delta import Month_Delta
>>> print (d, d + Month_Delta (1), d - Month_Delta (1))
2004-10-10 2004-11-10 2004-09-10
>>> print (d, d + Month_Delta (3), d - Month_Delta (3))
2004-10-10 2005-01-10 2004-07-10
>>> print (d, d + Month_Delta (12), d - Month_Delta (12))
2004-10-10 2005-10-10 2003-10-10
>>> print (d, d + Month_Delta (15), d - Month_Delta (15))
2004-10-10 2006-01-10 2003-07-10
>>> print (d, d + Month_Delta (24), d - Month_Delta (24))
2004-10-10 2006-10-10 2002-10-10
>>> print (d, d + Month_Delta (-1), d - Month_Delta (-1))
2004-10-10 2004-09-10 2004-11-10
>>> print (d, d + Month_Delta (-12), d - Month_Delta (-12))
2004-10-10 2003-10-10 2005-10-10
>>> MD = Month_Delta
>>> for x in (d + MD (m) for m in range (-12, 13, 3)):
... print (str (x), ":", x.quarter)
2003-10-10 : 4
2004-01-10 : 1
2004-04-10 : 2
2004-07-10 : 3
2004-10-10 : 4
2005-01-10 : 1
2005-04-10 : 2
2005-07-10 : 3
2005-10-10 : 4
>>> d = Date (day = 1, month = 1, year = 2004)
>>> print (d, d + Month_Delta (11))
2004-01-01 2004-12-01
>>> d1 = Date (2004, 10, 14)
>>> d2 = Date (2004, 10, 16)
>>> print (d1 - d2)
-2 days, 0:00:00
>>> d = Date (day = -1, month = 1, year = 2004)
>>> print (d, d + Month_Delta (1))
2004-01-31 2004-02-29
>>> print (d, d + Month_Delta (2))
2004-01-31 2004-03-31
>>> print (d, d + Month_Delta (3))
2004-01-31 2004-04-30
>>> print (d, d + Month_Delta (11))
2004-01-31 2004-12-31
>>> print (d, d + Month_Delta (12))
2004-01-31 2005-01-31
>>> print (d, d + Month_Delta (13))
2004-01-31 2005-02-28
>>> print (d, d + Month_Delta (-1))
2004-01-31 2003-12-31
>>> print (d, d + Month_Delta (-2))
2004-01-31 2003-11-30
>>> print (d, d + Month_Delta (-3))
2004-01-31 2003-10-31
>>> print (d, d + Month_Delta (-11))
2004-01-31 2003-02-28
>>> print (Date.from_string ("20041102"))
2004-11-02
>>> print (Date.from_string ("2004/11/02"))
2004-11-02
>>> print (Date.from_string ("20041102"))
2004-11-02
>>> print (Date.from_string ("31.10.2004"))
2004-10-31
>>> print (Date.from_string ("31/10/2004"))
2004-10-31
>>> print (Date.from_string ("31.Oct.2004"))
2004-10-31
>>> print (Date.from_string ("Oct 5, 2004"))
2004-10-05
>>> from _TFL.json_dump import to_string as jsonified
>>> print (jsonified ([d]))
["2004-01-31"]
>>> mjd_epoch = Date (1858, 11, 17)
>>> tjd_epoch = Date (1968, 5, 24)
>>> mjd_epoch.ordinal, mjd_epoch.CJD, mjd_epoch.MJD, mjd_epoch.TJD
(678576, 2400000, 0, -40000)
>>> tjd_epoch.ordinal, tjd_epoch.CJD, tjd_epoch.MJD, tjd_epoch.TJD
(718576, 2440000, 40000, 0)
>>> Date.from_julian (2400000)
Date (1858, 11, 17)
>>> Date.from_julian (2440000)
Date (1968, 5, 24)
>>> Date.from_julian (40000, kind = "MJD")
Date (1968, 5, 24)
>>> with TFL.I18N.test_language ("de") :
... print (Date.from_string ("31-Oktober-2004"))
2004-10-31
>>> def _show_periods (d) :
... print (d, "::")
... for p, (h, t) in sorted (pyk.iteritems (d.periods)) :
... print ("%-7s" % p, h, t)
>>> d = Date (2016, 2, 3)
>>> _show_periods (d)
2016-02-03 ::
month 2016-02-01 2016-02-29
quarter 2016-01-01 2016-03-31
week 2016-02-01 2016-02-07
year 2016-01-01 2016-12-31
>>> _show_periods (d + 94)
2016-05-07 ::
month 2016-05-01 2016-05-31
quarter 2016-04-01 2016-06-30
week 2016-05-02 2016-05-08
year 2016-01-01 2016-12-31
>>> _show_periods (d + 194)
2016-08-15 ::
month 2016-08-01 2016-08-31
quarter 2016-07-01 2016-09-30
week 2016-08-15 2016-08-21
year 2016-01-01 2016-12-31
>>> _show_periods (d + 294)
2016-11-23 ::
month 2016-11-01 2016-11-30
quarter 2016-10-01 2016-12-31
week 2016-11-21 2016-11-27
year 2016-01-01 2016-12-31
>>> for i in range (-4, 15, 3) :
... print ("%s + %2d months --> %s" % (d, i, d.inc_month (i)))
2016-02-03 + -4 months --> 2015-10-03
2016-02-03 + -1 months --> 2016-01-03
2016-02-03 + 2 months --> 2016-04-03
2016-02-03 + 5 months --> 2016-07-03
2016-02-03 + 8 months --> 2016-10-03
2016-02-03 + 11 months --> 2017-01-03
2016-02-03 + 14 months --> 2017-04-03
"""
### Julian date offsets to Rata Die (Jan 1, 1)
### http://en.wikipedia.org/wiki/Julian_day_number
### http://en.wikipedia.org/wiki/Epoch_%28astronomy%29
JD_offset = dict \
( CJD = 1721424 ### Chronological JD (based on Jan 1, 4713 BC)
, CJS = 1721424
, JD = 1721424.5 ### Julian day (starts at noon)
, JD2000 = - 730120.5 ### JD relative to J2000.0 (noon)
, MJD = - 678576 ### Modified JD (based on Nov 17, 1858)
, MJS = - 678576
, TJD = - 718576 ### Truncated JD (based on May 24, 1968)
, TJS = - 718576
)
months = \
{ _ ("jan") : 1, _ ("january") : 1, 1 : "jan"
, _ ("feb") : 2, _ ("february") : 2, 2 : "feb"
, _ ("mar") : 3, _ ("march") : 3, 3 : "mar"
, _ ("apr") : 4, _ ("april") : 4, 4 : "apr"
, _ ("may") : 5, 5 : "may"
, _ ("jun") : 6, _ ("june") : 6, 6 : "jun"
, _ ("jul") : 7, _ ("july") : 7, 7 : "jul"
, _ ("aug") : 8, _ ("august") : 8, 8 : "aug"
, _ ("sep") : 9, _ ("september") : 9, 9 : "sep"
, _ ("oct") : 10, _ ("october") : 10, 10 : "oct"
, _ ("nov") : 11, _ ("november") : 11, 11 : "nov"
| |
import logging
from grobid_superconductors.linking.linking_module import CriticalTemperatureClassifier, RuleBasedLinker, \
SpacyPipeline
from tests.utils import get_tokens, get_tokens_and_spans, prepare_doc
LOGGER = logging.getLogger(__name__)
class TestSpacyPipeline:
    """Tests for the sentence boundary detection used by the linker."""

    def test_get_sentence_boundaries(self):
        # long paragraph that is expected to split into exactly 8 sentences
        text = "The relatively high superconducting transition tempera- ture in La 3 Ir 2 Ge 2 is noteworthy. " \
               "Recently, the isostructural compound La 3 Rh 2 Ge 2 was reported to be a superconducting material " \
               "with critical temperature T C = 3.5 K. This value was considered to be the highest in the series of " \
               "several La-based superconducting germanides, such as LaGe 2 , LaPd 2 Ge 2 , LaPt 2 Ge 2 , and " \
               "LaIr 2 Ge 2 ͑see Ref. 21 and refer- ences therein͒. The critical temperature T C = 4.7 K discov- ered " \
               "for La 3 Ir 2 Ge 2 in this work is by about 1.2 K higher than that found for La 3 Rh 2 Ge 2 . It is " \
               "also interesting to note that a Y-based ternary germanide, namely, Y 2 PdGe 3 , crystallized in the " \
               "hexagonal AlB 2 structure, was found to be a type-II su- perconductor with transition temperature " \
               "T C =3 K. The re- sults of band calculations for this system 25,26 reveal that the Y-4d density of " \
               "states dominates the Fermi level, and thus the superconductivity in this compound is believed to " \
               "origi- nate from Y-4d electrons. In the present case of La 3 Ir 2 Ge 2 or La 3 Rh 2 Ge 2 , " \
               "explanation of their superconductivity requires the knowledge of density of La-5d, Ir-5d ͑or Rh-4d͒, " \
               "and Ge- 4p states. Hence band-structure calculations are necessary. "
        words, spaces, spans = get_tokens(text, [])
        linker = RuleBasedLinker()
        boundaries = linker.get_sentence_boundaries(words, spaces)
        assert len(boundaries) == 8
class TestRuleBasedLinker:
    """Tests for the rule based entity linker."""

    def test_linking_pressure(self):
        # sentence holding material, tc, tc-value and pressure entities
        text = "The LaFe0.2 Sr 0.4 was discovered to be superconducting at 3K applying a pressure of 5Gpa."
        annotations = [
            ("LaFe0.2 Sr 0.4", "<material>"),
            ("superconducting", "<tc>"),
            ("3K", "<tcValue>"),
            ("5Gpa", "<pressure>")
        ]
        tokens, spans = get_tokens_and_spans(text, annotations)
        paragraph = {
            "text": text,
            "spans": spans,
            "tokens": tokens
        }
        linker = RuleBasedLinker(source="<pressure>", destination="<tcValue>")
        result = linker.process_paragraph(paragraph)
        print(result)
class TestCriticalTemperatureClassifier:
    """Tests for CriticalTemperatureClassifier.process_doc().

    Each test builds a doc from raw text plus annotated entity spans,
    runs the classifier and inspects which <tcValue> tokens were marked
    as linkable.
    """

    @staticmethod
    def _linkable_tc_values(text, spans):
        # runs the classifier over the annotated text and returns the
        # tokens of type <tcValue> that were flagged as linkable
        classifier = CriticalTemperatureClassifier()
        processed = classifier.process_doc(prepare_doc(text, spans))
        return [token for token in processed
                if token.ent_type_ == '<tcValue>' and token._.linkable is True]

    def test_markCriticalTemperature_simple_1(self):
        text = "The Tc of the BaClE2 is 30K."
        spans = [("Tc", "<tc>"), ("BaClE2", "<material>"), ("30K", "<tcValue>")]
        linkable = self._linkable_tc_values(text, spans)
        assert len(linkable) == 1
        assert linkable[0].text == "30K"

    def test_markCriticalTemperature_simple_2(self):
        text = "The material BaClE2 superconducts at 30K."
        spans = [("BaClE2", "<material>"), ("superconducts", "<tc>"), ("30K", "<tcValue>")]
        linkable = self._linkable_tc_values(text, spans)
        assert len(linkable) == 1
        assert linkable[0].text == "30K"

    def test_markCriticalTemperature_simple_3(self):
        text = "We are explaining some important notions. The material BaClE2 superconducts at 30K. What about going for a beer?"
        # NOTE(review): the "<tc>" surface form below looks suspicious (the
        # text contains "superconducts") — confirm the fixture is intended
        spans = [("<tc>", "<tc>"), ("BaClE2", "<material>"), ("30K", "<tcValue>")]
        linkable = self._linkable_tc_values(text, spans)
        assert len(linkable) == 1
        assert linkable[0].text == "30K"

    def test_markCriticalTemperature_simple_4(self):
        text = "The material BaClE2 has Tc at 30K."
        spans = [("BaClE2", "<material>"), ("Tc", "<tc>"), ("30K", "<tcValue>")]
        linkable = self._linkable_tc_values(text, spans)
        assert len(linkable) == 1
        assert linkable[0].text == "30K"

    def test_markCriticalTemperature_1(self):
        text = "We also plot in values of U 0 obtained from flux-creep in a BaFe 2−x Ni x As 2 crystal with " \
               "similar T c for H c-axis at T = 8 K and for H ab-planes at T = 13 K."
        spans = [("BaFe 2−x Ni x As 2 crystal", "<material>"), ("T c", "<tc>"), ("8 K", "<tcValue>"),
                 ("13 K", "<tcValue>")]
        assert len(self._linkable_tc_values(text, spans)) == 0

    def test_markCriticalTemperature_2(self):
        text = "(Color online) Effect of electron irradiation on the low-temperature penetration depth ∆λ of two " \
               "samples of BaFe2(As1−xPx)2: (a) Tc0 = 28 K and (b) Tc0 = 29 K."
        spans = [("BaFe2(As1−xPx)2", "<material>"), ("Tc0", "<tc>"), ("28 K", "<tcValue>"), ("Tc0", "<tc>"),
                 ("29 K", "<tcValue>")]
        assert len(self._linkable_tc_values(text, spans)) == 2

    def test_markCriticalTemperature_3(self):
        text = "It is also worth noticing that the T C of this structure is slightly lower (about 5 K lower) than " \
               "the T C for the 2×7 superlattice where the two BCO/CCO interfaces are far apart (seven unit " \
               "cells of CCO) and no sizeable intralayer interaction is expected.It is also worth noticing that " \
               "the T C of this structure is slightly lower (about 5 K lower) than the T C for the 2×7 " \
               "superlattice where the two BCO/CCO interfaces are far apart (seven unit cells of CCO) and no " \
               "sizeable intralayer interaction is expected."
        spans = [("BCO/CCO", "<material>"), ("CCO)", "<material>"), ("T C", "<tc>"), ("5 K", "<tcValue>")]
        assert len(self._linkable_tc_values(text, spans)) == 0

    # def test_markCriticalTemperature_repulsion_for_Curie_temperature(self):
    #     input = "The corresponding magnetization loop recorded after ZFC to 5 K with the magnetic field " \
    #             "parallel to the a-b plane for a single La 2/3 Ca 1/3 MnO 3−x film of thickness ϳ200 nm on LSAT " \
    #             "is shown in A Curie temperature T C of about 220 K and a magnetic moment ͑T → 0 K͒ Ͼ 2 B per Mn ion " \
    #             "were derived from these curves."
    #
    #     spans = [("5 K", "<tcValue>"), ("La 2/3 Ca 1/3 MnO 3−x film", "<material>"), ("T C", "<tc>"), ("220 K", "<tcValue>"), ]
    #
    #     doc = prepare_doc(input, spans)
    #     doc2 = markCriticalTemperature(doc)
    #
    #     tcValues = [entity for entity in filter(lambda w: w.ent_type_ in ['<temperature-tc>'], doc2)]
    #
    #     assert len(tcValues) == 0

    def test_markCriticalTemperature_relative_critical_temperature(self):
        text = "The R versus T curves (figure 2(c) for samples B1 and B2 (with 6 wt% Ag) show that the HIP process " \
               "increases T c by 0.8 K and reduces the resistance in the normal state by about 10%."
        spans = [("B1", "<material>"), ("B2 (with 6 wt% Ag)", "<material>"),
                 ("0.8 K", "<tcValue>"), ]
        assert len(self._linkable_tc_values(text, spans)) == 0

    def test_markCriticalTemperature_relative_critical_temperature_2(self):
        text = "The critical temperature T C = 4.7 K discovered for La 3 Ir 2 Ge 2 in this work is by about 1.2 K " \
               "higher than that found for La 3 Rh 2 Ge 2 ."
        spans = [("critical temperature", "<tc>"), ("T C", "<tc>"), ("4.7 K", "<tcValue>"),
                 ("La 3 Ir 2 Ge 2", "<material>"),
                 ("La 3 Rh 2 Ge 2", "<material>")]
        linkable = self._linkable_tc_values(text, spans)
        assert len(linkable) == 1
        assert linkable[0].text == "4.7 K"

    def test_markCriticalTemperature_relative_critical_temperature_3(self):
        text = "The material BaClE2 has Tc at 30K higher than 77K."
        spans = [("BaClE2", "<material>"), ("<tc>", "<tc>"), ("30K", "<tcValue>")]
        assert len(self._linkable_tc_values(text, spans)) == 0
| |
them still useful to the user.
n_measure : int
The number of equally-spaced measurement points on the x-axis
for each curve. If the parameter 'log_scale' is set to True, the
points will be equally-spaced only if depicted on a logarithmic
x-axis. Otherwise, they will be equally-spaced on linear scales.
direction_maximum : int
The maximum number of gravity flips, i.e. direction changes.
This value determines the upper end of the range from which a
number of gravity direction change points is sample uniformly
as integers, with 0 as the lower end of the sampling range.
convergence_point : list with two single floats, defaults to None
The point in which all curves should perfectly converge, as
[x-axis value, y-axis value]. Normally, this refers to left-side
convergence if the parameter 'right_convergence' isn't set to
True. If 'convergence_point' isn't set, projectile starting
points are sampled uniformly random from the y-axis interval,
but the projectile will still start at a zero launch angle.
log_scale : bool
The indicator whether the measurements on the x-axis should be
on a logarithmic scale, while retaining the behavior of a code
calculation for a linear scale. This means that the steps will
be equally-spaced when displayed with a logarithmic x-axis.
random_launch : bool
The indicator whether no initial zero launch angle is necessary,
i.e. projectiles will start at random angles sampled uniformly
between -90 and 90 degrees for each curve separately.
right_convergence : bool
The indicator whether curves should converge on the right side
instead of the left side. After computing the curves, their
y-axis measurement vector will be flipped. If 'log_scale' is set
to True and a value for 'start_force' is provided, this means
that the 'start_force' threshold value for the first deviation
from unity on the y-axis is calculated for left-side convergence
before being flipped, which should be considered in the inputs.
change_range : list
The x-axis percentiles below and above which no gravity flips
should take place to avoid extreme bends in the curves due to
the gravitational magnitude being sampled up to the maximum
allowable force to hit the upper limit of the y-axis interval,
as [lower percentile, upper percentile]. The default behavior if
the parameter isn't set is to use the 10th and 90th percentile.
change_spacing : int
The minimum space on the x-axis in full steps that is required
between gravitational direction changes, with higher values
resulting in increased smoothness. The parameter has to be small
enough that the provided 'n_measure' parameter divided by the
the 'change_spacing' parameter is equal to or larger than the
'direction_maximum' parameter, i.e. the number of measurements
divided by the minimum x-axis spacing has to be >= the maximum
number of direction changes so that all possibilities will fit.
change_ratio : float
The value by which the gravitational force of the previous
partial trajectory of a given curve is multiplied to get the
upper limit of the range from which the next partial trajectory
of the same curve is sampled. Like 'change_spacing', this
parameter is a way to enforce further smoothness.
start_force : float
The x-axis point before which no y-axis deviation with regard to
the projectile's starting point should happen. This is useful
if a function perturbation should only happen after a certain
point, which can be specified by setting this parameter.
Returns:
--------
None
Attributes:
-----------
None
"""
# Create a boolean vector to mark all incorrect inputs
incorrect_inputs = np.zeros(15, dtype = bool)
# Check if the number of curves is a positive integer
if type(n_curves) is not int:
incorrect_inputs[0] = True
elif n_curves < 1:
incorrect_inputs[0] = True
# Check if the x-axis interval is a list of two floats
if type(x_interval) is not list:
incorrect_inputs[1] = True
elif ((len(x_interval) is not 2)
or (type(x_interval[0]) is not float)
or (type(x_interval[1]) is not float)):
incorrect_inputs[1] = True
# Check if the y-axis interval is a list of two floats
if type(y_interval) is not list:
incorrect_inputs[2] = True
elif ((len(y_interval) is not 2)
or (type(y_interval[0]) is not float)
or (type(y_interval[1]) is not float)):
incorrect_inputs[2] = True
# Check if the number of measurements is a valid integer
if ((type(n_measure) is not int)
or (n_measure < 0)):
incorrect_inputs[3] = True
# Check whether the change maximum is a valid integer
if ((type(direction_maximum) is not int)
or (direction_maximum < 0)):
incorrect_inputs[4] = True
# Check if the convergence point is None or valid
if ((type(convergence_point) is not list)
and (convergence_point is not None)):
incorrect_inputs[5] = True
elif type(convergence_point) is list:
if ((len(convergence_point) is not 2)
or (type(convergence_point[0]) is not float)
or (type(convergence_point[1]) is not float)):
incorrect_inputs[5] = True
elif convergence_point[0] != x_interval[0]:
incorrect_inputs[5] = True
# Check if the log-scale indicator is a boolean
if type(log_scale) is not bool:
incorrect_inputs[6] = True
# Check if the random launch indicator is a boolean
if type(random_launch) is not bool:
incorrect_inputs[7] = True
# Check if the convergence indicator is a boolean
if type(right_convergence) is not bool:
incorrect_inputs[8] = True
# Check if the change percentiles are valid inputs
if ((type(change_range) is not list)
and (change_range is not None)):
incorrect_inputs[9] = True
elif change_range is not None:
if ((len(change_range) is not 2)
or (type(change_range[0]) is not float)
or (type(change_range[1]) is not float)):
incorrect_inputs[9] = True
elif ((change_range[0] < 0)
or (change_range[0] > 1)
or (change_range[1] < 0)
or (change_range[1] > 1)):
incorrect_inputs[9] = True
# Check if the change spacing is a valid input
if change_spacing is not None:
if type(change_spacing) is not int:
incorrect_inputs[10] = True
elif change_spacing <= 0:
incorrect_inputs[10] = True
elif change_spacing > (np.divide(n_measure, direction_maximum)):
incorrect_inputs[10] = True
# Check if the change ratio is a valid float
if change_ratio is not None:
if type(change_ratio) is not float:
incorrect_inputs[11] = True
elif change_ratio <= 0:
incorrect_inputs[11] = True
# Check if the first deviation point is a valid float
if start_force is not None:
if ((type(start_force) is not float)
or (start_force < x_interval[0])
or (start_force > x_interval[1])):
incorrect_inputs[12] = True
# Check whether inputs are valid for a log-scale
if (log_scale is True):
if ((np.log10(x_interval[0]).is_integer() is False)
or (np.log10(x_interval[1]).is_integer() is False)):
incorrect_inputs[13] = True
if convergence_point is not None:
if np.log10(convergence_point[0]).is_integer() is False:
incorrect_inputs[14] = True
# Define error messages for each unsuitable parameter input
errors = ['ERROR: n_curves: Must be an integer > 0',
'ERROR: x_interval: Must be a list of length 2, ' +
'with each element being a single float value ' +
'and x_interval[0] < x_interval[1]',
'ERROR: y_interval: Must be a list of length 2, ' +
'with each element being a single float value ' +
'and y_interval[0] < y_interval[1]',
'ERROR: n_measure: Must be an integer > 0',
'ERROR: direction_maximum: Must be an integer > 0',
'ERROR: convergence_point: Must be either None ' +
'or a list of length 2, with each element being ' +
'a single float value and the first element being ' +
'identical to the first element of x_interval',
'ERROR: log_scale: Must be a boolean value',
'ERROR: random_launch: Must be a boolean value ',
'ERROR: right_convergence: Must be a boolean value ',
'ERROR: change_range: Must be either None or a ' +
'list of length two, with each element being a ' +
'single float value between 0.0 and 1.0',
'ERROR: change_spacing: Must be either None or a ' +
'single integer > 0, so that n_measures divived' +
'by direction_maximum is equal to or larger than' +
'the value provided for this parameter',
'ERROR: change_ratio: Must be either None or a ' +
'single float > 0',
'ERROR: start_force: Must be either None or a ' +
'float value between the first and the second ' +
'element of x_interval',
'ERROR: x_interval, log_scale: If log_scale is ' +
'True, the float values in x_interval have to | |
NameError :
print "API key and Secret key are not set."
return
except :
print "The API Key and Secret Key are not valid"
return False
if (self.filename) :
try :
self.flickr.upload(self.filename,self.filehandle)
except :
print "Uploading Failed !"
return False
else :
import tempfile
tf=tempfile.NamedTemporaryFile(suffix='.jpg')
self.save(tf.name)
temp = Image(tf.name)
self.flickr.upload(tf.name,temp.filehandle)
return True
def scale(self, width, height = -1):
"""
**SUMMARY**
Scale the image to a new width and height.
If no height is provided, the width is considered a scaling value.
**PARAMETERS**
* *width* - either the new width in pixels, if the height parameter is > 0, or if this value
is a floating point value, this is the scaling factor.
* *height* - the new height in pixels.
**RETURNS**
The resized image.
**EXAMPLE**
>>> img.scale(200, 100) #scales the image to 200px x 100px
>>> img.scale(2.0) #enlarges the image to 2x its current size
.. Warning::
The two value scale command is deprecated. To set width and height
use the resize function.
:py:meth:`resize`
"""
w, h = width, height
if height == -1:
w = int(self.width * width)
h = int(self.height * width)
if( w > MAX_DIMENSION or h > MAX_DIMENSION or h < 1 or w < 1 ):
logger.warning("Holy Heck! You tried to make an image really big or impossibly small. I can't scale that")
return self
scaled_bitmap = cv.CreateImage((w, h), 8, 3)
cv.Resize(self.getBitmap(), scaled_bitmap)
return Image(scaled_bitmap, colorSpace=self._colorSpace)
def resize(self, w=None,h=None):
"""
**SUMMARY**
This method resizes an image based on a width, a height, or both.
If either width or height is not provided the value is inferred by keeping the aspect ratio.
If both values are provided then the image is resized accordingly.
**PARAMETERS**
* *width* - The width of the output image in pixels.
* *height* - The height of the output image in pixels.
**RETURNS**
Returns a resized image, if the size is invalid a warning is issued and
None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> img2 = img.resize(w=1024) # h is guessed from w
>>> img3 = img.resize(h=1024) # w is guessed from h
>>> img4 = img.resize(w=200,h=100)
"""
retVal = None
if( w is None and h is None ):
logger.warning("Image.resize has no parameters. No operation is performed")
return None
elif( w is not None and h is None):
sfactor = float(w)/float(self.width)
h = int( sfactor*float(self.height) )
elif( w is None and h is not None):
sfactor = float(h)/float(self.height)
w = int( sfactor*float(self.width) )
if( w > MAX_DIMENSION or h > MAX_DIMENSION ):
logger.warning("Image.resize Holy Heck! You tried to make an image really big or impossibly small. I can't scale that")
return retVal
scaled_bitmap = cv.CreateImage((w, h), 8, 3)
cv.Resize(self.getBitmap(), scaled_bitmap)
return Image(scaled_bitmap, colorSpace=self._colorSpace)
    def smooth(self, algorithm_name='gaussian', aperture=(3,3), sigma=0, spatial_sigma=0, grayscale=False, aperature=None):
        """
        **SUMMARY**
        Smooth the image, by default with the Gaussian blur. If desired,
        additional algorithms and apertures can be specified. Optional parameters
        are passed directly to OpenCV's cv.Smooth() function.
        If grayscale is true the smoothing operation is only performed on a single channel
        otherwise the operation is performed on each channel of the image.
        for OpenCV versions >= 2.3.0 it is advisible to take a look at
        - :py:meth:`bilateralFilter`
        - :py:meth:`medianFilter`
        - :py:meth:`blur`
        - :py:meth:`gaussianBlur`
        **PARAMETERS**
        * *algorithm_name* - valid options are 'blur' or gaussian, 'bilateral', and 'median'.
          * `Median Filter <http://en.wikipedia.org/wiki/Median_filter>`_
          * `Gaussian Blur <http://en.wikipedia.org/wiki/Gaussian_blur>`_
          * `Bilateral Filter <http://en.wikipedia.org/wiki/Bilateral_filter>`_
        * *aperture* - A tuple for the aperture of the gaussian blur as an (x,y) tuple.
                     - Note there was rampant spelling mistakes in both smooth & sobel,
                       aperture is spelled as such, and not "aperature". This code is backwards
                       compatible.
        .. Warning::
          These must be odd numbers.
        * *sigma* -
        * *spatial_sigma* -
        * *grayscale* - Return just the grayscale image.
        **RETURNS**
        The smoothed image.
        **EXAMPLE**
        >>> img = Image("Lenna")
        >>> img2 = img.smooth()
        >>> img3 = img.smooth('median')
        **SEE ALSO**
        :py:meth:`bilateralFilter`
        :py:meth:`medianFilter`
        :py:meth:`blur`
        """
        # see comment on argument documentation (spelling error)
        # If the legacy misspelled keyword was passed, it wins over aperture.
        aperture = aperature if aperature else aperture
        if is_tuple(aperture):
            win_x, win_y = aperture
            # cv.Smooth requires odd, positive kernel dimensions.
            if win_x <= 0 or win_y <= 0 or win_x % 2 == 0 or win_y % 2 == 0:
                logger.warning("The aperture (x,y) must be odd number and greater than 0.")
                return None
        else:
            raise ValueError("Please provide a tuple to aperture, got: %s" % type(aperture))
        #gauss and blur can work in-place, others need a buffer frame
        #use a string to ID rather than the openCV constant
        if algorithm_name == "blur":
            algorithm = cv.CV_BLUR
        elif algorithm_name == "bilateral":
            algorithm = cv.CV_BILATERAL
            win_y = win_x #aperture must be square
        elif algorithm_name == "median":
            algorithm = cv.CV_MEDIAN
            win_y = win_x #aperture must be square
        else:
            algorithm = cv.CV_GAUSSIAN #default algorithm is gaussian
        if grayscale:
            # Single-channel path: smooth the grayscale bitmap directly.
            newimg = self.getEmpty(1)
            cv.Smooth(self._getGrayscaleBitmap(), newimg, algorithm, win_x, win_y, sigma, spatial_sigma)
        else:
            # Color path: split into channels, smooth each independently,
            # then re-merge in the same order they were split.
            # NOTE(review): names here assume the bitmap's channel 0 is blue
            # (BGR layout) — confirm against getBitmap()'s color order.
            newimg = self.getEmpty(3)
            r = self.getEmpty(1)
            g = self.getEmpty(1)
            b = self.getEmpty(1)
            ro = self.getEmpty(1)
            go = self.getEmpty(1)
            bo = self.getEmpty(1)
            cv.Split(self.getBitmap(), b, g, r, None)
            cv.Smooth(r, ro, algorithm, win_x, win_y, sigma, spatial_sigma)
            cv.Smooth(g, go, algorithm, win_x, win_y, sigma, spatial_sigma)
            cv.Smooth(b, bo, algorithm, win_x, win_y, sigma, spatial_sigma)
            cv.Merge(bo,go,ro, None, newimg)
        return Image(newimg, colorSpace=self._colorSpace)
def medianFilter(self, window='',grayscale=False):
"""
**SUMMARY**
Smooths the image, with the median filter. Performs a median filtering operation to denoise/despeckle the image.
The optional parameter is the window size.
see : http://en.wikipedia.org/wiki/Median_filter
**Parameters**
* *window* - should be in the form a tuple (win_x,win_y). Where win_x should be equal to win_y.
- By default it is set to 3x3, i.e window = (3x3).
**Note**
win_x and win_y should be greater than zero, a odd number and equal.
For OpenCV versions <= 2.3.0
-- this acts as Convience function derived from the :py:meth:`smooth` method. Which internally calls cv.Smooth
For OpenCV versions >= 2.3.0
-- cv2.medianBlur function is called.
"""
try:
import cv2
new_version = True
except :
new_version = False
pass
if is_tuple(window):
win_x, win_y = window
if ( win_x>=0 and win_y>=0 and win_x%2==1 and win_y%2==1 ) :
if win_x != win_y :
win_x=win_y
else :
logger.warning("The aperture (win_x,win_y) must be odd number and greater than 0.")
return None
elif( is_number(window) ):
win_x = window
else :
win_x = 3 #set the default aperture window size (3x3)
if ( not new_version ) :
grayscale_ = grayscale
return self.smooth(algorithm_name='median', aperture=(win_x,win_y),grayscale=grayscale_)
else :
if (grayscale) :
img_medianBlur = cv2.medianBlur(self.getGrayNumpy(),win_x)
return Image(img_medianBlur, colorSpace=ColorSpace.GRAY)
else :
img_medianBlur = cv2.medianBlur(self.getNumpy()[:,:, ::-1].transpose([1,0,2]),win_x)
img_medianBlur = img_medianBlur[:,:, ::-1].transpose([1,0,2])
return Image(img_medianBlur, colorSpace=self._colorSpace)
def bilateralFilter(self, diameter=5,sigmaColor=10, sigmaSpace=10,grayscale=False):
"""
**SUMMARY**
Smooths the image, using bilateral filtering. Potential of bilateral filtering is for the removal of texture.
The optional parameter are diameter, sigmaColor, sigmaSpace.
Bilateral Filter
see : http://en.wikipedia.org/wiki/Bilateral_filter
see : http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html
**Parameters**
* *diameter* - A tuple for the window of the form (diameter,diameter). By default window = (3x3). ( for OpenCV versions <= 2.3.0)
- Diameter of each pixel neighborhood that is used during filtering. ( for OpenCV versions >= 2.3.0)
* *sigmaColor* - Filter the specified value in the color space. A larger value of the parameter means that farther colors within the pixel neighborhood (see sigmaSpace ) will be mixed together, resulting in larger areas of semi-equal color.
* *sigmaSpace* - Filter the specified value in the coordinate space. A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough
**NOTE**
| |
# Source repository: jcwright77/pleiades
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import Iterable
from warnings import warn, simplefilter
import math
import numpy as np
from scipy.special import ellipk, ellipe
from multiprocessing import Pool, sharedctypes
from pleiades.mesh import Mesh
import pleiades.checkvalue as cv
from pleiades.transforms import rotate
class FieldsOperator(metaclass=ABCMeta):
    """Mixin class for computing fields on meshes
    Parameters
    ----------
    mesh : pleiades.Mesh object, optional
        The mesh to use for calculating fields
    rank : int (1 or 2)
        Indicator of whether the current attribute is a scalar or vector
    Variables
    ---------
    current : float or ndarray
        Current values in this object
    rzw : ndarray or list of ndarray
        Nx3 arrays of centroid positions and weights
    mesh : pleiades.Mesh object
        The mesh to use for calculating fields
    """
    def __init__(self, mesh=None, rank=1, **kwargs):
        # mesh should accept 2d, 3d or 2 1d or 2 2d)
        # Cached Green's functions; populated lazily by _compute_greens().
        self._gpsi = None
        self._gBR = None
        self._gBZ = None
        # Only rank-1 operators track their own cache validity; rank-2
        # operators derive it from their member rank-1 sets (see
        # _compute_greens below).
        if rank == 1:
            self._uptodate = False
        self.rank = rank
        # Assigning through the property below triggers cache invalidation.
        self.mesh = mesh
    @abstractproperty
    def current(self):
        # Subclasses supply the current value(s): scalar for rank 1,
        # vector for rank 2.
        pass
    @abstractproperty
    def rzw(self):
        # Subclasses supply filament positions/weights (Nx3 or list thereof).
        pass
    @property
    def mesh(self):
        return self._mesh
    @mesh.setter
    @cv.flag_greens_on_set
    def mesh(self, mesh):
        # Coerce raw arrays to a Mesh; the decorator marks the cached
        # Green's functions stale whenever the mesh changes.
        if not isinstance(mesh, Mesh) and mesh is not None:
            mesh = Mesh.from_array(mesh)
        self._mesh = mesh
    def gpsi(self, mesh=None):
        """Compute the Green's function for magnetic flux, :math:`psi`.
        Parameters
        ----------
        mesh : ndarray, optional
            An Nx2 array of points representing (R, Z) coordinates at which to
            calculate the magnetic flux. Defaults to None, in which case the
            CurrentFilamentSet.mesh attribute is used.
        Returns
        -------
        gpsi : ndarray
            1D array representing the Green's function for flux and whose size
            is equal to the number of mesh.
        """
        if mesh is None:
            # Use (and lazily refresh) the cached value for self.mesh.
            if not self._uptodate:
                self._compute_greens()
            return self._gpsi
        # Ad-hoc mesh: compute directly without touching the cache.
        return compute_greens(self.rzw, Mesh.to_points(mesh))[0]
    def gBR(self, mesh=None):
        """Compute the Green's function for the radial magnetic field, BR
        Parameters
        ----------
        mesh : ndarray, optional
            An Nx2 array of points representing (R, Z) coordinates at which to
            calculate BR. Defaults to None, in which case the
            CurrentFilamentSet.mesh attribute is used.
        Returns
        -------
        gBR : ndarray
            1D array representing the Green's function for BR and whose size
            is equal to the number of mesh.
        """
        if mesh is None:
            # Use (and lazily refresh) the cached value for self.mesh.
            if not self._uptodate:
                self._compute_greens()
            return self._gBR
        return compute_greens(self.rzw, Mesh.to_points(mesh))[1]
    def gBZ(self, mesh=None):
        """Compute the Green's function for the vertical magnetic field, BZ
        Parameters
        ----------
        mesh : ndarray, optional
            An Nx2 array of points representing (R, Z) coordinates at which to
            calculate BZ. Defaults to None, in which case the
            CurrentFilamentSet.mesh attribute is used.
        Returns
        -------
        gBZ : ndarray
            1D array representing the Green's function for BZ and whose size
            is equal to the number of mesh.
        """
        if mesh is None:
            # Use (and lazily refresh) the cached value for self.mesh.
            if not self._uptodate:
                self._compute_greens()
            return self._gBZ
        return compute_greens(self.rzw, Mesh.to_points(mesh))[2]
    def psi(self, current=None, mesh=None):
        """Compute the magnetic flux, :math:`psi`.
        Parameters
        ----------
        current : float, optional
            Specify a current value in amps to use instead of
            CurrentFilamentSet.current. Defaults to None, in which case the
            current attribute is used to calculate the flux.
        mesh : ndarray, optional
            An Nx2 array of points representing (R, Z) coordinates at which to
            calculate the magnetic flux. Defaults to None, in which case the
            CurrentFilamentSet.mesh attribute is used.
        Returns
        -------
        psi : ndarray
        """
        current = current if current is not None else self.current
        # rank 1: scalar current scales the Green's function.
        # rank 2: matrix product over member filament sets.
        # NOTE(review): any other rank falls through and returns None.
        if self.rank == 1:
            return current*self.gpsi(mesh=mesh)
        if self.rank == 2:
            return current @ self.gpsi(mesh=mesh)
    def BR(self, current=None, mesh=None):
        """Compute the radial component of the magnetic field, BR.
        Parameters
        ----------
        current : float, optional
            Specify a current value to override the current attribute for
            calculating the field. Defaults to None, which causes the current
            attribute to be used for the calculation
        Returns
        -------
        BR : np.array
        """
        current = current if current is not None else self.current
        if self.rank == 1:
            return current*self.gBR(mesh=mesh)
        if self.rank == 2:
            return current @ self.gBR(mesh=mesh)
    def BZ(self, current=None, mesh=None):
        """Compute the z component of the magnetic field, BZ.
        Parameters
        ----------
        current : float, optional
            Specify a current value to override the current attribute for
            calculating the field. Defaults to None, which causes the current
            attribute to be used for the calculation
        Returns
        -------
        BZ : np.array
        """
        current = current if current is not None else self.current
        if self.rank == 1:
            return current*self.gBZ(mesh=mesh)
        if self.rank == 2:
            return current @ self.gBZ(mesh=mesh)
    def _compute_greens(self):
        """Compute and assign the Green's functions for psi, BR, and BZ"""
        # Calculate Green's functions
        if self.rank == 1:
            gpsi, gBR, gBZ = compute_greens(self.rzw, Mesh.to_points(self.mesh))
        if self.rank == 2:
            # Stack each member set's 1D Green's functions into (m, n) rows.
            # NOTE(review): this path assumes self is iterable over rank-1
            # sets and exposes self.R — provided by subclasses; confirm.
            m = len(self.current)
            n = len(self.R.ravel())
            gpsi = np.empty((m, n))
            gBR = np.empty((m, n))
            gBZ = np.empty((m, n))
            for i, cset in enumerate(self):
                gpsi[i, :] = cset.gpsi().ravel()
                gBR[i, :] = cset.gBR().ravel()
                gBZ[i, :] = cset.gBZ().ravel()
        self._gpsi = gpsi
        self._gBR = gBR
        self._gBZ = gBZ
        # Notify instance that the Green's functions are up to date only if it's
        # rank 1. Rank 2 FieldOperators get their status from associated rank 1s
        if self.rank == 1:
            self._uptodate = True
def compute_greens(rzw, rz_pts):
    """Compute axisymmetric Green's functions for magnetic fields.

    Dispatches to the 1D implementation for a single Nx3 filament array,
    or to the 2D implementation when given a list of such arrays.

    Parameters
    ----------
    rzw : ndarray or iterable of ndarray
        An Nx3 array whose columns are r locations, z locations, and current
        weights respectively for the current filaments.
    rz_pts : Nx2 np.array
        An Nx2 array whose columns are r locations and z locations for the
        mesh points where we want to calculate the Green's functions.

    Returns
    -------
    tuple
        3-tuple of 1D np.array representing the Green's function for psi,
        BR, and Bz respectively.
    """
    handler = _compute_greens_2d if isinstance(rzw, list) else _compute_greens_1d
    return handler(rzw, rz_pts)
def _compute_greens_1d(rzw, rz_pts):
"""Compute axisymmetric Green's functions for magnetic fields
Parameters
----------
rzw: Nx3 np.array
An Nx3 array whose columns are r locations, z locations, and current
weights respectively for the current filaments.
rz_pts: Nx2 np.array
An Nx2 array whose columns are r locations and z locations for the mesh
points where we want to calculate the Green's functions.
Returns
-------
tuple :
3-tuple of 1D np.array representing the Green's function for psi, BR,
and Bz respectively.
"""
simplefilter('ignore', RuntimeWarning)
# Begin calculation of Green's functions based on vector potential
# psi = R*A_phi from a current loop at r0, z0 on a mesh specified by
# r and z in cylindrical coordinates and with SI units.
r, z = rz_pts[:, 0], rz_pts[:, 1]
n = len(r)
gpsi = np.zeros(n)
gBR = np.zeros(n)
gBZ = np.zeros(n)
r2 = r*r
# Prefactor c1 for vector potential is mu_0/4pi = 1E-7
c1 = 1E-7
for r0, z0, wgt in rzw:
# Check if the coil position is close to 0 if so skip it
if np.isclose(r0, 0, rtol=0, atol=1E-12):
continue
# Compute factors that are reused in equations
fac0 = (z - z0)*(z - z0)
d = np.sqrt(fac0 + (r + r0)*(r + r0))
d_ = np.sqrt(fac0 + (r - r0)*(r - r0))
k_2 = 4*r*r0 / (d*d)
K = ellipk(k_2)
E = ellipe(k_2)
denom = d*d_ *d_
fac1 = K*d_ *d_
fac2 = (fac0 + r2 + r0*r0)*E
# Compute Green's functions for psi, BR, BZ
gpsi_tmp = wgt*c1*r*r0*4 / d / k_2*((2 - k_2)*K - 2*E)
gBR_tmp = -2*wgt*c1*(z - z0)*(fac1 - fac2) / (r*denom)
gBZ_tmp = 2*wgt*c1*(fac1 - (fac2 - 2*r0*r0*E)) / denom
# Correct for infinities and add sum
gpsi_tmp[~np.isfinite(gpsi_tmp)] = 0
gpsi += gpsi_tmp
gBR_tmp[~np.isfinite(gBR_tmp)] = 0
gBR += gBR_tmp
gBZ_tmp[~np.isfinite(gBZ_tmp)] = 0
gBZ += gBZ_tmp
return gpsi, gBR, gBZ
def _compute_greens_2d(rzw_list, rz_pts):
"""Compute axisymmetric Green's functions for magnetic fields
Parameters
----------
rzw: list
A list of Nx3 arrays whose columns are r locations, z locations, and
current weights respectively for the current filaments.
rz_pts: Nx2 np.array
An Nx2 array whose columns are r locations and z locations for the mesh
points where we want to calculate the Green's functions.
Returns
-------
tuple :
3-tuple of 1D np.array representing the Green's function for psi, BR,
and Bz respectively.
"""
simplefilter('ignore', RuntimeWarning)
# Begin calculation of Green's functions based on vector potential
# psi = R*A_phi from a current loop | |
Counts tags of Design.
"""
pass
def test_portals_id_designs_nk_tags_delete(self):
"""
Test case for portals_id_designs_nk_tags_delete
Deletes all tags of this model.
"""
pass
def test_portals_id_designs_nk_tags_fk_delete(self):
"""
Test case for portals_id_designs_nk_tags_fk_delete
Delete a related item by id for tags.
"""
pass
def test_portals_id_designs_nk_tags_fk_get(self):
"""
Test case for portals_id_designs_nk_tags_fk_get
Find a related item by id for tags.
"""
pass
def test_portals_id_designs_nk_tags_fk_put(self):
"""
Test case for portals_id_designs_nk_tags_fk_put
Update a related item by id for tags.
"""
pass
def test_portals_id_designs_nk_tags_get(self):
"""
Test case for portals_id_designs_nk_tags_get
Queries tags of Design.
"""
pass
def test_portals_id_designs_nk_tags_post(self):
"""
Test case for portals_id_designs_nk_tags_post
Creates a new instance in tags of this model.
"""
pass
def test_portals_id_designs_nk_tags_rel_fk_delete(self):
"""
Test case for portals_id_designs_nk_tags_rel_fk_delete
Remove the tags relation to an item by id.
"""
pass
def test_portals_id_designs_nk_tags_rel_fk_head(self):
"""
Test case for portals_id_designs_nk_tags_rel_fk_head
Check the existence of tags relation to an item by id.
"""
pass
def test_portals_id_designs_nk_tags_rel_fk_put(self):
"""
Test case for portals_id_designs_nk_tags_rel_fk_put
Add a related item by id for tags.
"""
pass
def test_portals_id_designs_nk_team_get(self):
"""
Test case for portals_id_designs_nk_team_get
Fetches belongsTo relation team.
"""
pass
def test_portals_id_designs_nk_template_get(self):
"""
Test case for portals_id_designs_nk_template_get
Fetches belongsTo relation template.
"""
pass
def test_portals_id_designs_post(self):
"""
Test case for portals_id_designs_post
Creates a new instance in designs of this model.
"""
pass
def test_portals_id_exists_get(self):
"""
Test case for portals_id_exists_get
Check whether a model instance exists in the data source.
"""
pass
def test_portals_id_get(self):
"""
Test case for portals_id_get
Find a model instance by {{id}} from the data source.
"""
pass
def test_portals_id_head(self):
"""
Test case for portals_id_head
Check whether a model instance exists in the data source.
"""
pass
def test_portals_id_image_folders_count_get(self):
"""
Test case for portals_id_image_folders_count_get
Counts imageFolders of Portal.
"""
pass
def test_portals_id_image_folders_delete(self):
"""
Test case for portals_id_image_folders_delete
Deletes all imageFolders of this model.
"""
pass
def test_portals_id_image_folders_fk_delete(self):
"""
Test case for portals_id_image_folders_fk_delete
Delete a related item by id for imageFolders.
"""
pass
def test_portals_id_image_folders_fk_get(self):
"""
Test case for portals_id_image_folders_fk_get
Find a related item by id for imageFolders.
"""
pass
def test_portals_id_image_folders_fk_put(self):
"""
Test case for portals_id_image_folders_fk_put
Update a related item by id for imageFolders.
"""
pass
def test_portals_id_image_folders_get(self):
"""
Test case for portals_id_image_folders_get
Queries imageFolders of Portal.
"""
pass
def test_portals_id_image_folders_post(self):
"""
Test case for portals_id_image_folders_post
Creates a new instance in imageFolders of this model.
"""
pass
def test_portals_id_image_folders_rel_fk_delete(self):
"""
Test case for portals_id_image_folders_rel_fk_delete
Remove the imageFolders relation to an item by id.
"""
pass
def test_portals_id_image_folders_rel_fk_head(self):
"""
Test case for portals_id_image_folders_rel_fk_head
Check the existence of imageFolders relation to an item by id.
"""
pass
def test_portals_id_image_folders_rel_fk_put(self):
"""
Test case for portals_id_image_folders_rel_fk_put
Add a related item by id for imageFolders.
"""
pass
def test_portals_id_invitation_tickets_fk_delete(self):
"""
Test case for portals_id_invitation_tickets_fk_delete
Delete InvitationTickets for this Portal
"""
pass
def test_portals_id_invitation_tickets_fk_get(self):
"""
Test case for portals_id_invitation_tickets_fk_get
Get InvitationTicket by Id for this Portal
"""
pass
def test_portals_id_invitation_tickets_get(self):
"""
Test case for portals_id_invitation_tickets_get
List InvitationTickets for this Portal
"""
pass
def test_portals_id_logo_put(self):
"""
Test case for portals_id_logo_put
Change logo
"""
pass
def test_portals_id_members_count_get(self):
"""
Test case for portals_id_members_count_get
Counts members of Portal.
"""
pass
def test_portals_id_members_delete(self):
"""
Test case for portals_id_members_delete
Deletes all members of this model.
"""
pass
def test_portals_id_members_fk_delete(self):
"""
Test case for portals_id_members_fk_delete
Delete a related item by id for members.
"""
pass
def test_portals_id_members_fk_get(self):
"""
Test case for portals_id_members_fk_get
Find a related item by id for members.
"""
pass
def test_portals_id_members_fk_put(self):
"""
Test case for portals_id_members_fk_put
Update a related item by id for members.
"""
pass
def test_portals_id_members_get(self):
"""
Test case for portals_id_members_get
Queries members of Portal.
"""
pass
def test_portals_id_members_post(self):
"""
Test case for portals_id_members_post
Creates a new instance in members of this model.
"""
pass
def test_portals_id_members_rel_fk_delete(self):
"""
Test case for portals_id_members_rel_fk_delete
Remove the members relation to an item by id.
"""
pass
def test_portals_id_members_rel_fk_head(self):
"""
Test case for portals_id_members_rel_fk_head
Check the existence of members relation to an item by id.
"""
pass
def test_portals_id_members_rel_fk_put(self):
"""
Test case for portals_id_members_rel_fk_put
Add a related item by id for members.
"""
pass
def test_portals_id_patch(self):
"""
Test case for portals_id_patch
Patch attributes for a model instance and persist it into the data source.
"""
pass
def test_portals_id_permission_delete(self):
"""
Test case for portals_id_permission_delete
Deletes permission of this model.
"""
pass
def test_portals_id_permission_get(self):
"""
Test case for portals_id_permission_get
Fetches hasOne relation permission.
"""
pass
def test_portals_id_permission_post(self):
"""
Test case for portals_id_permission_post
Creates a new instance in permission of this model.
"""
pass
def test_portals_id_permission_put(self):
"""
Test case for portals_id_permission_put
Update permission of this model.
"""
pass
def test_portals_id_portal_members_count_get(self):
"""
Test case for portals_id_portal_members_count_get
Counts portalMembers of Portal.
"""
pass
def test_portals_id_portal_members_delete(self):
"""
Test case for portals_id_portal_members_delete
Deletes all portalMembers of this model.
"""
pass
def test_portals_id_portal_members_fk_delete(self):
"""
Test case for portals_id_portal_members_fk_delete
Delete a related item by id for portalMembers.
"""
pass
def test_portals_id_portal_members_fk_get(self):
"""
Test case for portals_id_portal_members_fk_get
Find a related item by id for portalMembers.
"""
pass
def test_portals_id_portal_members_fk_put(self):
"""
Test case for portals_id_portal_members_fk_put
Update a related item by id for portalMembers.
"""
pass
def test_portals_id_portal_members_get(self):
"""
Test case for portals_id_portal_members_get
Queries portalMembers of Portal.
"""
pass
def test_portals_id_portal_members_post(self):
"""
Test case for portals_id_portal_members_post
Creates a new instance in portalMembers of this model.
"""
pass
def test_portals_id_put(self):
"""
Test case for portals_id_put
Replace attributes for a model instance and persist it into the data source.
"""
pass
def test_portals_id_replace_post(self):
"""
Test case for portals_id_replace_post
Replace attributes for a model instance and persist it into the data source.
"""
pass
def test_portals_id_team_get(self):
"""
Test case for portals_id_team_get
Fetches belongsTo relation team.
"""
pass
def test_portals_id_template_folders_count_get(self):
"""
Test case for portals_id_template_folders_count_get
Counts templateFolders of Portal.
"""
pass
def test_portals_id_template_folders_delete(self):
"""
Test case for portals_id_template_folders_delete
Deletes all templateFolders of this model.
"""
pass
def test_portals_id_template_folders_fk_delete(self):
"""
Test case for portals_id_template_folders_fk_delete
Delete a related item by id for templateFolders.
"""
pass
def test_portals_id_template_folders_fk_get(self):
"""
Test case for portals_id_template_folders_fk_get
Find a related item by id for templateFolders.
"""
pass
def test_portals_id_template_folders_fk_put(self):
"""
Test case for portals_id_template_folders_fk_put
Update a related item by id for templateFolders.
"""
pass
def test_portals_id_template_folders_get(self):
"""
Test case for portals_id_template_folders_get
Queries templateFolders of Portal.
"""
pass
def test_portals_id_template_folders_nk_templates_fk_rel_delete(self):
"""
Test case for portals_id_template_folders_nk_templates_fk_rel_delete
Unlink folder with Template and Portal
"""
pass
def test_portals_id_template_folders_nk_templates_fk_rel_put(self):
"""
Test case for portals_id_template_folders_nk_templates_fk_rel_put
Link folder with Template and Portal
"""
pass
def test_portals_id_template_folders_post(self):
"""
Test case for portals_id_template_folders_post
Creates a new instance in templateFolders of this model.
"""
pass
def test_portals_id_template_folders_root_templates_get(self):
"""
Test case for portals_id_template_folders_root_templates_get
List templates on root folder
"""
pass
def test_portals_id_template_rels_count_get(self):
"""
Test case for portals_id_template_rels_count_get
Counts templateRels of Portal.
"""
pass
def test_portals_id_template_rels_delete(self):
"""
Test case for portals_id_template_rels_delete
Deletes all templateRels of this model.
"""
pass
def test_portals_id_template_rels_fk_delete(self):
"""
Test case for portals_id_template_rels_fk_delete
Delete a related item by id for templateRels.
"""
pass
def test_portals_id_template_rels_fk_get(self):
"""
Test case for portals_id_template_rels_fk_get
Find a related item by id for templateRels.
"""
pass
def test_portals_id_template_rels_fk_put(self):
"""
Test case for portals_id_template_rels_fk_put
Update a related item by id for templateRels.
"""
pass
def test_portals_id_template_rels_get(self):
"""
Test case for portals_id_template_rels_get
Queries templateRels of Portal.
"""
pass
def test_portals_id_template_rels_post(self):
"""
Test case for portals_id_template_rels_post
Creates a new instance in templateRels of this model.
"""
pass
def test_portals_id_templates_count_get(self):
"""
Test case for portals_id_templates_count_get
Counts templates of Portal.
"""
pass
def test_portals_id_templates_delete(self):
"""
Test case for portals_id_templates_delete
Deletes all templates of this model.
"""
pass
def test_portals_id_templates_fk_delete(self):
"""
Test case for portals_id_templates_fk_delete
Delete a related item by id for templates.
"""
pass
def test_portals_id_templates_fk_designs_generate_bulk_post(self):
"""
Test case for portals_id_templates_fk_designs_generate_bulk_post
Generate Design from Template
"""
pass
def test_portals_id_templates_fk_designs_generate_post(self):
"""
Test case for portals_id_templates_fk_designs_generate_post
Generate Design from Template
"""
pass
def test_portals_id_templates_fk_get(self):
"""
Test case for portals_id_templates_fk_get
Find a related item by id for templates.
"""
pass
def test_portals_id_templates_fk_put(self):
"""
Test case for portals_id_templates_fk_put
Update a related item by id for templates.
"""
pass
def test_portals_id_templates_get(self):
"""
Test case for portals_id_templates_get
Queries templates of Portal.
"""
pass
def test_portals_id_templates_post(self):
"""
Test case for | |
p.ForeignKeyField(Operation, null=False)
class Meta:
indexes = ((("command", "operator", "operation"), True),)
database = mythic_db
    def to_json(self):
        """Serialize this row to a plain dict for JSON API responses.

        Follows foreign keys (command, operator, operation), so those
        related rows must be loaded or lazily loadable when called.
        """
        r = {
            "id": getattr(self, "id"),
            "command": self.command.cmd,
            "command_id": self.command.id,
            "payload_type": self.command.payload_type.ptype,
            "operator": self.operator.username,
            "operation": self.operation.name
        }
        return r
    def __str__(self):
        # Render the row as its JSON serialization for logging/debugging.
        return json.dumps(self.to_json())
# because operators and operations are a many-to-many relationship, we need a join table to facilitate
# this means operator class doesn't mention operation, and operation doesn't mention operator - odd, I know
class OperatorOperation(p.Model):
    """Join table for the many-to-many Operator <-> Operation relationship."""
    operator = p.ForeignKeyField(Operator)
    operation = p.ForeignKeyField(Operation)
    timestamp = p.DateTimeField(default=datetime.datetime.utcnow, null=False)
    base_disabled_commands = p.ForeignKeyField(DisabledCommandsProfile, null=True)
    view_mode = p.TextField(null=False, default="operator")
    class Meta:
        # Each (operator, operation) pair may appear only once.
        indexes = ((("operator", "operation"), True),)
        database = mythic_db
    def to_json(self):
        """Serialize this membership row to a plain dict for JSON responses."""
        r = {
            "id": getattr(self, "id"),
            "operator": self.operator.username,
            "operation": self.operation.name,
            "timestamp": self.timestamp.strftime("%m/%d/%Y %H:%M:%S"),
            # base_disabled_commands is nullable; guard the dereference so a
            # row without a profile serializes to None instead of raising.
            "base_disabled_commands": self.base_disabled_commands.name if self.base_disabled_commands is not None else None,
            # bug fix: was self.view_mod, which raised AttributeError.
            "view_mode": self.view_mode
        }
        return r
    def __str__(self):
        # Render the row as its JSON serialization for logging/debugging.
        return json.dumps(self.to_json())
# an instance of a c2profile
class C2Profile(p.Model):
    """An instance of a C2 (command-and-control) profile."""
    name = p.TextField(null=False, unique=True)
    description = p.TextField(null=True, default="")
    # list of payload types that are supported (i.e. have a corresponding module created for them on the client side
    # This has information about supported payload types, but that information is in a separate join table
    creation_time = p.DateTimeField(default=datetime.datetime.utcnow, null=False)
    # indicates if the c2 profile is running
    running = p.BooleanField(null=False, default=False)
    last_heartbeat = p.DateTimeField(null=False, default=datetime.datetime.utcnow)
    # indicates if the c2 profile container is up and able to receive tasking
    container_running = p.BooleanField(null=False, default=False)
    author = p.TextField(null=False, default="")
    # identify if this is a p2p protocol or not, we treat those a bit differently
    is_p2p = p.BooleanField(null=False, default=False)
    # server_routed means the server specifies the specific route for sending messages
    is_server_routed = p.BooleanField(null=False, default=False)
    # indicate if mythic should do the encryption/decryption for the profile
    # or if the profile will handle it
    mythic_encrypts = p.BooleanField(null=False, default=True)
    deleted = p.BooleanField(null=False, default=False)
    class Meta:
        database = mythic_db
    def to_json(self):
        """Serialize this profile to a plain dict for JSON API responses."""
        r = {
            "id": getattr(self, "id"),
            "name": self.name,
            "description": self.description,
            "creation_time": self.creation_time.strftime("%m/%d/%Y %H:%M:%S"),
            "running": self.running,
            "last_heartbeat": self.last_heartbeat.strftime("%m/%d/%Y %H:%M:%S"),
            "container_running": self.container_running,
            "author": self.author,
            "is_p2p": self.is_p2p,
            "is_server_routed": self.is_server_routed,
            "mythic_encrypts": self.mythic_encrypts,
            "deleted": self.deleted
        }
        return r
    def __str__(self):
        # Render the row as its JSON serialization for logging/debugging.
        return json.dumps(self.to_json())
# this is a join table between the many to many relationship between payload_types and c2profiles
# ex: apfell PayloadType instance should be tied to default/twitter/etc c2profiles
# and default c2profile should be tied to apfell, apfell-swift, etc
class PayloadTypeC2Profile(p.Model):
    """Join row linking one PayloadType to one C2Profile it supports."""
    payload_type = p.ForeignKeyField(PayloadType)
    c2_profile = p.ForeignKeyField(C2Profile)
    class Meta:
        # each (payload_type, c2_profile) pairing may exist only once
        indexes = ((("payload_type", "c2_profile"), True),)
        database = mythic_db
    def to_json(self):
        """Serialize the link plus denormalized names/ids of both sides."""
        r = {
            "id": getattr(self, "id"),
            "payload_type": self.payload_type.ptype,
            "payload_type_id": self.payload_type.id,
            "c2_profile": self.c2_profile.name,
            "c2_profile_id": self.c2_profile.id,
            "c2_profile_description": self.c2_profile.description
        }
        return r
    def __str__(self):
        # JSON string form of the row, convenient for logging
        return json.dumps(self.to_json())
# this is an instance of a payload
class Payload(p.Model):
    """One generated payload instance: who built it, from which payload type,
    for which operation, and the state of its build."""
    # this is actually a sha256 from other information about the payload
    uuid = p.TextField(unique=True, null=False)
    # tag a payload with information like spearphish, custom bypass, lat mov, etc (indicates "how")
    tag = p.TextField(null=True)
    # creator of the payload, cannot be null! must be attributed to somebody (indicates "who")
    operator = p.ForeignKeyField(Operator, null=False)
    creation_time = p.DateTimeField(default=datetime.datetime.utcnow, null=False)
    # this is fine because this is an instance of a payload, so it's tied to one PayloadType
    payload_type = p.ForeignKeyField(PayloadType, null=False)
    # this will signify if a current callback made / spawned a new callback that's checking in
    # this helps track how we're getting callbacks (which payloads/tags/parents/operators)
    pcallback = p.DeferredForeignKey("Callback", null=True)
    # c2_profile = p.ForeignKeyField(C2Profile, null=False) # identify which C2 profile is being used
    operation = p.ForeignKeyField(Operation, null=False)
    # self-reference: the inner payload this one wraps, if any
    wrapped_payload = p.ForeignKeyField("self", null=True)
    deleted = p.BooleanField(null=False, default=False)
    # if the payload is in the build process: building, success, error
    build_container = p.TextField(null=False)
    build_phase = p.TextField(null=False, default="building")
    # capture error or any other info
    build_message = p.TextField(null=False, default="")
    # if there is a slack webhook for the operation, decide if this payload should generate an alert or not
    callback_alert = p.BooleanField(null=False, default=True)
    # when dealing with auto-generated payloads for lateral movement or spawning new callbacks
    auto_generated = p.BooleanField(null=False, default=False)
    # task that triggered creating this payload, if any
    task = p.DeferredForeignKey("Task", null=True)
    # file record of the built payload, if any
    file_id = p.DeferredForeignKey("FileMeta", null=True)
    class Meta:
        database = mythic_db
    def to_json(self):
        """Serialize to a JSON-safe dict; FK rows are flattened to names/ids
        or nested to_json() dicts; timestamps as MM/DD/YYYY HH:MM:SS."""
        r = {
            "id": getattr(self, "id"),
            "uuid": self.uuid,
            "tag": self.tag,
            "operator": self.operator.username,
            "creation_time": self.creation_time.strftime("%m/%d/%Y %H:%M:%S"),
            "payload_type": self.payload_type.ptype,
            "pcallback": self.pcallback.id if self.pcallback is not None else None,
            "operation": self.operation.name,
            "wrapped_payload": self.wrapped_payload.uuid if self.wrapped_payload is not None else None,
            "deleted": self.deleted,
            "build_container": self.build_container,
            "build_phase": self.build_phase,
            "build_message": self.build_message,
            "callback_alert": self.callback_alert,
            "auto_generated": self.auto_generated,
            "task": self.task.to_json() if self.task is not None else None,
            "file_id": self.file_id.to_json() if self.file_id is not None else None
        }
        return r
    def __str__(self):
        # JSON string form of the row, convenient for logging
        return json.dumps(self.to_json())
# tracks which payloads are present on which hosts within an operation
class PayloadOnHost(p.Model):
    """Record that a given payload exists on a given host in an operation."""
    host = p.TextField(null=False)
    payload = p.ForeignKeyField(Payload, null=False)
    deleted = p.BooleanField(default=False, null=False)
    operation = p.ForeignKeyField(Operation, null=False)
    timestamp = p.DateTimeField(default=datetime.datetime.utcnow, null=False)
    # task that placed/registered the payload on the host, if any
    task = p.DeferredForeignKey("Task", null=True)
    class Meta:
        database = mythic_db
    def to_json(self):
        """Serialize with the full nested payload dict; timestamps as
        MM/DD/YYYY HH:MM:SS."""
        r = {
            "id": getattr(self, "id"),
            "host": self.host,
            "payload": self.payload.to_json(),
            "deleted": self.deleted,
            "operation": self.operation.name,
            "timestamp": self.timestamp.strftime("%m/%d/%Y %H:%M:%S"),
            "task": self.task.to_json() if self.task is not None else None
        }
        return r
    def __str__(self):
        # JSON string form of the row, convenient for logging
        return json.dumps(self.to_json())
class BuildParameterInstance(p.Model):
    """Concrete value a user chose for one build parameter of one payload."""
    # this is the instance of actual values used to create a specific payload instance
    build_parameter = p.ForeignKeyField(BuildParameter, null=False)
    payload = p.ForeignKeyField(Payload, null=False)
    parameter = p.TextField(null=True)  # what the user picked
    class Meta:
        database = mythic_db
    def to_json(self):
        """Serialize with the nested build parameter definition and the chosen value."""
        r = {
            "id": getattr(self, "id"),
            "build_parameter": self.build_parameter.to_json(),
            "payload": self.payload.uuid,
            "parameter": self.parameter
        }
        return r
    def __str__(self):
        # JSON string form of the row, convenient for logging
        return json.dumps(self.to_json())
# a specific payload instance has multiple commands associated with it, so we need to track that
# commands can be loaded/unloaded at run time, so we need to track creation_time
class PayloadCommand(p.Model):
    """Which command (at which version) is baked into which payload."""
    payload = p.ForeignKeyField(Payload, null=False)
    # this is how we can tell what commands are in a payload by default and if they might be out of date
    command = p.ForeignKeyField(Command, null=False)
    creation_time = p.DateTimeField(default=datetime.datetime.utcnow, null=False)
    # version of a command when the payload is created might differ from later in time, so save it off
    version = p.IntegerField(null=False)
    class Meta:
        # a command can appear at most once per payload
        indexes = ((("payload", "command"), True),)
        database = mythic_db
    def to_json(self):
        """Serialize with payload uuid, command name, and the captured version."""
        r = {
            "id": getattr(self, "id"),
            "payload": self.payload.uuid,
            "command": self.command.cmd,
            "creation_time": self.creation_time.strftime("%m/%d/%Y %H:%M:%S"),
            "version": self.version
        }
        return r
    def __str__(self):
        # JSON string form of the row, convenient for logging
        return json.dumps(self.to_json())
# C2 profiles will have various parameters that need to be stamped in at payload creation time
# this will specify the name and value to look for
class C2ProfileParameters(p.Model):
    """Definition of one configurable parameter of a C2 profile, to be
    stamped into payloads at creation time."""
    c2_profile = p.ForeignKeyField(C2Profile, null=False)
    # what the parameter is called. ex: Callback address
    description = p.TextField(null=False)
    name = p.TextField(null=False)  # what the stamping should look for. ex: XXXXX
    # Hint for the user when setting the parameters
    default_value = p.TextField(null=False, default="")
    # if true, the value is generated from format_string rather than typed by the user
    randomize = p.BooleanField(null=False, default=False)
    format_string = p.TextField(null=False, default="")
    parameter_type = p.TextField(null=False, default="String")
    required = p.BooleanField(null=False, default=True)
    # regex used to validate user-supplied values
    verifier_regex = p.TextField(null=False, default="")
    class Meta:
        # parameter names are unique within a single c2 profile
        indexes = ((("c2_profile", "name"), True),)
        database = mythic_db
    def to_json(self):
        """Serialize the parameter definition to a JSON-safe dict."""
        r = {
            "id": getattr(self, "id"),
            "c2_profile": self.c2_profile.name,
            "description": self.description,
            "name": self.name,
            "default_value": self.default_value,
            "randomize": self.randomize,
            "format_string": self.format_string,
            "parameter_type": self.parameter_type,
            "required": self.required,
            "verifier_regex": self.verifier_regex
        }
        return r
    def __str__(self):
        # JSON string form of the row, convenient for logging
        return json.dumps(self.to_json())
class Callback(p.Model):
agent_callback_id = p.TextField(unique=True, null=False, default=gen_uuid)
init_callback = p.DateTimeField(default=datetime.datetime.utcnow, null=False)
last_checkin = p.DateTimeField(default=datetime.datetime.utcnow, null=False)
user = p.CharField(null=False)
host = p.CharField(null=False)
pid = p.IntegerField(null=False)
ip = p.CharField(max_length=100, null=False)
external_ip = p.TextField(null=True)
description = p.TextField(null=True)
operator = p.ForeignKeyField(Operator, null=False)
active = p.BooleanField(default=True, null=False)
# keep track of the parent callback from this one
pcallback = p.DeferredForeignKey("Callback", null=True)
# what payload is associated with this callback
registered_payload = p.ForeignKeyField(Payload, null=False)
integrity_level = p.IntegerField(null=True, default=2)
# an operator can lock a callback to themselves so that other users cannot issue commands as well
locked = p.BooleanField(default=False)
locked_operator = p.ForeignKeyField(
Operator, null=True, backref="locked_operator"
)
operation = p.ForeignKeyField(Operation, null=False)
# the following information comes from the c2 profile if it wants to provide some form of encryption
# the kind of encryption on this callback (aes, xor, rc4, etc)
encryption_type = p.CharField(null=True)
# base64 of the key to use to decrypt traffic
decryption_key = p.TextField(null=True)
# base64 of the key to use to encrypt traffic
encryption_key = p.TextField(null=True)
os = p.TextField(null=True)
architecture = p.TextField(null=True)
domain = p.TextField(null=True)
# associated socks information
port = p.IntegerField(null=True)
socks_task = p.DeferredForeignKey("Task", null=True)
# if you need to define extra context for a callback, like | |
<filename>model.py
# ******************************************************************************
# Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import math
import torch
import torch.nn as nn
from reparameterized_layers import DynamicLinear,DynamicConv2d
from parameterized_tensors import SparseTensor,TiedTensor
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
class DynamicNetworkBase(nn.Module):
    """Base class for networks built from sparse/tied dynamic layers.

    Provides pruning over all SparseTensor submodules and a parameter-count
    breakdown (sparse / tied / fixed / learnable).
    """
    def __init__(self):
        super(DynamicNetworkBase, self).__init__()
        self.split_state = False

    def prune(self,prune_fraction_fc,prune_fraction_conv,prune_fraction_fc_special = None):
        """Prune the smallest connections of every SparseTensor submodule.

        Conv tensors use prune_fraction_conv; the 10x100 tensor (the special
        fc layer, when present) uses prune_fraction_fc_special; every other
        fc tensor uses prune_fraction_fc.
        """
        for tensor in [m for m in self.modules() if isinstance(m, SparseTensor)]:
            if tensor.conv_tensor:
                tensor.prune_small_connections(prune_fraction_conv)
            elif tensor.s_tensor.size(0) == 10 and tensor.s_tensor.size(1) == 100:
                tensor.prune_small_connections(prune_fraction_fc_special)
            else:
                tensor.prune_small_connections(prune_fraction_fc)

    def get_model_size(self):
        """Return {'sparse', 'tied', 'fixed', 'learnable'} parameter counts.

        'fixed' counts parameters belonging to neither SparseTensor nor
        TiedTensor submodules; 'learnable' is the sum of all three.
        (The original recomputed sparse_params/is_sparse_param a second time,
        shadowing the helper's identical result; that duplication is removed.)
        """
        def get_tensors_and_test(tensor_type):
            # submodules of the given type plus an identity-based membership
            # test for their parameters
            relevant_tensors = [m for m in self.modules() if isinstance(m, tensor_type)]
            relevant_params = [p for t in relevant_tensors for p in t.parameters()]
            is_relevant_param = lambda q: any(q is y for y in relevant_params)
            return relevant_tensors, is_relevant_param

        sparse_tensors, is_sparse_param = get_tensors_and_test(SparseTensor)
        tied_tensors, is_tied_param = get_tensors_and_test(TiedTensor)

        sparse_size = sum(t.get_sparsity()[0].item() for t in sparse_tensors)

        tied_size = 0
        for k in tied_tensors:
            # each unique bank entry is stored once, so count it once
            unique_reps = k.weight_alloc.cpu().unique()
            subtensor_size = np.prod(list(k.bank.size())[1:])
            tied_size += unique_reps.size(0) * subtensor_size

        fixed_size = sum(p.data.nelement() for p in self.parameters()
                         if not is_sparse_param(p) and not is_tied_param(p))

        return {'sparse': sparse_size, 'tied': tied_size, 'fixed': fixed_size,
                'learnable': fixed_size + sparse_size + tied_size}
class mnist_mlp(DynamicNetworkBase):
    """784-300-100-10 MLP for MNIST built from (optionally sparse) DynamicLinear layers."""
    def __init__(self, initial_sparsity = 0.98,sparse = True,no_batch_norm = False):
        super(mnist_mlp, self).__init__()
        # bias is only enabled when batch norm is disabled (bias = no_batch_norm)
        self.fc1 = DynamicLinear(784, 300, initial_sparsity,bias = no_batch_norm,sparse = sparse)
        self.fc_int = DynamicLinear(300, 100, initial_sparsity,bias = no_batch_norm,sparse = sparse)
        #self.fc2 = DynamicLinear(100, 10, min(0.5,initial_sparsity),bias = False,sparse = sparse)
        self.fc2 = DynamicLinear(100, 10, initial_sparsity,bias = no_batch_norm,sparse = sparse)
        if no_batch_norm:
            # identity stand-ins so forward() can call bn* unconditionally
            self.bn1 = lambda x : x
            self.bn2 = lambda x : x
            self.bn3 = lambda x : x
        else:
            self.bn1 = nn.BatchNorm1d(300)
            self.bn2 = nn.BatchNorm1d(100)
            self.bn3 = nn.BatchNorm1d(10)
    def forward(self, x):
        # flatten to (N, 784); output is left un-activated (logits)
        x = F.relu(self.bn1(self.fc1(x.view(-1, 784))))
        x = F.relu(self.bn2(self.fc_int(x)))
        y = self.bn3(self.fc2(x))
        return y
#########Definition of wide resnets
class BasicBlock(nn.Module):
    """Wide-ResNet basic block: two BN-ReLU-Conv(3x3) stages with optional
    dropout and a 1x1 conv shortcut when the channel count changes."""
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0,widen_factor = 10,initial_sparsity = 0.5,sub_kernel_granularity = False,sparse = True):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = DynamicConv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False,initial_sparsity = initial_sparsity,sub_kernel_granularity = sub_kernel_granularity,sparse = sparse)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = DynamicConv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False,initial_sparsity = initial_sparsity,sub_kernel_granularity = sub_kernel_granularity,sparse = sparse)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # `and/or` idiom: a 1x1 conv shortcut only when in/out widths differ, else None
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                               padding=0, bias=False) or None
    def forward(self, x):
        # pre-activation: when widths differ, the shortcut also sees the
        # BN+ReLU-activated input (x is rebound below)
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """A stack of `nb_layers` residual blocks forming one Wide-ResNet stage."""
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0,widen_factor = 10,initial_sparsity = 0.5,sub_kernel_granularity = False,sparse = True):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate,widen_factor,initial_sparsity = initial_sparsity,
                    sub_kernel_granularity = sub_kernel_granularity,sparse = sparse)
    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate,widen_factor,initial_sparsity = 0.5,sub_kernel_granularity = False,sparse = True):
        # only the first block changes channels / applies the stage stride;
        # subsequent blocks are out_planes->out_planes with stride 1
        layers = []
        for i in range(int(nb_layers)):
            layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate,widen_factor = widen_factor,
                        initial_sparsity = initial_sparsity,sub_kernel_granularity = sub_kernel_granularity,sparse = sparse))
        return nn.Sequential(*layers)
    def forward(self, x):
        return self.layer(x)
class cifar10_WideResNet(DynamicNetworkBase):
    """Wide-ResNet for CIFAR-10 built from BasicBlocks whose 3x3 convolutions
    are (optionally sparse) DynamicConv2d layers; the classifier is a plain
    nn.Linear."""
    def __init__(self, depth, num_classes=10, widen_factor=1, dropRate=0.0,initial_sparsity_conv = 0.5,initial_sparsity_fc = 0.95,sub_kernel_granularity = 4,sparse = True):
        super(cifar10_WideResNet, self).__init__()
        nChannels = np.round(np.array([16, 16*widen_factor, 32*widen_factor, 64*widen_factor])).astype('int32')
        # depth must be 6n+4: n blocks per stage across the 3 stages
        assert((depth - 4) % 6 == 0)
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate,widen_factor = widen_factor,
                    initial_sparsity = initial_sparsity_conv,sub_kernel_granularity = sub_kernel_granularity,sparse = sparse)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate,widen_factor = widen_factor,
                    initial_sparsity = initial_sparsity_conv,sub_kernel_granularity = sub_kernel_granularity,sparse = sparse)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate,widen_factor = widen_factor,
                    initial_sparsity = initial_sparsity_conv,sub_kernel_granularity = sub_kernel_granularity,sparse = sparse)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3],num_classes) #DynamicLinear(nChannels[3], num_classes,initial_sparsity = initial_sparsity_fc,sparse = sparse)
        self.nChannels = nChannels[3]
        self.split_state = False
        self.reset_parameters()
    def reset_parameters(self):
        """Initialize convs with normal(0, sqrt(2/n)), BatchNorm to scale 1 /
        shift 0, and Linear biases to 0."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, DynamicConv2d):
                n = m.kernel_size * m.kernel_size * m.n_output_maps
                if m.sparse:
                    m.d_tensor.s_tensor.data.normal_(0, math.sqrt(2. / n))
                else:
                    m.d_tensor.bank.data.normal_(0, math.sqrt(2. / n))
            # NOTE: `if` (not `elif`) — BatchNorm/Linear are checked in a
            # second, independent chain from the conv checks above
            if isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        # pool the final feature map down with an 8x8 window, then classify
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return self.fc(out)
###Resnet Definition
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (expansion=4).

    Each convolution can be a vanilla nn.Conv2d or a sparse DynamicConv2d,
    selected independently for the 1x1 convs (vanilla_conv1) and the 3x3
    conv (vanilla_conv3).
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None,widen_factor = 1,vanilla_conv1 = True,vanilla_conv3 = True,initial_sparsity = 0.5,
                 sub_kernel_granularity = 4,sparse = True):
        super(Bottleneck, self).__init__()
        # widen_factor is accepted for interface compatibility; planes are used as-is
        adjusted_planes = planes  # np.round(widen_factor * planes).astype('int32')
        if vanilla_conv1:
            self.conv1 = nn.Conv2d(inplanes, adjusted_planes, kernel_size=1, bias=False)
            self.conv3 = nn.Conv2d(adjusted_planes, planes * 4, kernel_size=1, bias=False)
        else:
            self.conv1 = DynamicConv2d(inplanes, adjusted_planes, kernel_size=1, bias=False, initial_sparsity=initial_sparsity,
                                       sub_kernel_granularity=sub_kernel_granularity, sparse=sparse)
            self.conv3 = DynamicConv2d(adjusted_planes, planes * 4, kernel_size=1, bias=False, initial_sparsity=initial_sparsity,
                                       sub_kernel_granularity=sub_kernel_granularity, sparse=sparse)
        if vanilla_conv3:
            # BUG FIX: previously read `adjsuted_planes` (typo), raising a
            # NameError on the default vanilla_conv3=True path.
            self.conv2 = nn.Conv2d(adjusted_planes, adjusted_planes, kernel_size=3, stride=stride, padding=1, bias=False)
        else:
            self.conv2 = DynamicConv2d(adjusted_planes, adjusted_planes, kernel_size=3, stride=stride,
                                       padding=1, bias=False, initial_sparsity=initial_sparsity,
                                       sub_kernel_granularity=sub_kernel_granularity, sparse=sparse)
        self.bn1 = nn.BatchNorm2d(adjusted_planes)
        self.bn2 = nn.BatchNorm2d(adjusted_planes)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """conv-bn(-relu) x3 plus the identity (or downsampled) skip connection."""
        residual = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        return self.relu(out)
class ResNet(DynamicNetworkBase):
    def __init__(self, block, layers, num_classes=1000,widen_factor = 1,vanilla_downsample = True,vanilla_conv1 = True,vanilla_conv3 = True,
                 initial_sparsity_conv = 0.5,initial_sparsity_fc = 0.95,sub_kernel_granularity = 4,sparse = True):
        """Build an ImageNet-style ResNet from `block` with `layers[i]` blocks
        per stage.

        The vanilla_* flags choose plain nn.Conv2d vs DynamicConv2d for the
        block convs and the downsample path; the classifier is a DynamicLinear.
        """
        self.inplanes = np.round(64 * widen_factor).astype('int32')
        super(ResNet, self).__init__()
        self.widen_factor = widen_factor
        self.vanilla_conv1 = vanilla_conv1
        self.vanilla_conv3 = vanilla_conv3
        self.vanilla_downsample = vanilla_downsample
        self.initial_sparsity_conv = initial_sparsity_conv
        self.initial_sparsity_fc = initial_sparsity_fc
        self.sub_kernel_granularity = sub_kernel_granularity
        self.sparse = sparse
        # stem: 7x7/2 conv + BN + ReLU + 3x3/2 max pool
        self.conv1 = nn.Conv2d(3, np.round(64 * widen_factor).astype('int32'), kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(np.round(64 * widen_factor).astype('int32'))
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # four stages with channel widths 64/128/256/512 (times widen_factor)
        self.layer1 = self._make_layer(block, np.round(64 * widen_factor).astype('int32'), layers[0])
        self.layer2 = self._make_layer(block, np.round(64 * widen_factor).astype('int32')*2, layers[1], stride=2)
        self.layer3 = self._make_layer(block, np.round(64 * widen_factor).astype('int32')*4, layers[2], stride=2)
        self.layer4 = self._make_layer(block, np.round(64 * widen_factor).astype('int32')*8, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = DynamicLinear(np.round(64 * widen_factor).astype('int32') * block.expansion * 8, num_classes,initial_sparsity = self.initial_sparsity_fc,sparse = sparse)
        # normal(0, sqrt(2/n)) init for convs; BatchNorm scale/shift to 1/0
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, DynamicConv2d):
                n = m.kernel_size * m.kernel_size * m.n_output_maps
                if m.sparse:
                    m.d_tensor.s_tensor.data.normal_(0, math.sqrt(2. / n))
                else:
                    m.d_tensor.bank.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        """Create one ResNet stage: a strided/projected first block followed by
        `blocks - 1` identity blocks; updates self.inplanes as a side effect."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # projection shortcut to match spatial size / channel count
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False) if self.vanilla_downsample else \
                DynamicConv2d(self.inplanes, planes * block.expansion,kernel_size=1,stride=stride, bias=False,
                              initial_sparsity = self.initial_sparsity_conv,sub_kernel_granularity = self.sub_kernel_granularity,sparse = self.sparse),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample,widen_factor = self.widen_factor,
                            vanilla_conv1 = self.vanilla_conv1,vanilla_conv3 = self.vanilla_conv3,initial_sparsity = self.initial_sparsity_conv,
                            sub_kernel_granularity = self.sub_kernel_granularity,sparse = self.sparse))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes,widen_factor = self.widen_factor,
                                vanilla_conv1 = self.vanilla_conv1,vanilla_conv3 = self.vanilla_conv3,initial_sparsity = self.initial_sparsity_conv,
                                sub_kernel_granularity = self.sub_kernel_granularity,sparse = self.sparse))
        return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
| |
<gh_stars>0
import numpy as np
import argparse
import os
import cv2
import tensorflow as tf
from models.fcn8_vgg import FCN8VGG
from collections import namedtuple
from utils import *
#--------------------------------------------------------------------------------
# Definitions
#--------------------------------------------------------------------------------
# a label and all meta information
# Immutable record describing one semantic class (Cityscapes-style label schema).
Label = namedtuple( 'Label' , [
    'name'        , # The identifier of this label, e.g. 'car', 'person', ... .
                    # We use them to uniquely name a class
    'id'          , # An integer ID that is associated with this label.
                    # The IDs are used to represent the label in ground truth images
                    # An ID of -1 means that this label does not have an ID and thus
                    # is ignored when creating ground truth images (e.g. license plate).
                    # Do not modify these IDs, since exactly these IDs are expected by the
                    # evaluation server.
    'trainId'     , # Feel free to modify these IDs as suitable for your method. Then create
                    # ground truth images with train IDs, using the tools provided in the
                    # 'preparation' folder. However, make sure to validate or submit results
                    # to our evaluation server using the regular IDs above!
                    # For trainIds, multiple labels might have the same ID. Then, these labels
                    # are mapped to the same class in the ground truth images. For the inverse
                    # mapping, we use the label that is defined first in the list below.
                    # For example, mapping all void-type classes to the same ID in training,
                    # might make sense for some approaches.
                    # Max value is 255!
    'category'    , # The name of the category that this label belongs to
    'categoryId'  , # The ID of this category. Used to create ground truth images
                    # on category level.
    'hasInstances', # Whether this label distinguishes between single instances or not
    'ignoreInEval', # Whether pixels having this class as ground truth label are ignored
                    # during evaluations or not
    'color'       , # The color of this label
    ] )
#--------------------------------------------------------------------------------
# A list of all labels
#--------------------------------------------------------------------------------
# Please adapt the train IDs as appropriate for you approach.
# Note that you might want to ignore labels with ID 255 during training.
# Further note that the current train IDs are only a suggestion. You can use whatever you like.
# Make sure to provide your results using the original IDs and not the training IDs.
# Note that many IDs are ignored in evaluation and thus you never need to predict these!
# Cityscapes label table. BUG FIX: two colors had invalid RGB components
# (>255) — fence was (2550,153,153) and bicycle (1255, 11, 32); corrected to
# the official Cityscapes values (190,153,153) and (119, 11, 32).
labels = [
    #       name                     id    trainId   category            catId     hasInstances   ignoreInEval   color
    Label(  'unlabeled'            ,  0 ,      255 , 'void'            , 0       , False        , True         , (  0,  0,  0) ),
    Label(  'ego vehicle'          ,  1 ,      255 , 'void'            , 0       , False        , True         , (  0,  0,  0) ),
    Label(  'rectification border' ,  2 ,      255 , 'void'            , 0       , False        , True         , (  0,  0,  0) ),
    Label(  'out of roi'           ,  3 ,      255 , 'void'            , 0       , False        , True         , (  0,  0,  0) ),
    Label(  'static'               ,  4 ,      255 , 'void'            , 0       , False        , True         , (  0,  0,  0) ),
    Label(  'dynamic'              ,  5 ,      255 , 'void'            , 0       , False        , True         , (111, 74,  0) ),
    Label(  'ground'               ,  6 ,      255 , 'void'            , 0       , False        , True         , ( 81,  0, 81) ),
    Label(  'road'                 ,  7 ,        0 , 'flat'            , 1       , False        , False        , (128, 64,128) ),
    Label(  'sidewalk'             ,  8 ,        1 , 'flat'            , 1       , False        , False        , (244, 35,232) ),
    Label(  'parking'              ,  9 ,      255 , 'flat'            , 1       , False        , True         , (250,170,160) ),
    Label(  'rail track'           , 10 ,      255 , 'flat'            , 1       , False        , True         , (230,150,140) ),
    Label(  'building'             , 11 ,        2 , 'construction'    , 2       , False        , False        , ( 70, 70, 70) ),
    Label(  'wall'                 , 12 ,        3 , 'construction'    , 2       , False        , False        , (102,102,156) ),
    Label(  'fence'                , 13 ,        4 , 'construction'    , 2       , False        , False        , (190,153,153) ),
    Label(  'guard rail'           , 14 ,      255 , 'construction'    , 2       , False        , True         , (180,165,180) ),
    Label(  'bridge'               , 15 ,      255 , 'construction'    , 2       , False        , True         , (150,100,100) ),
    Label(  'tunnel'               , 16 ,      255 , 'construction'    , 2       , False        , True         , (150,120, 90) ),
    Label(  'pole'                 , 17 ,        5 , 'object'          , 3       , False        , False        , (153,153,153) ),
    Label(  'polegroup'            , 18 ,      255 , 'object'          , 3       , False        , True         , (153,153,153) ),
    Label(  'traffic light'        , 19 ,        6 , 'object'          , 3       , False        , False        , (250,170, 30) ),
    Label(  'traffic sign'         , 20 ,        7 , 'object'          , 3       , False        , False        , (220,220,  0) ),
    Label(  'vegetation'           , 21 ,        8 , 'nature'          , 4       , False        , False        , (107,142, 35) ),
    Label(  'terrain'              , 22 ,        9 , 'nature'          , 4       , False        , False        , (152,251,152) ),
    Label(  'sky'                  , 23 ,       10 , 'sky'             , 5       , False        , False        , ( 70,130,180) ),
    Label(  'person'               , 24 ,       11 , 'human'           , 6       , True         , False        , (220, 20, 60) ),
    Label(  'rider'                , 25 ,       12 , 'human'           , 6       , True         , False        , (255,  0,  0) ),
    Label(  'car'                  , 26 ,       13 , 'vehicle'         , 7       , True         , False        , (  0,  0,142) ),
    Label(  'truck'                , 27 ,       14 , 'vehicle'         , 7       , True         , False        , (  0,  0, 70) ),
    Label(  'bus'                  , 28 ,       15 , 'vehicle'         , 7       , True         , False        , (  0, 60,100) ),
    Label(  'caravan'              , 29 ,      255 , 'vehicle'         , 7       , True         , True         , (  0,  0, 90) ),
    Label(  'trailer'              , 30 ,      255 , 'vehicle'         , 7       , True         , True         , (  0,  0,110) ),
    Label(  'train'                , 31 ,       16 , 'vehicle'         , 7       , True         , False        , (  0, 80,100) ),
    Label(  'motorcycle'           , 32 ,       17 , 'vehicle'         , 7       , True         , False        , (  0,  0,230) ),
    Label(  'bicycle'              , 33 ,       18 , 'vehicle'         , 7       , True         , False        , (119, 11, 32) ),
]
# reverse lookup: trainId -> human-readable class name, used when reporting IoU
trainId2name = { label.trainId : label.name for label in labels }
# pixels labelled 255 are excluded from the evaluation
ignore_label = 255
parser = argparse.ArgumentParser(description='Evaluation on the cityscapes validation set')
parser.add_argument('--checkpoint_dir', type=str, help='folder containing checkpoints', required=True)
parser.add_argument('--gt_file', type=str, help='path to filelist.txt', required=True)
parser.add_argument('--num_classes', type=int, default= 19, help='num classes')
parser.add_argument('--output_path', type=str, default='validation.txt')
args = parser.parse_args()
### INPUTS ###
# placeholders fed one image / ground-truth pair at a time; a leading batch
# dimension is added before the network sees them
image_placeholder = tf.placeholder(tf.float32)
sem_gt_placeholder = tf.placeholder(tf.int32)
input_images = tf.cast(tf.expand_dims(image_placeholder, axis=0),tf.float32)
sem_gt = tf.expand_dims(sem_gt_placeholder, axis=0)
with tf.name_scope("content_vgg"):
    vgg_fcn = FCN8VGG()
    vgg_fcn.build(input_images,train=False, debug=False, num_classes=args.num_classes)
sem_pred = vgg_fcn.pred_up
### MIOU ###
# ignored pixels get weight 0; their label/prediction values are zeroed so
# everything stays inside [0, num_classes) for the confusion matrix
weightsValue = tf.to_float(tf.not_equal(sem_gt,ignore_label))
sem_gt = tf.where(tf.equal(sem_gt, ignore_label), tf.zeros_like(sem_gt), sem_gt)
sem_pred = tf.where(tf.equal(sem_pred, ignore_label), tf.zeros_like(sem_pred), sem_pred)
miou, update_op = tf.metrics.mean_iou(labels=tf.reshape(sem_gt,[-1]),predictions=tf.reshape(sem_pred,[-1]), num_classes=args.num_classes, weights=tf.reshape(weightsValue,[-1]))
summary_miou = tf.summary.scalar("miou",miou)
print('Finished building Network.')
# TensorBoard summaries for the validation runs live under <checkpoint_dir>/val
if not os.path.exists(os.path.join(args.checkpoint_dir,"val")):
    os.mkdir(os.path.join(args.checkpoint_dir,"val"))
writer = tf.summary.FileWriter(os.path.join(args.checkpoint_dir,"val"))
init = [tf.global_variables_initializer(),tf.local_variables_initializer()]
# checkpoint basename -> path, so each checkpoint is evaluated exactly once
list_checkpoints = {}
# NOTE(review): busy-wait — this loop re-scans the checkpoint dir forever
# with no sleep between scans.
while True:
    print("Waiting for new checkpoint", end='\r')
    # NOTE(review): `best` resets every scan, so the "NEW BEST" marker only
    # compares checkpoints evaluated within a single directory scan.
    best=0
    for f in sorted(os.listdir(args.checkpoint_dir)):
        if "fcn8s-" in f:
            # NOTE(review): opened in append mode per matching file and never
            # closed; a `with` block would be safer.
            output = open(args.output_path,"a")
            basename=f.split(".")[0]
            if basename not in list_checkpoints.keys():
                list_checkpoints[basename]=os.path.join(args.checkpoint_dir, basename)
                with tf.Session() as sess:
                    sess.run(init)
                    # load() restores the checkpoint and returns its global step
                    step = load(sess,list_checkpoints[basename])
                    print("Loading last checkpoint")
                    if step >= 0:
                        print("Restored step: ", step)
                        print(" [*] Load SUCCESS")
                    else:
                        step=0
                        print(" [!] Load failed...")
                    coord = tf.train.Coordinator()
                    tf.train.start_queue_runners()
                    print('Thread running')
                    print('Running the Network')
                    lenght=len(open(args.gt_file).readlines())
                    with open(args.gt_file) as filelist:
                        # each line: "<image_path>;<label_path>"
                        for idx,line in enumerate(filelist):
                            print("Image evaluated: ",idx + 1,"/",lenght,end='\r')
                            image = cv2.imread(line.split(";")[0])
                            semgt = cv2.imread(line.split(";")[-1].strip(),cv2.IMREAD_GRAYSCALE)
                            # accumulate the running confusion matrix
                            _=sess.run(update_op,feed_dict={image_placeholder : image , sem_gt_placeholder : semgt})
                    # read the accumulated mIoU (fed with the last image/gt pair;
                    # presumably the feed is only to satisfy the placeholders —
                    # TODO confirm)
                    miou_value =sess.run(miou,feed_dict={image_placeholder : image , sem_gt_placeholder : semgt})
                    sum_str = sess.run(summary_miou)
                    writer.add_summary(sum_str,step)
                    if miou_value > best:
                        output.write("!!!!!!!!NEW BEST!!!!!!!!\n")
                        best = miou_value
                    output.write("########" + str(step) + "########\n")
                    # NOTE(review): `mean` is assigned but never used
                    mean=0
                    # per-class IoU derived from the metric's internal confusion matrix
                    confusion_matrix=tf.get_default_graph().get_tensor_by_name("mean_iou/total_confusion_matrix:0").eval()
                    for cl in range(confusion_matrix.shape[0]):
                        tp_fn = np.sum(confusion_matrix[cl,:])
                        tp_fp = np.sum(confusion_matrix[:,cl])
                        tp = confusion_matrix[cl,cl]
                        IoU_cl = tp / (tp_fn + tp_fp - tp)
                        output.write(trainId2name[cl] + ": {:.4f}\n".format(IoU_cl))
                    output.write("#######################\n")
                    output.write("mIoU: " + str(miou_value) +"\n")
| |
import asyncio
import inspect
import re
from copy import copy, deepcopy
from functools import partial
from typing import (
Any,
Awaitable,
Callable,
Dict,
Generator,
Iterable,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from pypika import Order, Query, Table
from pypika.terms import Term
from tortoise.backends.base.client import BaseDBAsyncClient
from tortoise.exceptions import (
ConfigurationError,
DoesNotExist,
IncompleteInstanceError,
IntegrityError,
OperationalError,
ParamsError,
TransactionManagementError,
)
from tortoise.fields.base import Field
from tortoise.fields.data import IntField
from tortoise.fields.relational import (
BackwardFKRelation,
BackwardOneToOneRelation,
ForeignKeyFieldInstance,
ManyToManyFieldInstance,
ManyToManyRelation,
NoneAwaitable,
OneToOneFieldInstance,
ReverseRelation,
)
from tortoise.filters import get_filters_for_field
from tortoise.functions import Function
from tortoise.indexes import Index
from tortoise.manager import Manager
from tortoise.queryset import BulkUpdateQuery, ExistsQuery, Q, QuerySet, QuerySetSingle, RawSQLQuery
from tortoise.router import router
from tortoise.signals import Signals
from tortoise.transactions import current_transaction_map, in_transaction
# TypeVar bound to Model, used for self-typed returns on Model subclasses
MODEL = TypeVar("MODEL", bound="Model")
# sentinel distinguishing "argument not provided" from an explicit None
EMPTY = object()
# TODO: Define Filter type object. Possibly tuple?
def get_together(meta: "Model.Meta", together: str) -> Tuple[Tuple[str, ...], ...]:
    """Read a ``unique_together``/``indexes`` style option from a model Meta.

    A flat sequence of field names such as ``("a", "b")`` is normalised to
    the nested form ``(("a", "b"),)``; any other value is passed through
    untouched. No validation happens here — that is done further in the code.
    """
    value = getattr(meta, together, ())
    is_flat_sequence = (
        bool(value)
        and isinstance(value, (list, tuple))
        and isinstance(value[0], str)
    )
    return (value,) if is_flat_sequence else value
def prepare_default_ordering(meta: "Model.Meta") -> Tuple[Tuple[str, Order], ...]:
    """Parse a Meta ``ordering`` option into ``(field_name, Order)`` pairs.

    Each entry is resolved by the same parser the queryset uses for
    ``.order_by()``, so "-name" style prefixes behave identically.
    """
    raw_ordering = getattr(meta, "ordering", ())
    return tuple(
        QuerySet._resolve_ordering_string(entry) for entry in raw_ordering
    )
def _fk_setter(
self: "Model",
value: "Optional[Model]",
_key: str,
relation_field: str,
to_field: str,
) -> None:
setattr(self, relation_field, getattr(value, to_field) if value else None)
setattr(self, _key, value)
def _fk_getter(
self: "Model", _key: str, ftype: "Type[Model]", relation_field: str, to_field: str
) -> Awaitable:
try:
return getattr(self, _key)
except AttributeError:
value = getattr(self, relation_field)
if value:
return ftype.filter(**{to_field: value}).first()
return NoneAwaitable
def _rfk_getter(
    self: "Model", _key: str, ftype: "Type[Model]", frelfield: str, from_field: str
) -> ReverseRelation:
    """Property getter for a reverse-FK accessor.

    Builds and caches a ``ReverseRelation`` on first access; subsequent
    accesses return the cached instance.
    """
    relation = getattr(self, _key, None)
    if relation is not None:
        return relation
    relation = ReverseRelation(ftype, frelfield, self, from_field)
    setattr(self, _key, relation)
    return relation
def _ro2o_getter(
self: "Model", _key: str, ftype: "Type[Model]", frelfield: str, from_field: str
) -> "QuerySetSingle[Optional[Model]]":
if hasattr(self, _key):
return getattr(self, _key)
val = ftype.filter(**{frelfield: getattr(self, from_field)}).first()
setattr(self, _key, val)
return val
def _m2m_getter(
    self: "Model", _key: str, field_object: ManyToManyFieldInstance
) -> ManyToManyRelation:
    """Property getter for an m2m accessor; caches a ``ManyToManyRelation``."""
    relation = getattr(self, _key, None)
    if relation is not None:
        return relation
    relation = ManyToManyRelation(self, field_object)
    setattr(self, _key, relation)
    return relation
def _get_comments(cls: "Type[Model]") -> Dict[str, str]:
    """
    Get comments exactly before attributes.

    A comment may span multiple lines. The placeholder "{model}" is replaced
    with the name of the model class. The ``#:`` (with a colon) form is
    required so that plain private ``#`` comments are not picked up.
    :param cls: The class we need to extract comments from its source.
    :return: The dictionary of comments by field name
    """
    try:
        source = inspect.getsource(cls)
    except (TypeError, OSError):  # pragma: nocoverage
        # Source unavailable (built-in, dynamically created, REPL, ...).
        return {}
    comments = {}
    # NOTE(review): the same `source` (fetched for `cls` only) is re-scanned
    # for every class in the MRO — only the "{model}" substitution differs
    # per iteration. Presumably intentional; confirm whether base-class
    # sources should be scanned as well.
    for cls_ in reversed(cls.__mro__):
        if cls_ is object:
            continue
        # One match per attribute: group 0 = the run of "#:" comment lines,
        # group 1 = the attribute name that follows (before ":" or "=").
        matches = re.findall(r"((?:(?!\n|^)[^\w\n]*#:.*?\n)+?)[^\w\n]*(\w+)\s*[:=]", source)
        for match in matches:
            field_name = match[1]
            # Extract text: strip the leading "#:" marker and trailing
            # whitespace on every comment line.
            comment = re.sub(r"(^\s*#:\s*|\s*$)", "", match[0], flags=re.MULTILINE)
            # Class name template
            comments[field_name] = comment.replace("{model}", cls_.__name__)
    return comments
class MetaInfo:
    """
    Runtime metadata registry for a Model class: field registries, model
    field <-> DB column projections, filter definitions and prepared pypika
    query objects.
    """

    __slots__ = (
        "abstract",
        "db_table",
        "app",
        "fields",
        "db_fields",
        "m2m_fields",
        "o2o_fields",
        "backward_o2o_fields",
        "fk_fields",
        "backward_fk_fields",
        "fetch_fields",
        "fields_db_projection",
        "_inited",
        "fields_db_projection_reverse",
        "filters",
        "fields_map",
        "default_connection",
        "basequery",
        "basequery_all_fields",
        "basetable",
        "_filters",
        "unique_together",
        "manager",
        "indexes",
        "pk_attr",
        "generated_db_fields",
        "_model",
        "table_description",
        "pk",
        "db_pk_column",
        "db_native_fields",
        "db_default_fields",
        "db_complex_fields",
        "_default_ordering",
        "_ordering_validated",
    )

    def __init__(self, meta: "Model.Meta") -> None:
        """Initialise from the model's inner ``Meta`` class; most registries
        start empty and are filled by ``add_field``/``finalise_model``."""
        # Options read straight from Meta.
        self.abstract: bool = getattr(meta, "abstract", False)
        self.manager: Manager = getattr(meta, "manager", Manager())
        self.db_table: str = getattr(meta, "table", "")
        self.app: Optional[str] = getattr(meta, "app", None)
        self.unique_together: Tuple[Tuple[str, ...], ...] = get_together(meta, "unique_together")
        self.indexes: Tuple[Tuple[str, ...], ...] = get_together(meta, "indexes")
        self._default_ordering: Tuple[Tuple[str, Order], ...] = prepare_default_ordering(meta)
        self._ordering_validated: bool = False
        # Field-name registries, populated by add_field()/finalise_fields().
        self.fields: Set[str] = set()
        self.db_fields: Set[str] = set()
        self.m2m_fields: Set[str] = set()
        self.fk_fields: Set[str] = set()
        self.o2o_fields: Set[str] = set()
        self.backward_fk_fields: Set[str] = set()
        self.backward_o2o_fields: Set[str] = set()
        self.fetch_fields: Set[str] = set()
        # Model field name -> DB column name, plus its reverse mapping.
        self.fields_db_projection: Dict[str, str] = {}
        self.fields_db_projection_reverse: Dict[str, str] = {}
        # Raw filter templates; resolved into `filters` by _generate_filters().
        self._filters: Dict[str, Dict[str, dict]] = {}
        self.filters: Dict[str, dict] = {}
        self.fields_map: Dict[str, Field] = {}
        self._inited: bool = False
        self.default_connection: Optional[str] = None
        self.basequery: Query = Query()
        self.basequery_all_fields: Query = Query()
        self.basetable: Table = Table("")
        self.pk_attr: str = getattr(meta, "pk_attr", "")
        self.generated_db_fields: Tuple[str] = None  # type: ignore
        self._model: Type["Model"] = None  # type: ignore
        self.table_description: str = getattr(meta, "table_description", "")
        self.pk: Field = None  # type: ignore
        self.db_pk_column: str = ""
        # (db column, model field name, Field) triples, partitioned by the
        # value conversion needed on row read; see _generate_db_fields().
        self.db_native_fields: List[Tuple[str, str, Field]] = []
        self.db_default_fields: List[Tuple[str, str, Field]] = []
        self.db_complex_fields: List[Tuple[str, str, Field]] = []

    @property
    def full_name(self) -> str:
        """Qualified model name in ``app.ModelName`` form."""
        return f"{self.app}.{self._model.__name__}"

    def add_field(self, name: str, value: Field) -> None:
        """Register a new field on the model and refresh derived registries.

        :raises ConfigurationError: if a field called ``name`` already exists.
        """
        if name in self.fields_map:
            raise ConfigurationError(f"Field {name} already present in meta")
        value.model = self._model
        self.fields_map[name] = value
        value.model_field_name = name
        if value.has_db_field:
            self.fields_db_projection[name] = value.source_field or name
        # NOTE(review): O2O is tested before FK — presumably
        # BackwardOneToOneRelation subclasses BackwardFKRelation, so the more
        # specific bucket must win; confirm against the relational fields.
        if isinstance(value, ManyToManyFieldInstance):
            self.m2m_fields.add(name)
        elif isinstance(value, BackwardOneToOneRelation):
            self.backward_o2o_fields.add(name)
        elif isinstance(value, BackwardFKRelation):
            self.backward_fk_fields.add(name)
        field_filters = get_filters_for_field(
            field_name=name, field=value, source_field=value.source_field or name
        )
        self._filters.update(field_filters)
        self.finalise_fields()

    @property
    def db(self) -> BaseDBAsyncClient:
        """DB client for the current (possibly transaction-local) connection."""
        try:
            return current_transaction_map[self.default_connection].get()
        except KeyError:
            raise ConfigurationError("No DB associated to model")

    @property
    def ordering(self) -> Tuple[Tuple[str, Order], ...]:
        """Default ordering pairs; raises when ``Meta.ordering`` names fields
        the model does not have (validation flag set by finalise_fields)."""
        if not self._ordering_validated:
            unknown_fields = {f for f, _ in self._default_ordering} - self.fields
            raise ConfigurationError(
                f"Unknown fields {','.join(unknown_fields)} in "
                f"default ordering for model {self._model.__name__}"
            )
        return self._default_ordering

    def get_filter(self, key: str) -> dict:
        """Return the resolved filter info dict registered under ``key``."""
        return self.filters[key]

    def finalise_model(self) -> None:
        """
        Finalise the model after it had been fully loaded.
        """
        self.finalise_fields()
        self._generate_filters()
        self._generate_lazy_fk_m2m_fields()
        self._generate_db_fields()

    def finalise_fields(self) -> None:
        """Recompute every derived registry from the current ``fields_map``."""
        self.db_fields = set(self.fields_db_projection.values())
        self.fields = set(self.fields_map.keys())
        self.fields_db_projection_reverse = {
            value: key for key, value in self.fields_db_projection.items()
        }
        # All relational fields that can be (pre)fetched.
        self.fetch_fields = (
            self.m2m_fields
            | self.backward_fk_fields
            | self.fk_fields
            | self.backward_o2o_fields
            | self.o2o_fields
        )
        generated_fields = []
        for field in self.fields_map.values():
            if not field.generated:
                continue
            generated_fields.append(field.source_field or field.model_field_name)
        self.generated_db_fields = tuple(generated_fields)  # type: ignore
        # Validate Meta.ordering: the first "__"-path segment of each entry
        # must be a known field, otherwise the `ordering` property will raise.
        self._ordering_validated = True
        for field_name, _ in self._default_ordering:
            if field_name.split("__")[0] not in self.fields:
                self._ordering_validated = False
                break

    def _generate_lazy_fk_m2m_fields(self) -> None:
        """Install lazy relation accessors as properties on the model class."""
        # Create lazy FK fields on model.
        for key in self.fk_fields:
            _key = f"_{key}"
            fk_field_object: ForeignKeyFieldInstance = self.fields_map[key]  # type: ignore
            relation_field = fk_field_object.source_field
            to_field = fk_field_object.to_field_instance.model_field_name
            setattr(
                self._model,
                key,
                property(
                    partial(
                        _fk_getter,
                        _key=_key,
                        ftype=fk_field_object.related_model,
                        relation_field=relation_field,
                        to_field=to_field,
                    ),
                    partial(
                        _fk_setter,
                        _key=_key,
                        relation_field=relation_field,
                        to_field=to_field,
                    ),
                    # Deleter: same setter with value pre-bound to None.
                    partial(
                        _fk_setter,
                        value=None,
                        _key=_key,
                        relation_field=relation_field,
                        to_field=to_field,
                    ),
                ),
            )
        # Create lazy reverse FK fields on model.
        for key in self.backward_fk_fields:
            _key = f"_{key}"
            backward_fk_field_object: BackwardFKRelation = self.fields_map[key]  # type: ignore
            setattr(
                self._model,
                key,
                property(
                    partial(
                        _rfk_getter,
                        _key=_key,
                        ftype=backward_fk_field_object.related_model,
                        frelfield=backward_fk_field_object.relation_field,
                        from_field=backward_fk_field_object.to_field_instance.model_field_name,
                    )
                ),
            )
        # Create lazy one to one fields on model.
        for key in self.o2o_fields:
            _key = f"_{key}"
            o2o_field_object: OneToOneFieldInstance = self.fields_map[key]  # type: ignore
            relation_field = o2o_field_object.source_field
            to_field = o2o_field_object.to_field_instance.model_field_name
            setattr(
                self._model,
                key,
                property(
                    partial(
                        _fk_getter,
                        _key=_key,
                        ftype=o2o_field_object.related_model,
                        relation_field=relation_field,
                        to_field=to_field,
                    ),
                    partial(
                        _fk_setter,
                        _key=_key,
                        relation_field=relation_field,
                        to_field=to_field,
                    ),
                    partial(
                        _fk_setter,
                        value=None,
                        _key=_key,
                        relation_field=relation_field,
                        to_field=to_field,
                    ),
                ),
            )
        # Create lazy reverse one to one fields on model.
        for key in self.backward_o2o_fields:
            _key = f"_{key}"
            backward_o2o_field_object: BackwardOneToOneRelation = self.fields_map[  # type: ignore
                key
            ]
            setattr(
                self._model,
                key,
                property(
                    partial(
                        _ro2o_getter,
                        _key=_key,
                        ftype=backward_o2o_field_object.related_model,
                        frelfield=backward_o2o_field_object.relation_field,
                        from_field=backward_o2o_field_object.to_field_instance.model_field_name,
                    ),
                ),
            )
        # Create lazy M2M fields on model.
        for key in self.m2m_fields:
            _key = f"_{key}"
            setattr(
                self._model,
                key,
                property(partial(_m2m_getter, _key=_key, field_object=self.fields_map[key])),
            )

    def _generate_db_fields(self) -> None:
        """Partition DB columns by the conversion needed when reading rows."""
        self.db_default_fields.clear()
        self.db_complex_fields.clear()
        self.db_native_fields.clear()
        for key in self.db_fields:
            model_field = self.fields_db_projection_reverse[key]
            field = self.fields_map[model_field]
            # True when the field class did not override to_python_value.
            default_converter = field.__class__.to_python_value is Field.to_python_value
            if (
                field.skip_to_python_if_native
                and field.field_type in self.db.executor_class.DB_NATIVE
            ):
                self.db_native_fields.append((key, model_field, field))
            elif not default_converter:
                self.db_complex_fields.append((key, model_field, field))
            elif field.field_type in self.db.executor_class.DB_NATIVE:
                self.db_native_fields.append((key, model_field, field))
            else:
                self.db_default_fields.append((key, model_field, field))

    def _generate_filters(self) -> None:
        """Resolve final filter operators, applying executor overrides."""
        get_overridden_filter_func = self.db.executor_class.get_overridden_filter_func
        for key, filter_info in self._filters.items():
            overridden_operator = get_overridden_filter_func(
                filter_func=filter_info["operator"]  # type: ignore
            )
            if overridden_operator:
                # Copy so the shared template in _filters stays untouched.
                filter_info = copy(filter_info)
                filter_info["operator"] = overridden_operator  # type: ignore
            self.filters[key] = filter_info
class ModelMeta(type):
__slots__ = ()
def __new__(mcs, name: str, bases: Tuple[Type, ...], attrs: dict):
fields_db_projection: Dict[str, str] = {}
fields_map: Dict[str, Field] = {}
filters: Dict[str, Dict[str, dict]] = {}
fk_fields: Set[str] = set()
m2m_fields: Set[str] = set()
o2o_fields: Set[str] = set()
meta_class: "Model.Meta" = attrs.get("Meta", type("Meta", (), {}))
pk_attr: str = "id"
# Searching for Field attributes in the class hierarchy
def __search_for_field_attributes(base: Type, attrs: dict) -> None:
"""
Searching for class attributes of type fields.Field
in the given class.
If an attribute of the class is an instance of fields.Field,
then it will be added to the fields dict. But only, if the
key is not already in the dict. So | |
<gh_stars>1-10
# Copyright 2010 Fluidinfo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import time
import urllib
from twisted.internet import defer, task
from twisted.python import log, failure
from twisted.web import http, error
from txfluiddb.client import Object, Tag, Namespace
from tickery import twitter, query, error as terror
from tickery.www import defaults
from tickery.www.defaults import (TICKERY_URL, TWITTER_USERNAME,
TWITTER_USERS_NAMESPACE_NAME, TWITTER_ID_TAG_NAME,
TWITTER_SCREENNAME_TAG_NAME, TWITTER_UPDATED_AT_TAG_NAME,
TWITTER_FRIENDS_NAMESPACE_NAME, TWITTER_N_FRIENDS_TAG_NAME,
TWITTER_N_FOLLOWERS_TAG_NAME, TWITTER_N_STATUSES_TAG_NAME)
# This is the maximum requests that will be sent out at once by anything
# using a task.Cooperator. That's usually Fluidinfo, but it can also mean
# this many requests going at once to Twitter to pick up user details.
MAX_SIMULTANEOUS_REQUESTS = 5
# Presumably the character length of a bit.ly shortened URL — TODO confirm
# at the usage site (not used in this chunk).
BITLY_URL_LEN = 20
# Relative progress weights: addUserByScreenname counts workToDo/workDone in
# these units (creating a friend object costs 3, tagging a friend costs 1).
WORK_TO_CREATE_A_FRIEND = 3
WORK_TO_TAG_A_FRIEND = 1
# User addition priorities. Lower values = higher priority.
PRIORITY_ADMIN = 1
PRIORITY_NORMAL = 5
PRIORITY_BULK = 10
# Put this into www/defaults and re-org stuff in www/*.py in another branch.
tabName = {
    'simple': 'simple',
    'intermediate': 'intermediate',
    'advanced': 'advanced',
}
_hashTag = '#tickery'
# Fluidinfo Tag handles for the per-user values Tickery stores under its
# Twitter users namespace.
idTag = Tag(TWITTER_USERNAME,
            TWITTER_USERS_NAMESPACE_NAME,
            TWITTER_ID_TAG_NAME)
screennameTag = Tag(TWITTER_USERNAME,
                    TWITTER_USERS_NAMESPACE_NAME,
                    TWITTER_SCREENNAME_TAG_NAME)
updatedTag = Tag(TWITTER_USERNAME,
                 TWITTER_USERS_NAMESPACE_NAME,
                 TWITTER_UPDATED_AT_TAG_NAME)
nFriendsTag = Tag(TWITTER_USERNAME,
                  TWITTER_USERS_NAMESPACE_NAME,
                  TWITTER_N_FRIENDS_TAG_NAME)
nFollowersTag = Tag(TWITTER_USERNAME,
                    TWITTER_USERS_NAMESPACE_NAME,
                    TWITTER_N_FOLLOWERS_TAG_NAME)
nStatusesTag = Tag(TWITTER_USERNAME,
                   TWITTER_USERS_NAMESPACE_NAME,
                   TWITTER_N_STATUSES_TAG_NAME)
# The Twitter tags we additionally add to user objects. There are several
# others we could add to this.
extraTags = {
    TWITTER_N_FRIENDS_TAG_NAME: nFriendsTag,
    TWITTER_N_FOLLOWERS_TAG_NAME: nFollowersTag,
    TWITTER_N_STATUSES_TAG_NAME: nStatusesTag,
}
class UnknownScreenname(Exception):
    """Twitter reported no user for a screen name (HTTP 404)."""
    pass


class UnaddedScreennames(Exception):
    """Not every screen name in a query has been added to Tickery yet."""
    pass


class ScreennameErrors(Exception):
    """Fetching one or more screen names failed with an unexpected error."""
    pass


class NonExistentScreennames(Exception):
    """One or more queried screen names do not exist on Twitter."""
    pass


class ProtectedScreenname(Exception):
    """A Twitter account is protected (friend lookup returned 401)."""
    pass


class ProtectedScreennames(Exception):
    """One or more queried accounts are protected."""
    pass


class TooManyFriends(Exception):
    """A queried user has more friends than defaults.FRIENDS_LIMIT allows."""
    pass


# NOTE(review): the following Fluidinfo* exceptions are not raised anywhere
# in this module — presumably they map Fluidinfo HTTP error classes for
# callers elsewhere; confirm.
class FluidinfoParseError(Exception):
    pass


class FluidinfoNonexistentAttribute(Exception):
    pass


class FluidinfoPermissionDenied(Exception):
    pass


class FluidinfoError(Exception):
    pass


class Canceled(Exception):
    """A user-addition job was canceled while in flight."""
    pass
def _ignoreHTTPStatus(fail, status):
    """Errback that swallows a twisted.web error.Error with the given HTTP
    status (returns None); any other status is passed along, and non-Error
    failures are re-raised by trap()."""
    fail.trap(error.Error)
    if int(fail.value.status) == status:
        return None
    return fail
@defer.inlineCallbacks
def addUserByScreenname(cache, endpoint, userJob):
    """Add a Twitter user to Fluidinfo and tag all of their friends.

    Steps: fetch the user's friend uids, create a per-user friends tag,
    ensure a Fluidinfo object exists for every friend (creating missing
    ones), mark each friend's object with the friends tag, and finally stamp
    the user's own object with ``updatedTag``. Progress is reported via
    ``userJob.workToDo`` / ``userJob.workDone`` in WORK_TO_* units; when the
    job is canceled mid-flight, ``Canceled`` is raised.

    :param cache: holder of the various user/oid/friends caches and pools.
    :param endpoint: Fluidinfo endpoint used for all tag/namespace calls.
    :param userJob: job record with ``screenname``, cancel flag and progress.
    """
    # We must at least create the user.
    userJob.workToDo = WORK_TO_CREATE_A_FRIEND
    userJob.workDone = 0
    screenname = userJob.screenname
    log.msg('Adding user %r' % screenname)

    # Translate Twitter HTTP errors into domain-specific exceptions.
    def catchUnknownScreenname(fail):
        fail.trap(error.Error)
        if int(fail.value.status) != http.NOT_FOUND:
            return fail
        return defer.fail(UnknownScreenname(screenname))

    def catchProtectedScreenname(fail):
        fail.trap(error.Error)
        if int(fail.value.status) != http.UNAUTHORIZED:
            return fail
        return defer.fail(ProtectedScreenname(screenname))

    d = cache.friendsIdCache[screenname]
    d.addErrback(catchUnknownScreenname)
    d.addErrback(catchProtectedScreenname)
    friendUids = yield d
    log.msg('Got %d friends for user %r' % (len(friendUids), screenname))
    # Make a tag for this new user to mark their friends with.
    ns = Namespace(TWITTER_USERNAME, TWITTER_FRIENDS_NAMESPACE_NAME)
    d = ns.createTag(endpoint, screenname.lower(),
                     "A tag used to mark %s's Twitter friends." % screenname, False)
    # TODO: check the X-FluidDB-Error-Class header in the errback to make
    # sure it really got a namespace already exists error.
    d.addErrback(_ignoreHTTPStatus, http.PRECONDITION_FAILED)
    yield d
    # Note: the call to createTag (above) will return a Tag instance when
    # txFluidDB gets fixed.
    friendTag = Tag(TWITTER_USERNAME, TWITTER_FRIENDS_NAMESPACE_NAME,
                    screenname.lower())
    friendTagPath = friendTag.getPath()
    log.msg('Created Twitter friends tag %s' % friendTagPath)

    # Progress bookkeeping callbacks (work is counted done even on error so
    # the progress total still adds up).
    def _madeUserDone(userObject, user):
        userJob.workDone += WORK_TO_CREATE_A_FRIEND
        cache.extraTwitterTagsPool.add(
            addExtraTwitterTags(endpoint, userObject, user))
        return userObject

    def _madeUserErr(failure):
        userJob.workDone += WORK_TO_CREATE_A_FRIEND
        return failure

    def _tagFriendDone():
        userJob.workDone += WORK_TO_TAG_A_FRIEND

    def makeUser(user, thisIndex=None, totalToAdd=None):
        # Create (or look up) the Fluidinfo object for one Twitter user dict.
        newName = user['screen_name']
        if thisIndex is not None:
            log.msg('Making user %r, friend %d/%d of %r.' %
                    (newName, thisIndex, totalToAdd, screenname))
        else:
            log.msg('Making user %r.' % newName)
        d = cache.oidUidScreennameCache.objectByUid(user['id'], newName)
        d.addCallbacks(_madeUserDone, _madeUserErr, callbackArgs=(user,))
        return d

    def _ignore404uid(fail, uid):
        # A uid that 404s on Twitter is purged from the caches; any other
        # failure is just logged.
        fail.trap(error.Error)
        if int(fail.value.status) == http.NOT_FOUND:
            log.msg('Twitter uid %d is no longer found (404). Ignoring.' % uid)
            cache.userCache.removeUid(uid)
            cache.oidUidScreennameCache.removeUid(uid)
        else:
            log.msg('Failure fetching Twitter uid %d:' % uid)
            log.err(fail)

    # NOTE(review): `raise StopIteration` inside a generator is Python-2
    # style; under Python 3.7+ (PEP 479) it would become a RuntimeError.
    # This module uses xrange, so it is Python 2 code — fine as written.
    def makeCreateUserJobs(friendsToAdd):
        nToAdd = len(friendsToAdd)
        for i, friendUid in enumerate(friendsToAdd):
            if userJob.canceled():
                log.msg('Detected cancelation of screenname %r.' % screenname)
                raise StopIteration
            d = cache.userCache.userByUid(friendUid)
            d.addCallbacks(makeUser, _ignore404uid,
                           callbackArgs=(i + 1, nToAdd),
                           errbackArgs=(friendUid,))
            yield d

    @defer.inlineCallbacks
    def addFriend(friendName, thisIndex, totalToAdd):
        # Put the new user's friends tag onto one friend's object.
        log.msg('About to mark user %r as a friend %d/%d of %r.' %
                (friendName, thisIndex, totalToAdd, screenname))
        d = cache.oidUidScreennameCache.objectIdByScreenname(friendName)
        d.addErrback(log.err)
        objectId = yield d
        log.msg('Marking user %r as a friend %d/%d of %r' %
                (friendName, thisIndex, totalToAdd, screenname))
        if objectId is not None:
            o = Object(objectId)
            yield o.set(endpoint, friendTag, None)
            log.msg('Marked user %r as a friend %d/%d of %r' %
                    (friendName, thisIndex, totalToAdd, screenname))
            _tagFriendDone()

    def makeTagFriendsJobs():
        nFriendUids = len(friendUids)
        for i, friendUid in enumerate(friendUids):
            if userJob.canceled():
                log.msg('Detected cancelation of screenname %r.' % screenname)
                raise StopIteration
            d = cache.userCache.screennameByUid(friendUid)
            d.addCallbacks(addFriend, _ignore404uid,
                           callbackArgs=(i + 1, nFriendUids),
                           errbackArgs=(friendUid,))
            yield d

    # Get screename's id and add them as a Twitter user.
    user = yield cache.userCache.userByScreenname(screenname)
    userObject = yield makeUser(user)
    log.msg('User object for %r is %r' % (screenname, userObject))
    # Add the amount of work will it be to tag all friends.
    userJob.workToDo += (len(friendUids) * WORK_TO_TAG_A_FRIEND)
    # Figure out the work will it be to create whatever friends are needed.
    friendsToAdd = [fid for fid in friendUids
                    if not cache.oidUidScreennameCache.knownUid(fid)]
    nFriendsToAdd = len(friendsToAdd)
    log.msg('Must create %d new user objects as friends of %r.' %
            (nFriendsToAdd, screenname))
    if nFriendsToAdd and not userJob.canceled():
        userJob.workToDo += (nFriendsToAdd * WORK_TO_CREATE_A_FRIEND)
        start = time.time()
        # Create Fluidinfo objects for all the friends that we don't yet know
        # about. Up to MAX_SIMULTANEOUS_REQUESTS run concurrently via the
        # shared Cooperator iterating one jobs generator.
        jobs = makeCreateUserJobs(friendsToAdd)
        deferreds = []
        coop = task.Cooperator()
        for i in xrange(MAX_SIMULTANEOUS_REQUESTS):
            d = coop.coiterate(jobs)
            d.addErrback(log.err)
            deferreds.append(d)
        yield defer.DeferredList(deferreds)
        if not userJob.canceled():
            elapsed = time.time() - start
            log.msg('Created %d new friend (of %r) objects in %.2f seconds. '
                    'Mean %.4f' % (nFriendsToAdd, screenname, elapsed,
                                   float(elapsed / nFriendsToAdd)))
    if friendUids and not userJob.canceled():
        # Tag all friends.
        start = time.time()
        jobs = makeTagFriendsJobs()
        deferreds = []
        coop = task.Cooperator()
        for i in xrange(MAX_SIMULTANEOUS_REQUESTS):
            d = coop.coiterate(jobs)
            d.addErrback(log.err)
            deferreds.append(d)
        log.msg('About to yield friend tagging DL for %r' % screenname)
        yield defer.DeferredList(deferreds)
        log.msg('Friend tagging DL finished for %r' % screenname)
        if not userJob.canceled():
            elapsed = time.time() - start
            nFriendsUids = len(friendUids)
            log.msg('Tagged %d objects as being a friend of %r in %.2f '
                    'seconds. Mean = %.4f' % (nFriendsUids, screenname,
                                              elapsed,
                                              float(elapsed / nFriendsUids)))
    if userJob.canceled():
        log.msg('Canceled addUserByScreenname for %r.' % screenname)
        raise Canceled(screenname)
    else:
        # Add the updated tag to the user's object.
        log.msg('Adding updated tag to user object for %r' % screenname)
        yield userObject.set(endpoint, updatedTag, int(time.time()))
        log.msg('Successfully added screenname %r.' % (screenname,))
        userJob.workDone = userJob.workToDo
def friendOf(cache, endpoint, screenname1, screenname2):
    """Return a Deferred firing True/False: does screenname1 follow
    screenname2?

    Implemented by checking whether the object for screenname2 carries the
    per-user friends tag belonging to screenname1; a 404 on the tag read
    means "not a friend" (False).
    """
    followTag = Tag(TWITTER_USERNAME, TWITTER_FRIENDS_NAMESPACE_NAME,
                    screenname1.lower())

    def _tagPresent(objectId):
        d = Object(objectId).get(endpoint, followTag)
        d.addCallback(lambda _: True)
        return d

    def _absentOn404(fail):
        fail.trap(error.Error)
        if int(fail.value.status) == http.NOT_FOUND:
            return False
        return fail

    d = cache.oidUidScreennameCache.objectIdByScreenname(screenname2)
    d.addCallback(_tagPresent)
    d.addErrback(_absentOn404)
    return d
def intermediateQuery(cache, endpoint, queryTree):
    """Run an intermediate-tab query: collect the screen names mentioned in
    queryTree, fetch their Twitter user records, then hand everything to
    cb_intermediateQuery once they have all arrived."""
    screennames = set()
    query.queryTreeExtractScreennames(queryTree, screennames)
    userLookup = cache.userCache.usersByScreennames(screennames)
    userLookup.addCallback(
        cb_intermediateQuery, screennames, cache, endpoint, queryTree)
    return userLookup
def cb_intermediateQuery(users, screennames, cache, endpoint, queryTree):
# Make sure all users exist, that there were no other errors in
# fetching them, and that none of them are protected.
notFound = []
otherError = []
for name, user in users.items():
if isinstance(user, failure.Failure):
if user.check(error.Error):
status = int(user.value.status)
if status == http.NOT_FOUND:
notFound.append(name)
else:
log.msg(user)
otherError.append(name)
else:
log.msg(user)
otherError.append(name)
if notFound:
raise NonExistentScreennames(notFound)
if otherError:
raise ScreennameErrors(otherError)
protected = [name for name in users if users[name]['protected']]
if protected:
raise ProtectedScreennames(protected)
# Make sure to use defaults.FRIENDS_LIMIT here, not to import
# that value. That's because we can change the value (in the
# defaults module) using the admin interface.
tooMany = [(name, users[name]['friends_count']) for name in users if
(users[name]['friends_count'] > defaults.FRIENDS_LIMIT and
not cache.adderCache.added(name))]
if tooMany:
raise TooManyFriends(tooMany)
# Enqueue screennames of users that are not yet known.
unknown = [name for name in users if not cache.adderCache.known(name)]
if unknown:
for name in unknown:
cache.adderCache.put(name, users[name]['friends_count'],
PRIORITY_NORMAL)
# Get the status of all the queried screennames.
statusSummary = cache.adderCache.statusSummary(screennames)
# Raise if not all queried screennames are added.
if len(statusSummary['added']) != len(screennames):
raise UnaddedScreennames(statusSummary)
# We're good to go.
queryStr = query.queryTreeToString(
queryTree, TWITTER_USERNAME, TWITTER_FRIENDS_NAMESPACE_NAME)
try:
result = cache.queryCache.lookupQueryResult(queryStr)
except KeyError:
def _cacheResult(result, queryStr, cache, screennames):
cache.queryCache.storeQueryResult(queryStr, result, screennames)
return result
log.msg('Cache miss on query %r.' % queryStr)
d = Object.query(endpoint, queryStr)
d.addCallback(lambda results: [r.uuid for r in results])
d.addCallback(_cacheResult, queryStr, cache, screennames)
return d
else:
log.msg('Query cache hit (size %d) for | |
<reponame>UTS-AnimalLogicAcademy/nuke-ML-server
# Copyright (c) 2020 Foundry.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import division, print_function, absolute_import
from builtins import input # python 2/3 forward-compatible (raw_input)
import sys
import os
import time
import random
import argparse
from datetime import datetime
import numpy as np
import tensorflow as tf
print(tf.__version__)
tf.compat.v1.enable_eager_execution()
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from common.model_builder import baseline_model
from common.util import get_filepaths_from_dir, get_ckpt_list, print_
from common.util import is_exr, read_resize_exr, linear_to_srgb
def enable_deterministic_training(seed, no_gpu_patch=False):
    """Set all seeds for deterministic training
    Args:
        seed (int): seed applied to the Python, NumPy and TensorFlow RNGs
            and to PYTHONHASHSEED.
        no_gpu_patch (bool): if False, apply a patch to TensorFlow to have
        deterministic GPU operations, if True the training is much faster
        but slightly less deterministic.
    This function needs to be called before any TensorFlow code.
    """
    # Local imports shadow the module-level ones; tfdeterminism is only
    # needed (and imported) when this function is used.
    import numpy as np
    import os
    import random
    import tfdeterminism
    if not no_gpu_patch:
        # Patch stock TensorFlow to have deterministic GPU operation
        tfdeterminism.patch() # then use tf as normal
    # If PYTHONHASHSEED environment variable is not set or set to random,
    # a random value is used to seed the hashes of str, bytes and datetime
    # objects. (Necessary for Python >= 3.2.3)
    os.environ['PYTHONHASHSEED']=str(seed)
    # Set python built-in pseudo-random generator at a fixed value
    random.seed(seed)
    # Set seed for random Numpy operation (e.g. np.random.randint)
    np.random.seed(seed)
    # Set seed for random TensorFlow operation (e.g. tf.image.random_crop)
    tf.compat.v1.random.set_random_seed(seed)
## DATA PROCESSING
def histogram(tensor, value_range=[0.0, 1.0], nbins=100):
    """Return the histogram of `tensor`, normalised so bin counts sum to 1.

    Args:
        tensor: rank-3 (h, w, c) image tensor.
        value_range: [min, max] range the bins cover.
        nbins: number of histogram bins.
    """
    height, width, channels = tensor.shape
    counts = tf.histogram_fixed_width(tensor, value_range, nbins=nbins)
    return tf.divide(counts, height * width * channels)
def gamma_correction(img, gamma):
    """Gamma-grade `img` and return the stacked histograms of both versions.

    Args:
        img: image tensor.
        gamma: shape-(1,) tensor holding the gamma exponent.
    Returns:
        hists: stack of the original and graded image histograms (in that
            order, axis 0).
    Raises:
        ValueError: if `gamma` does not hold exactly one parameter.
    """
    # Check number of parameter is one
    if gamma.shape[0] != 1:
        raise ValueError("Parameter for gamma correction must be of "
            "size (1,), not {}.\n\tCheck your self.output_param_number, ".format(gamma.shape)
            + "you may need to implement your own input_data preprocessing.")
    # Create groundtruth graded image
    graded_img = tf.math.pow(img, gamma)
    # Stack the two histograms along a new leading axis
    return tf.stack([histogram(img), histogram(graded_img)], axis=0)
## CUSTOM TRAINING METRICS
def bin_acc(y_true, y_pred, delta=0.02):
    """Fraction of predictions within `delta` of the true value.

    A prediction counts as correct (1.0) when |y_true - y_pred| < delta,
    otherwise incorrect (0.0); the mean of those outcomes is returned.
    """
    abs_error = tf.keras.backend.abs(y_true - y_pred)
    within_delta = tf.keras.backend.less(abs_error, delta)
    return tf.keras.backend.mean(within_delta)
class TrainModel(object):
"""Train Regression model from the given data"""
    def __init__(self, args):
        """Read hyperparameters from parsed CLI `args`, optionally enable
        deterministic training, create checkpoint/summary directories and
        discover the training/validation datasets.

        Raises:
            ValueError: no training data, or batch size larger than the
                training/validation dataset.
            TypeError: train and validation data have different file formats.
        """
        # Training hyperparameters
        self.learning_rate = args.learning_rate
        self.batch_size = args.batch_size
        self.epoch = args.epoch
        self.patch_size = 50
        self.channels = 3 # input / output channels
        self.output_param_number = 1
        self.no_resume = args.no_resume
        # A random seed (!=None) allows you to reproduce your training results
        self.seed = args.seed
        if self.seed is not None:
            # Set all seeds necessary for deterministic training
            enable_deterministic_training(self.seed, args.no_gpu_patch)
        # Training and validation dataset paths
        train_data_path = './data/train/'
        val_data_path = './data/validation/'
        # Where to save and load model weights (=checkpoints)
        self.ckpt_dir = './checkpoints'
        if not os.path.exists(self.ckpt_dir):
            os.makedirs(self.ckpt_dir)
        self.ckpt_save_name = args.ckpt_save_name
        # Where to save tensorboard summaries
        self.summaries_dir = './summaries/'
        if not os.path.exists(self.summaries_dir):
            os.makedirs(self.summaries_dir)
        # Get training dataset as list of image paths
        self.train_data_list = get_filepaths_from_dir(train_data_path)
        if not self.train_data_list:
            raise ValueError("No training data found in folder {}".format(train_data_path))
        elif (len(self.train_data_list) < self.batch_size):
            raise ValueError("Batch size must be smaller than the dataset (batch size = {}, number of training data = {})"
                .format(self.batch_size, len(self.train_data_list)))
        # File format (EXR vs. 8-bit) decides the decode path in get_data().
        self.is_exr = is_exr(self.train_data_list[0])
        # Compute and print training hyperparameters
        self.batch_per_epoch = (len(self.train_data_list)) // self.batch_size
        max_steps = int(self.epoch * (self.batch_per_epoch))
        print_("Number of training data: {}\nNumber of batches per epoch: {} (batch size = {})\nNumber of training steps for {} epochs: {}\n"
            .format(len(self.train_data_list), self.batch_per_epoch, self.batch_size, self.epoch, max_steps), 'm')
        # Get validation dataset if provided
        self.has_val_data = True
        self.val_data_list = get_filepaths_from_dir(val_data_path)
        if not self.val_data_list:
            print("No validation data found in {}".format(val_data_path))
            self.has_val_data = False
        elif (len(self.val_data_list) < self.batch_size):
            raise ValueError("Batch size must be smaller than the dataset (batch size = {}, number of validation data = {})"
                .format(self.batch_size, len(self.val_data_list)))
        else:
            val_is_exr = is_exr(self.val_data_list[0])
            if (val_is_exr and not self.is_exr) or (not val_is_exr and self.is_exr):
                raise TypeError("Train and validation data should have the same file format")
            self.val_batch_per_epoch = (len(self.val_data_list)) // self.batch_size
            print("Number of validation data: {}\nNumber of validation batches per epoch: {} (batch size = {})"
                .format(len(self.val_data_list), self.val_batch_per_epoch, self.batch_size))
    def get_data(self, data_list, batch_size=16, epoch=100, shuffle_buffer_size=1000):
        """Build a batched tf.data pipeline of (input_data, gamma) pairs.

        Each image path is paired with a random gamma value, decoded,
        resized to patch_size, converted to grayscale and turned into a
        stack of original/graded histograms by gamma_correction().

        Args:
            data_list: list of image file paths.
            batch_size: samples per batch.
            epoch: how many times the path list is repeated.
            shuffle_buffer_size: buffer for dataset shuffling.
        Returns:
            A shuffled, repeated, batched and prefetched tf.data.Dataset.
        """
        def read_and_preprocess_data(path_img, param):
            """Read image in path_img, resize it to patch_size,
            convert to grayscale and apply a random gamma grade to it
            Returns:
                input_data: stack of both original and graded image histograms
                param: groundtruth gamma value
            """
            if self.is_exr: # ['exr', 'EXR']
                # EXR decoding runs as numpy code; shape info is lost, so the
                # explicit reshape below restores the static shape.
                img = tf.numpy_function(read_resize_exr,
                    [path_img, self.patch_size], [tf.float32])
                img = tf.numpy_function(linear_to_srgb, [img], [tf.float32])
                img = tf.reshape(img, [self.patch_size, self.patch_size, self.channels])
                img = tf.image.rgb_to_grayscale(img)
            else: # ['jpg', 'jpeg', 'png', 'bmp', 'JPG', 'JPEG', 'PNG', 'BMP']
                img_raw = tf.io.read_file(path_img)
                img_tensor = tf.image.decode_png(img_raw, channels=3)
                img = tf.cast(img_tensor, tf.float32) / 255.0
                img = tf.image.rgb_to_grayscale(img)
                img = tf.image.resize(img, [self.patch_size, self.patch_size])
            # Depending on what parameter(s) you want to learn, modify the training
            # input data. Here to learn gamma correction, our input data trainX is
            # a stack of both original and gamma-graded histograms.
            input_data = gamma_correction(img, param)
            return input_data, param

        with tf.compat.v1.variable_scope('input'):
            # Ensure preprocessing is done on the CPU (to let the GPU focus on training)
            with tf.device('/cpu:0'):
                data_tensor = tf.convert_to_tensor(data_list, dtype=tf.string)
                path_dataset = tf.data.Dataset.from_tensor_slices((data_tensor))
                path_dataset = path_dataset.shuffle(shuffle_buffer_size).repeat(epoch)
                # Depending on what parameter(s) you want to learn, modify the random
                # uniform range. Here create random gamma values between 0.2 and 5
                param_tensor = tf.random.uniform(
                    [len(data_list)*epoch, self.output_param_number], 0.2, 5.0)
                param_dataset = tf.data.Dataset.from_tensor_slices((param_tensor))
                dataset = tf.data.Dataset.zip((path_dataset, param_dataset))
                # Apply read_and_preprocess_data function to all input in the path_dataset
                dataset = dataset.map(read_and_preprocess_data, num_parallel_calls=4)
                dataset = dataset.batch(batch_size)
                # Always prefetch one batch and make sure there is always one ready
                dataset = dataset.prefetch(buffer_size=1)
        return dataset
def tensorboard_callback(self, writer):
    """Build a Keras callback that logs the main metrics to Tensorboard.

    :param writer: tf.contrib.summary file writer used as the summary sink.
    :return: a LambdaCallback invoked at the end of every epoch.
    """
    def on_epoch_end(epoch, logs):
        """Record train (and, when available, validation) loss/accuracy."""
        names = ['loss', 'bin_acc']
        if self.has_val_data:
            names += ['val_loss', 'val_bin_acc']
        with writer.as_default(), tf.contrib.summary.always_record_summaries():
            for name in names:
                # Keep the historical tags: train metrics get a 'train_'
                # prefix, validation metrics already carry 'val_'.
                tag = name if name.startswith('val_') else 'train_' + name
                tf.contrib.summary.scalar(tag, logs[name], step=epoch)
            tf.contrib.summary.flush()
    return tf.keras.callbacks.LambdaCallback(on_epoch_end=on_epoch_end)
def get_compiled_model(self, input_shape):
    """Create the baseline network and compile it for regression training.

    :param input_shape: shape of a single input sample (no batch axis).
    :return: a compiled tf.keras model (Adam optimizer, MSE loss,
        tracking the project-defined ``bin_acc`` metric).
    """
    optimizer = tf.keras.optimizers.Adam(lr=self.learning_rate)
    model = baseline_model(input_shape,
                           output_param_number=self.output_param_number)
    # MSE regression loss; bin_acc is a project metric — presumably a
    # binned-accuracy measure, confirm against its definition.
    model.compile(optimizer=optimizer,
                  loss='mean_squared_error',
                  metrics=[bin_acc])
    return model
def train(self):
# Create a session so that tf.keras don't allocate all GPU memory at once
sess = tf.compat.v1.Session(
config=tf.compat.v1.ConfigProto(
gpu_options=tf.compat.v1.GPUOptions(allow_growth=True)))
tf.compat.v1.keras.backend.set_session(sess)
# Get training and validation dataset
ds_train = self.get_data(
self.train_data_list,
self.batch_size,
self.epoch)
for x, y in ds_train.take(1): # take one batch from ds_train
trainX, trainY = x, y
print("Input shape {}, target shape: {}".format(trainX.shape, trainY.shape))
if self.has_val_data:
ds_val = self.get_data(
self.val_data_list,
self.batch_size,
self.epoch)
print("********Data Created********")
# Build model
model = self.get_compiled_model(trainX.shape[1:])
# Check if there are intermediate trained model to load
if self.no_resume or not self.load(model):
print_("Starting training from scratch\n", 'm')
# Callback for creating Tensorboard summary
summary_name = ("data{}_bch{}_ep{}".format(
len(self.train_data_list), self.batch_size, self.epoch))
summary_name += ("_seed{}".format(self.seed) if self.seed is not None else "")
summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(self.summaries_dir, summary_name))
tb_callback = self.tensorboard_callback(summary_writer)
# Callback for saving model's weights
ckpt_path = os.path.join(self.ckpt_dir, self.ckpt_save_name + "-ep{epoch:02d}")
ckpt_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=ckpt_path,
# save best model based on monitor value
monitor='val_loss' if self.has_val_data else 'loss',
verbose=1,
save_best_only=True,
save_weights_only=True)
# Evaluate the model before training
if self.has_val_data:
val_loss, val_bin_acc = model.evaluate(ds_val.take(20), verbose=1)
print("Initial Loss on validation dataset: {:.4f}".format(val_loss))
# TRAIN model
print_("--------Start of training--------\n", 'm')
print("NOTE:\tDuring training, the latest model is saved only if its\n"
"\t(validation) loss is better than the last best model.")
train_start = time.time()
model.fit(
ds_train,
validation_data=ds_val if self.has_val_data else None,
epochs=self.epoch,
steps_per_epoch=self.batch_per_epoch,
validation_steps=self.val_batch_per_epoch if self.has_val_data else None,
callbacks=[ckpt_callback, tb_callback],
verbose=1)
print_("Training duration: {:0.4f}s\n".format(time.time() - train_start), 'm')
print_("--------End of training--------\n", 'm')
# Show predictions on the first batch of training data
print("Parameter prediction (PR) compared to groundtruth | |
# Copyright (C) 2015-2018 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from future.utils import with_metaclass
from abc import ABCMeta, abstractmethod
from builtins import object
from functools import total_ordering
import logging
import os.path
from toil import subprocess
from toil import applianceSelf
from toil.lib.retry import never
a_short_time = 5
log = logging.getLogger(__name__)
@total_ordering
class Shape(object):
    """
    Represents a job or a node's "shape", in terms of the dimensions of memory, cores, disk and
    wall-time allocation.

    The wallTime attribute stores the number of seconds of a node allocation, e.g. 3600 for AWS,
    or 60 for Azure. FIXME: and for jobs?

    The memory and disk attributes store the number of bytes required by a job (or provided by a
    node) in RAM or on disk (SSD or HDD), respectively.

    Ordering: a non-preemptable shape outranks a preemptable one; ties are
    broken by memory, then cores, then disk, then wallTime (larger is greater).
    """
    def __init__(self, wallTime, memory, cores, disk, preemptable):
        self.wallTime = wallTime
        self.memory = memory
        self.cores = cores
        self.disk = disk
        self.preemptable = preemptable

    def __eq__(self, other):
        if not isinstance(other, Shape):
            # Return NotImplemented instead of raising AttributeError so
            # Python can fall back to the reflected comparison / identity.
            return NotImplemented
        return (self.wallTime == other.wallTime and
                self.memory == other.memory and
                self.cores == other.cores and
                self.disk == other.disk and
                self.preemptable == other.preemptable)

    def greater_than(self, other):
        """Return True if self strictly outranks other (see class docstring)."""
        # Lexicographic tuple comparison reproduces the original
        # preemptable -> memory -> cores -> disk -> wallTime branch chain.
        # 'not preemptable' leads so that non-preemptable shapes sort higher.
        self_key = (not self.preemptable, self.memory,
                    self.cores, self.disk, self.wallTime)
        other_key = (not other.preemptable, other.memory,
                     other.cores, other.disk, other.wallTime)
        return self_key > other_key

    def __gt__(self, other):
        # @total_ordering derives the remaining comparison operators.
        return self.greater_than(other)

    def __repr__(self):
        return "Shape(wallTime=%s, memory=%s, cores=%s, disk=%s, preemptable=%s)" % \
            (self.wallTime,
             self.memory,
             self.cores,
             self.disk,
             self.preemptable)

    def __str__(self):
        return self.__repr__()

    def __hash__(self):
        # Since we replaced __eq__ we need to replace __hash__ as well.
        return hash(
            (self.wallTime,
             self.memory,
             self.cores,
             self.disk,
             self.preemptable))
class AbstractProvisioner(with_metaclass(ABCMeta, object)):
"""
An abstract base class to represent the interface for provisioning worker nodes to use in a
Toil cluster.
"""
LEADER_HOME_DIR = '/root/' # home directory in the Toil appliance on an instance
def __init__(self, clusterName=None, zone=None, nodeStorage=50):
    """
    Initialize provisioner.

    :param clusterName: The cluster identifier.
    :param zone: The zone the cluster runs in.
    :param nodeStorage: The amount of storage on the worker instances, in gigabytes.
    """
    self.clusterName = clusterName
    self._zone = zone
    self._nodeStorage = nodeStorage
    # Unknown at construction time; populated once a leader exists
    # (presumably by readClusterSettings() in subclasses — confirm there).
    self._leaderPrivateIP = None
def readClusterSettings(self):
    """
    Initialize class from an existing cluster. This method assumes that
    the instance we are running on is the leader.

    Subclasses must override this; the base implementation is unusable.

    :raises NotImplementedError: always, in this base class.
    """
    raise NotImplementedError
def setAutoscaledNodeTypes(self, nodeTypes):
    """
    Set node types, shapes and spot bids. Preemptable nodes will have the form "type:spotBid".

    :param nodeTypes: A list of node type strings.
    """
    self._spotBidsMap = {}
    self.nodeShapes = []
    self.nodeTypes = []
    for typeStr in nodeTypes:
        parts = typeStr.split(":")
        if len(parts) != 2:
            # No bid attached: an ordinary, non-preemptable node type.
            self.nodeTypes.append(typeStr)
            self.nodeShapes.append(self.getNodeShape(typeStr, preemptable=False))
        else:
            # "<type>:<bid>" denotes a preemptable (spot) node type.
            name, bid = parts
            self.nodeTypes.append(name)
            self.nodeShapes.append(self.getNodeShape(name, preemptable=True))
            self._spotBidsMap[name] = bid
@staticmethod
def retryPredicate(e):
    """
    Return true if the exception e should be retried by the cluster scaler.
    For example, should return true if the exception was due to exceeding an API rate limit.
    The error will be retried with exponential backoff.

    :param e: exception raised during execution of setNodeCount
    :return: boolean indicating whether the exception e should be retried
    """
    # Base policy: retry nothing. `never` comes from toil.lib.retry;
    # subclasses override this method to whitelist transient errors.
    return never(e)
@abstractmethod
def launchCluster(self, leaderNodeType, leaderStorage, owner, **kwargs):
    """
    Initialize a cluster and create a leader node.

    :param leaderNodeType: The node type to use for the leader instance.
    :param leaderStorage: The amount of disk to allocate to the leader in gigabytes.
    :param owner: Tag identifying the owner of the instances.
    :param kwargs: Provisioner-specific options; see subclass implementations.
    """
    raise NotImplementedError
@abstractmethod
def addNodes(self, nodeType, numNodes, preemptable, spotBid=None):
    """
    Used to add worker nodes to the cluster

    :param nodeType: The type of node to add.
    :param numNodes: The number of nodes to add
    :param preemptable: whether or not the nodes will be preemptable
    :param spotBid: The bid for preemptable nodes if applicable (this can be set in config, also).
    :return: number of nodes successfully added
    """
    raise NotImplementedError
@abstractmethod
def terminateNodes(self, nodes):
    """
    Terminate the nodes represented by given Node objects

    :param nodes: list of Node objects to terminate
    """
    raise NotImplementedError
@abstractmethod
def getLeader(self):
    """
    Get the cluster's leader node.

    :return: The leader node.
    """
    raise NotImplementedError
@abstractmethod
def getProvisionedWorkers(self, nodeType, preemptable):
    """
    Gets all nodes of the given preemptability from the provisioner.
    Includes both static and autoscaled nodes.

    :param nodeType: node type of the workers to return (presumably used to
        filter the result — confirm in subclass implementations)
    :param preemptable: Boolean value indicating whether to return preemptable nodes or
       non-preemptable nodes
    :return: list of Node objects
    """
    raise NotImplementedError
@abstractmethod
def getNodeShape(self, nodeType=None, preemptable=False):
    """
    The shape of a preemptable or non-preemptable node managed by this provisioner. The node
    shape defines key properties of a machine, such as its number of cores or the time
    between billing intervals.

    :param str nodeType: Node type name to return the shape of.
    :param bool preemptable: Whether to return the shape of a preemptable node.
    :rtype: Shape
    """
    raise NotImplementedError
@abstractmethod
def destroyCluster(self):
    """
    Terminates all nodes in this provisioner's cluster and cleans up all
    resources associated with the cluster.
    """
    raise NotImplementedError
def _setSSH(self):
    """
    Generate a key pair, save it in /root/.ssh/id_rsa.pub, and return the public key.
    The file /root/.sshSuccess is used to prevent this operation from running twice.

    :return: Public key body (the base64 section of the OpenSSH public key line).
    """
    if not os.path.exists('/root/.sshSuccess'):
        subprocess.check_call(['ssh-keygen', '-f', '/root/.ssh/id_rsa', '-t', 'rsa', '-N', ''])
        with open('/root/.sshSuccess', 'w') as f:
            f.write('written here because of restrictive permissions on .ssh dir')
    os.chmod('/root/.ssh', 0o700)
    # Load the new key into a fresh ssh-agent so later rsync/ssh calls work.
    subprocess.check_call(['bash', '-c', 'eval $(ssh-agent) && ssh-add -k'])
    with open('/root/.ssh/id_rsa.pub') as f:
        masterPublicKey = f.read()
    masterPublicKey = masterPublicKey.split(' ')[1]  # take 'body' of key
    # confirm it really is an RSA public key
    # NOTE(review): '<KEY>' appears to be a redacted literal — an OpenSSH RSA
    # public-key body normally starts with 'AAAA'; confirm against upstream.
    assert masterPublicKey.startswith('<KEY>'), masterPublicKey
    return masterPublicKey
cloudConfigTemplate = """#cloud-config
write_files:
- path: "/home/core/volumes.sh"
permissions: "0777"
owner: "root"
content: |
#!/bin/bash
set -x
ephemeral_count=0
drives=""
directories="toil mesos docker cwl"
for drive in /dev/xvd{{a..z}} /dev/nvme{{0..26}}n1; do
echo checking for $drive
if [ -b $drive ]; then
echo found it
if mount | grep $drive; then
echo "already mounted, likely a root device"
else
ephemeral_count=$((ephemeral_count + 1 ))
drives="$drives $drive"
echo increased ephemeral count by one
fi
fi
done
if (("$ephemeral_count" == "0" )); then
echo no ephemeral drive
for directory in $directories; do
sudo mkdir -p /var/lib/$directory
done
exit 0
fi
sudo mkdir /mnt/ephemeral
if (("$ephemeral_count" == "1" )); then
echo one ephemeral drive to mount
sudo mkfs.ext4 -F $drives
sudo mount $drives /mnt/ephemeral
fi
if (("$ephemeral_count" > "1" )); then
echo multiple drives
for drive in $drives; do
dd if=/dev/zero of=$drive bs=4096 count=1024
done
sudo mdadm --create -f --verbose /dev/md0 --level=0 --raid-devices=$ephemeral_count $drives # determine force flag
sudo mkfs.ext4 -F /dev/md0
sudo mount /dev/md0 /mnt/ephemeral
fi
for directory in $directories; do
sudo mkdir -p /mnt/ephemeral/var/lib/$directory
sudo mkdir -p /var/lib/$directory
sudo mount --bind /mnt/ephemeral/var/lib/$directory /var/lib/$directory
done
coreos:
update:
reboot-strategy: off
units:
- name: "volume-mounting.service"
command: "start"
content: |
[Unit]
Description=mounts ephemeral volumes & bind mounts toil directories
Before=docker.service
[Service]
Type=oneshot
Restart=no
ExecStart=/usr/bin/bash /home/core/volumes.sh
- name: "toil-{role}.service"
command: "start"
content: |
[Unit]
Description=toil-{role} container
After=docker.service
[Service]
Restart=on-failure
RestartSec=2
ExecStartPre=-/usr/bin/docker rm toil_{role}
ExecStart=/usr/bin/docker run \
--entrypoint={entrypoint} \
--net=host \
-v /var/run/docker.sock:/var/run/docker.sock \
-v /var/lib/mesos:/var/lib/mesos \
-v /var/lib/docker:/var/lib/docker \
-v /var/lib/toil:/var/lib/toil \
-v /var/lib/cwl:/var/lib/cwl \
-v /tmp:/tmp \
--name=toil_{role} \
{dockerImage} \
{mesosArgs}
- name: "node-exporter.service"
command: "start"
content: |
[Unit]
Description=node-exporter container
After=docker.service
[Service]
Restart=on-failure
RestartSec=2
ExecStartPre=-/usr/bin/docker rm node_exporter
ExecStart=/usr/bin/docker run \
-p 9100:9100 \
-v /proc:/host/proc \
-v /sys:/host/sys \
-v /:/rootfs \
--name node-exporter \
--restart always \
prom/node-exporter:v0.15.2 \
--path.procfs /host/proc \
--path.sysfs /host/sys \
--collector.filesystem.ignored-mount-points ^/(sys|proc|dev|host|etc)($|/)
"""
sshTemplate = """ssh_authorized_keys:
- "ssh-rsa {sshKey}"
"""
# If keys are rsynced, then the mesos-slave needs to be started after the keys have been
# transferred. The waitForKey.sh script loops on the new VM until |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.