input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
import telegram
import sqlite3
import function
import cardfunction
import thread_lock
import ast
import air
import drawmap
import os
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
from telegram.ext import Updater, CommandHandler, CallbackQueryHandler, MessageHandler, Filters
org_dir = os.getcwd()
class handler():
    """Tracks one pending status-card round: who has answered, which
    Telegram messages are outstanding, and the lock that blocks the
    originating thread until the round is resolved."""

    def __init__(self, type_, active_country_id, lock_id, passive_country_id = None, card_id = None, space_id = None, piece_id = None):
        self.type_ = type_
        self.active_country_id = active_country_id
        self.passive_country_id = passive_country_id
        self.card_id = card_id
        self.space_id = space_id
        self.piece_id = piece_id
        self.lock_id = lock_id
        # Telegram message id of the outstanding status card, per country.
        self.message_id = dict.fromkeys(('ge', 'jp', 'it', 'uk', 'su', 'us', 'fr', 'ch'))
        # True while the country has not responded (or has nothing to respond with).
        self.no_respone = dict.fromkeys(('ge', 'jp', 'it', 'uk', 'su', 'us', 'fr', 'ch'), True)
        self.one_side_pass = False
        self.air_defense = False
        self.air_attack = False
        self.first = True
        # Log the non-None construction parameters for debugging.
        details = {"type_": type_, "active_country_id": active_country_id,
                   "passive_country_id": passive_country_id, "card_id": card_id,
                   "space_id": space_id, "piece_id": piece_id, "lock_id": lock_id}
        summary = "status_handler add: "
        for key, value in details.items():
            if value is not None:
                summary += " [" + key + ": " + str(value) + "]"
        print(summary)
# Side lookup tables keyed by country id: the Axis ('ge', 'jp', 'it') and
# the Allies ('uk', 'su', 'us', 'fr', 'ch').  enemy_country_list maps a
# country to the opposing side; friendly_country_list maps it to its own
# side (including itself).  Used to decide who gets sent a status card.
enemy_country_list = {'ge': ['uk', 'su', 'us', 'fr', 'ch'],
                      'jp': ['uk', 'su', 'us', 'fr', 'ch'],
                      'it': ['uk', 'su', 'us', 'fr', 'ch'],
                      'uk': ['ge', 'jp', 'it'],
                      'su': ['ge', 'jp', 'it'],
                      'us': ['ge', 'jp', 'it'],
                      'fr': ['ge', 'jp', 'it'],
                      'ch': ['ge', 'jp', 'it']}
friendly_country_list = {'ge': ['ge', 'jp', 'it'],
                         'jp': ['ge', 'jp', 'it'],
                         'it': ['ge', 'jp', 'it'],
                         'uk': ['uk', 'su', 'us', 'fr', 'ch'],
                         'su': ['uk', 'su', 'us', 'fr', 'ch'],
                         'us': ['uk', 'su', 'us', 'fr', 'ch'],
                         'fr': ['uk', 'su', 'us', 'fr', 'ch'],
                         'ch': ['uk', 'su', 'us', 'fr', 'ch']}
def send_status_card(bot, active_country_id, type_, lock_id, session, passive_country_id = None, card_id = None, space_id = None, piece_id = None):
    """Open a status-card round: register a handler and send the current map
    with response buttons to every country that can respond.

    The enemy side of *active_country_id* is polled first; only when no enemy
    country has a possible response does the friendly side get polled.  If
    nobody on either side can respond, the handler is discarded and *lock_id*
    is released; otherwise the calling thread blocks on
    session.thread_lock(lock_id) until send_status_card_cb resolves the round.

    NOTE(review): ``info_list`` is used as a dispatch table mapping ``type_``
    to an info-builder returning (chat_id, caption, reply_markup-or-None);
    it is defined elsewhere in this module (not visible here) -- confirm
    against the full file.
    """
    db = sqlite3.connect(session.get_db_dir())
    session.draw_map()  # refresh tmp.jpg so every card shows the current map
    session.handler_list.append(handler(type_, active_country_id, lock_id, passive_country_id, card_id, space_id, piece_id))
    print("status_handler_id: " + str(len(session.handler_list)-1))
    handler_id = len(session.handler_list)-1
    # --- enemy side first ---
    pass_ = True  # stays True while no enemy country has a possible response
    for country in enemy_country_list[active_country_id]:
        info = info_list[type_](country, handler_id, session)
        if info[2] == None:
            # No reply_markup -> this country has nothing it can play.
            session.handler_list[handler_id].no_respone[country] = True
        else:
            print('have - response ' + country)
            session.handler_list[handler_id].no_respone[country] = False
            pass_ = False
            status_message_id = bot.send_photo(chat_id = info[0], caption = info[1], reply_markup = info[2], parse_mode=telegram.ParseMode.HTML, photo=open(session.get_dir() + '/tmp.jpg', 'rb'))
            session.handler_list[handler_id].message_id[country] = status_message_id.message_id
    if pass_:
        # Whole enemy side auto-passed: move straight on to the friendly side.
        air.check_reposition(bot, session)
        session.handler_list[handler_id].first = False
        session.handler_list[handler_id].one_side_pass = True
        pass_ = True
        for country in friendly_country_list[active_country_id]:
            info = info_list[type_](country, handler_id, session)
            if info[2] == None:
                session.handler_list[handler_id].no_respone[country] = True
            else:
                print('have - response ' + country)
                session.handler_list[handler_id].no_respone[country] = False
                pass_ = False
                status_message_id = bot.send_photo(chat_id = info[0], caption = info[1], reply_markup = info[2], parse_mode=telegram.ParseMode.HTML, photo=open(session.get_dir() + '/tmp.jpg', 'rb'))
                print(country + ' add status_message_id:' + str(status_message_id.message_id))
                session.handler_list[handler_id].message_id[country] = status_message_id.message_id
        if pass_:
            # Nobody at all can respond: close the round immediately.
            air.check_reposition(bot, session)
            session.handler_list.pop(handler_id)
            session.release_lock(lock_id)
        else:
            # Friendly side is thinking: block until the callback resolves it.
            session.thread_lock(lock_id)
    else:
        # Enemy side is thinking: block until the callback resolves it.
        session.thread_lock(lock_id)
def send_status_card_cb(bot, query, query_list, session):
    """Callback-query handler for an open status-card round.

    ``query_list`` layout (parsed from the inline-button callback data):
        [0] callback tag, [1] responding country id, [2] handler_id,
        [3] action ('pass' / 'confirm' / 'back' / a card id / 'no_play'),
        [-1] on confirm: the selected card id, or 'air_a' / 'air_d'.

    Actions:
      'pass'    -- mark the country as passed; once its whole side has
                   passed, delete the side's cards and either finish the
                   round (if the other side already passed) or set
                   one_side_pass.
      'confirm' -- acknowledge, delete teammates' cards, execute the chosen
                   card / air action, and clear one_side_pass (a play
                   re-opens the round for both sides).
      other     -- card browsing: 'back' redraws the selection list,
                   anything else shows one card with Confirm/Back buttons.

    After 'pass'/'confirm' the remaining countries' cards are refreshed and
    the round is closed (handler popped, lock released) once everyone has
    passed.
    """
    db = sqlite3.connect(session.get_db_dir())
    handler_id = query_list[2]
    info_type = session.handler_list[handler_id].type_
    lock_id = session.handler_list[handler_id].lock_id
    if session.handler_list[handler_id].card_id != None:
        # card_name is only defined (and only read below) when this round
        # was opened for a specific card.
        card_name = db.execute("select name from card where cardid = :cardid;",{'cardid':session.handler_list[handler_id].card_id}).fetchall()
    if query_list[3] == 'pass':
        session.handler_list[handler_id].first = False
        session.handler_list[handler_id].no_respone[query_list[1]] = True
        if not all([session.handler_list[handler_id].no_respone[country] for country in friendly_country_list[query_list[1]]]):
            # Teammates are still thinking: just acknowledge this player's pass.
            if session.handler_list[handler_id].card_id != None:
                text = "<b>[" + card_name[0][0] + " - " + info_type + "]</b> - You pass, waiting other players..."
            else:
                text = "<b>[" + info_type + "]</b> - You pass, waiting other players..."
            bot.edit_message_caption(chat_id=query.message.chat_id, message_id=query.message.message_id, caption=text , parse_mode=telegram.ParseMode.HTML)
            return
        # The whole side has passed: remove its outstanding status cards.
        for country in friendly_country_list[query_list[1]]:
            message_id = session.handler_list[handler_id].message_id[country]
            print(country + ' status_message_id: ' + str(message_id))
            if message_id != None:
                chat_id = db.execute("select playerid from country where id =:country;", {'country':country}).fetchall()
                bot.delete_message(chat_id=chat_id[0][0], message_id = message_id)
                session.handler_list[handler_id].message_id[country] = None
        if session.handler_list[handler_id].one_side_pass:
            # The other side had already passed -> round over.
            session.handler_list.pop(handler_id)
            session.release_lock(lock_id)
            return
        session.handler_list[handler_id].one_side_pass = True
        session.handler_list[handler_id].first = False
    elif query_list[3] == 'confirm':
        # Build the acknowledgement caption for the confirming player.
        if query_list[-1] == 'air_a':
            text = "<b>[" + info_type + "]</b> - You used Air Attack, processsing..."
        elif query_list[-1] == 'air_d':
            text = "<b>[" + info_type + "]</b> - You used Air Defense, processsing..."
        elif session.handler_list[handler_id].card_id != None:
            used_card_name = db.execute("select name from card where cardid = :card;",{'card':query_list[-1]}).fetchall()
            text = "<b>[" + card_name[0][0] + " - " + info_type + "]</b> - You used " + used_card_name[0][0] + ", processsing..."
        else:
            used_card_name = db.execute("select name from card where cardid = :card;",{'card':query_list[-1]}).fetchall()
            text = "<b>[" + info_type + "]</b> - You used " + used_card_name[0][0] + ", processsing..."
        bot.edit_message_caption(chat_id=query.message.chat_id, message_id=query.message.message_id, caption=text, parse_mode=telegram.ParseMode.HTML)
        # Remove the teammates' (not the confirmer's own) status cards.
        for country in friendly_country_list[query_list[1]]:
            if country != query_list[1]:
                message_id = session.handler_list[handler_id].message_id[country]
                if message_id != None:
                    chat_id = db.execute("select playerid from country where id =:country;", {'country':country}).fetchall()
                    bot.delete_message(chat_id=chat_id[0][0], message_id = message_id)
                    session.handler_list[handler_id].message_id[country] = None
        # Something was played, so the round is live again for both sides.
        session.handler_list[handler_id].one_side_pass = False
        #card execute
        if query_list[-1] == 'air_a':
            air_a_lock_id = session.add_lock()
            air.air_attack_list.append(air.air_attack(query_list[2], air_a_lock_id, session))
            print("air_attack_id: " + str(len(air.air_attack_list)-1))
            air_attack_id = len(air.air_attack_list)-1
            info = air.air_attack_list[air_attack_id].air_attack_info(session)
            bot.send_message(chat_id = info[0], text = info[1], reply_markup = info[2])
            # Block until the air-attack interaction completes.
            session.thread_lock(air_a_lock_id)
        elif query_list[-1] == 'air_d':
            air.air_defense(bot, query_list[2], session)
        else:
            cardfunction.play_status(bot, query_list[-1], query_list[1], query_list[2], session)
        #card execute
        bot.delete_message(chat_id=query.message.chat_id, message_id=query.message.message_id)
        session.handler_list[handler_id].message_id[query_list[1]] = None
        session.handler_list[handler_id].first = False
    else:
        # Card browsing inside the status card.
        if query_list[3] == 'back':
            # Redraw the original selection list for this country.
            info = info_list[info_type](query_list[1], handler_id, session)
            chat_id = info[0]
            text = info[1]
            reply_markup = info[2]
        else:
            # Show a single card's details with Confirm/Back buttons;
            # 'no_play' suppresses the Confirm button.
            selected = db.execute("select name, type, text from card where cardid = :cardid;", {'cardid':query_list[-1]}).fetchall()
            text = "<b>" + selected[0][0] + "</b> - " + selected[0][1] + " - " + selected[0][2]
            keyboard = []
            if query_list[3] != 'no_play':
                keyboard += [[InlineKeyboardButton('Confirm', callback_data="['{}', '{}', {}, 'confirm', {}]".format(query_list[0], query_list[1], query_list[2], query_list[-1]))]]
            keyboard += [[InlineKeyboardButton('Back', callback_data="['{}', '{}', {}, 'back']".format(query_list[0], query_list[1], query_list[2]))]]
            reply_markup = InlineKeyboardMarkup(keyboard)
        bot.edit_message_caption(chat_id=query.message.chat_id, message_id=query.message.message_id, caption=text, reply_markup=reply_markup, parse_mode=telegram.ParseMode.HTML)
    if query_list[3] in ['pass', 'confirm']:
        # Refresh the enemy side's cards; delete cards that no longer have
        # a possible response, (re)send/edit those that do.
        pass_ = True
        for country in enemy_country_list[query_list[1]]:
            info = info_list[info_type](country, handler_id, session)
            message_id = session.handler_list[handler_id].message_id[country]
            if info[2] == None: #No response
                if message_id != None:
                    bot.delete_message(chat_id= info[0], message_id = message_id)
                    session.handler_list[handler_id].message_id[country] = None
                session.handler_list[handler_id].no_respone[country] = True
            else: #Have response
                print('have - response ' + country)
                session.handler_list[handler_id].no_respone[country] = False
                pass_ = False
                if message_id == None:
                    status_message_id = bot.send_photo(chat_id = info[0], caption = info[1], reply_markup = info[2], parse_mode=telegram.ParseMode.HTML, photo=open(session.get_dir() + '/tmp.jpg', 'rb'))
                    session.handler_list[handler_id].message_id[country] = status_message_id.message_id
                else:
                    bot.edit_message_caption(chat_id = info[0], message_id = message_id, caption = info[1], reply_markup = info[2], parse_mode=telegram.ParseMode.HTML)
        if pass_:
            # Enemy side fully passed.
            air.check_reposition(bot, session)
            if session.handler_list[handler_id].one_side_pass:
                # Both sides passed -> round over.
                session.handler_list.pop(handler_id)
                session.release_lock(lock_id)
                return
            session.handler_list[handler_id].one_side_pass = True
            # NOTE(review): friendly-side refresh is nested under the
            # enemy-side all-pass, mirroring send_status_card's structure --
            # confirm against the original file's indentation.
            pass_ = True
            for country in friendly_country_list[query_list[1]]:
                info = info_list[info_type](country, handler_id, session)
                message_id = session.handler_list[handler_id].message_id[country]
                if info[2] == None: #No respone
                    if message_id != None:
                        bot.delete_message(chat_id= info[0], message_id = message_id)
                        session.handler_list[handler_id].message_id[country] = None
                    session.handler_list[handler_id].no_respone[country] = True
                else: #Have respone
                    print('have - response ' + country)
                    session.handler_list[handler_id].no_respone[country] = False
                    pass_ = False
                    if message_id == None:
                        status_message_id = bot.send_photo(chat_id = info[0], caption = info[1], reply_markup = info[2], parse_mode=telegram.ParseMode.HTML, photo=open(session.get_dir() + '/tmp.jpg', 'rb'))
                        session.handler_list[handler_id].message_id[country] = status_message_id.message_id
                    else:
                        bot.edit_message_caption(chat_id = info[0], message_id = message_id, caption = info[1], reply_markup = info[2], parse_mode=telegram.ParseMode.HTML)
            if pass_:
                # Friendly side also has nothing left: close the round.
                air.check_reposition(bot, session)
                session.handler_list.pop(handler_id)
                session.release_lock(lock_id)
#------------------------------------------Status Handler Info------------------------------------------
#--------------------------------------------Battle---------------------------------------------
def status_battle_handler(bot, active_country, passive_country, space, session):
    """Fire battle-triggered event cards.

    Checks whether any of the trigger cards (41, 47, 52, 347) sits in the
    'played' pile and whether its condition on the defender / battle space
    matches; each match runs the corresponding cardfunction handler.
    Cards 47 and 52 are moved to the 'turn' pile after firing.
    """
    print('in status_battle_handler - ' + active_country)
    db = sqlite3.connect(session.get_db_dir())
    # Card ids that can react to a battle.
    s = [41, 47, 52, 347]
    space_info = db.execute("select distinct spaceid, type, name from space where spaceid = :space", {'space':space}).fetchall()
    # One '?' placeholder per card id for the IN (...) clause.
    questionmarks = '?' * len(s)
    avaliable_card = db.execute("select cardid, name from card where location = 'played' and cardid in ({});".format(','.join(questionmarks)), (s)).fetchall()
    if len(avaliable_card) > 0:
        for card in avaliable_card:
            # Card 41: defender is Axis and the battle is in space 12.
            if card[0] == 41 and passive_country in ('ge','jp','it') and space == 12:
                cardfunction.c41(bot, active_country, session)
            # Card 47: defender is Germany and the space is a land space.
            if card[0] == 47 and passive_country == 'ge' and space_info[0][1] == 'land':
                cardfunction.c47(bot, active_country, session)
                db.execute("update card set location = 'turn' where cardid = 47")
            # Card 52: defender is Axis and the battle is in space 16.
            if card[0] == 52 and passive_country in ('ge','jp','it') and space == 16:
                cardfunction.c52(bot, active_country, session)
                db.execute("update card set location = 'turn' where cardid = 52")
            # Card 347: defender is China.
            if card[0] == 347 and passive_country =='ch':
                cardfunction.c347(bot, session)
    db.commit()
def status_battle_handler_info(country, handler_id, session):
print('in status_battle_handler_info - ' + country)
db = sqlite3.connect(session.get_db_dir())
s = {'ge':[43, 45], 'jp':[97, 98, 99, 101, 102, 104, 107, 109, 112, 119, 120], 'it':[167, 168, 170], 'uk':[229, 230, 231, 232, 234, 242], 'su':[276, 284, 286, 287, 288, 289, 291, 292, 296, 303], 'us':[344, 346, 350, 363], 'fr':[], 'ch':[]}
chat_id = db.execute("select playerid from country where id = :id;",{'id':country}).fetchall()
active_country = session.handler_list[handler_id].active_country_id
| |
from datetime import datetime, date
import numpy
import pandas
import copy
import uuid
from past.builtins import basestring # pip install future
from pandas.io.formats.style import Styler
from functools import partial, reduce
from .offline import iplot, plot
from IPython.core.display import HTML, display
import plotly.io as pio
pio.renderers.default = 'iframe_connected' #required to return a 'text/html' iframe bundle that can then be dropped as html
# Ten-colour categorical palette available as a chart colour cycle.
DEFAULT_COLORS = [
    '#1f77b4',  # muted blue
    '#ff7f0e',  # safety orange
    '#2ca02c',  # cooked asparagus green
    '#d62728',  # brick red
    '#9467bd',  # muted purple
    '#8c564b',  # chestnut brown
    '#e377c2',  # raspberry yogurt pink
    '#7f7f7f',  # middle gray
    '#bcbd22',  # curry yellow-green
    '#17becf'   # blue-teal
]
from itertools import zip_longest
def grouped(iterable, n):
    """Group a sequence of objects into a sequence of n-tuples.

    s -> (s0,...,sn-1), (sn,...,s2n-1), ...; the final tuple is padded with
    None when len(s) is not a multiple of n (zip_longest's default fill).
    Adapted from http://stackoverflow.com/a/5389547/1280629
    """
    shared_iterator = iter(iterable)
    return zip_longest(*([shared_iterator] * n))
def charts_table(charts, cols):
    """Draw a sequence of HTML charts (e.g. plotly interactive charts) as
    'subplots' in a table with `cols` columns.

    `charts` may mix _PlotlyChartBundle objects and pandas Stylers; None
    entries (padding in the final, short row produced by grouped()) are
    skipped.  The assembled table is shown in the notebook via display().

    Raises ValueError when a plotly bundle offers neither an HTML, SVG nor
    PNG representation.
    """
    table_content = """
    <style>
    /* for chart subplots tables produced by 'draw_charts_table' */
    .table-no-border {
        border: none !important;
    }
    </style>
    <table class="table-no-border">
    """
    for row in grouped(charts, cols):
        table_content += '<tr class="table-no-border">'
        for chart in row:
            if chart is not None:
                if isinstance(chart, _PlotlyChartBundle):
                    #odd re-writing of width and height needed to ensure they are not
                    #overwritten by multiple charts plotted simultaneously
                    if 'layout' in chart.data_layout:
                        layout = chart.data_layout['layout']
                        layout['width'] = chart.width
                        layout['height'] = chart.height
                    bundle = chart._repr_mimebundle_()
                    # Prefer interactive HTML, then SVG, then a static PNG.
                    bundle_content = None
                    for k in ['text/html', 'image/svg+xml']:
                        bundle_content = bundle.get(k, None)
                        if bundle_content is not None:
                            break
                    if bundle_content is None:
                        if 'image/png' in bundle:
                            base64_img = bundle['image/png']
                            # FIX: <img> is a void element; the previous markup
                            # '<img src="..."</img>' was missing the closing '>'
                            # of the opening tag and used an invalid end tag.
                            bundle_content = f'<img src="data:image/png;base64,{base64_img}"/>'
                        else:
                            raise ValueError('No html, svg or png bundle available (only %s available) - check value of plotly.pio.renderers.default'
                                             % ', '.join(bundle.keys()))
                elif isinstance(chart, Styler):
                    # NOTE(review): Styler.render() is deprecated in newer pandas
                    # (use Styler.to_html()) -- confirm the pinned pandas version.
                    bundle_content = chart.render()
                table_content += '<td class="table-no-border">%s</td>' % bundle_content
        table_content += '</tr>'
    table_content += '</table>'
    display(HTML(table_content))
def percent_axis(axis_settings = {}, tick_precision = 0, hover_precision = 2):
    """Return a new axis-settings dict with percent formatting applied.

    Copies *axis_settings* (the argument is never mutated) and sets
    'tickformat' / 'hoverformat' to percent formats with the given numbers
    of decimal places.
    """
    merged = dict(axis_settings)
    merged['tickformat'] = ',.%d%%' % tick_precision
    merged['hoverformat'] = ',.%d%%' % hover_precision
    return merged
# Base Plotly layout merged (via dict_merge) under every caller-supplied
# layout, so explicit settings always win over these defaults.
default_layout = {
    #'yaxis': {
    #    'hoverformat': '.2f',
    #},
    #'xaxis': {
    #    'hoverformat': '.2f',
    #},
    'template': 'plotly_white',
    'margin': {
        'l': 60,
        'r': 50,
        'b': 50,
        't': 50,
        'pad': 4
    },
    'autosize': True
}
# Default Plotly config: hide the "Edit chart" link on rendered charts.
default_config = {'showLink': False}
def dict_merge(a, b, path=None):
    """Recursively merge *b* into *a* (in place) and return *a*.

    Keys whose values are dicts on both sides are merged key-by-key; dict
    values new to *a* are deep-copied so later mutation of *b* cannot leak
    through; every other value from *b* simply overwrites the one in *a*.
    *path* accumulates the key trail during recursion.
    """
    if path is None:
        path = []
    for key in b:
        incoming = b[key]
        merging_two_dicts = (key in a
                             and isinstance(a[key], dict)
                             and isinstance(incoming, dict))
        if merging_two_dicts:
            dict_merge(a[key], incoming, path + [str(key)])
        elif isinstance(incoming, dict):
            a[key] = copy.deepcopy(incoming)
        else:
            a[key] = incoming
    return a
class _PlotlyChartBundle(object):
    """Class for returning a displayable object wrapping a plotly chart.
    This is used to wrap a plotted chart so we can then drop it into a table
    if required.  The setter methods return self so calls can be chained."""
    def __init__(self, data_layout, width, height, config):
        # data_layout: a plotly figure dict of the form
        # {'data': [...traces...], 'layout': {...}}.
        self.data_layout = data_layout
        self.width = width
        self.height = height
        self.config = config
    def _repr_mimebundle_(self, *args, **kwargs):
        #use iplot to return a renderable bundle
        bundle = iplot(self.data_layout,
                       image_width = self.width,
                       image_height = self.height,
                       config = self.config,
                       return_bundle = True)
        return bundle
    def set_title(self, title):
        """Set the chart title; returns self for chaining."""
        self.data_layout['layout']['title'] = title
        return self
    def _get_or_create_subdict(self, path):
        # Walk *path* (a sequence of keys) inside data_layout, creating
        # empty dicts along the way, and return the innermost dict.
        d = self.data_layout
        for k in path:
            if k not in d:
                d[k] = {}
            d = d[k]
        return d
    def set_axislabel(self, axis, title, replace_existing = True):
        """Set the title of the given axis ('x' or 'y').  When
        replace_existing is False an already-set title is kept."""
        axis_dict = self._get_or_create_subdict(('layout', '%saxis' % axis))
        if replace_existing or 'title' not in axis_dict:
            axis_dict['title'] = title
        return self
    def set_xlabel(self, title, replace_existing = True):
        """Convenience wrapper: set the x-axis title."""
        return self.set_axislabel('x', title, replace_existing)
    def set_ylabel(self, title, replace_existing = True):
        """Convenience wrapper: set the y-axis title."""
        return self.set_axislabel('y', title, replace_existing)
    def write_image(self, filename, scale, *args, **kwargs):
        """Export the chart as a static image via plotly.io.write_image."""
        pio.write_image(self.data_layout, filename, scale=scale, *args, **kwargs)
    def write_json(self, filename, *args, **kwargs):
        """Serialize the figure to a plotly JSON file."""
        pio.write_json(self.data_layout, filename, *args, **kwargs)
    def to_html(self, width = None, height = None, **kwargs):
        """Render the chart as an embeddable HTML div (plotly.js not included)."""
        return plot(self.data_layout,
                    output_type="div",
                    image_width = width or self.width,
                    image_height = height or self.height,
                    config = self.config,
                    include_plotlyjs = False,
                    **kwargs,
                    )
def scatter(df, x_col, y_col,
            groups_col = None, tooltip_cols = None, group_order = None,
            layout = dict(), series_dict = dict(), x_order = [], group_colours = dict(),
            color_col = None, size_col = None,
            scatter_type = 'scatter', #could be changed to e.g. scattergl
            auto_axis_title = True,
            legend_or_color_title = True,
            auto_break_legend_or_color_title = True,
            width = 600, height = 400):
    """Scatter plot from data in a DataFrame.
    The DataFrame to work on should be passed as `df`
    `x_col` should contain the column name of column contain x-axis values
    `y_col` should contain the column name of column contain y-axis values
    Use `color_col` to make scatter points coloured according to values in the column with that name e.g. by Temperature
    Use `groups_col` to make scatter points coloured by group according to values in that column e.g. by Country
    `layout` can contain extra Plotly layout info like chart title, axes titles etc
    e.g. {'title': 'chart title', 'xaxis': {'title': 'x title'}}
    `tooltip_cols` lists extra columns for the hover text; `group_order` fixes
    the legend order; `series_dict` is merged into every trace; `x_order`
    reorders points by an explicit x ordering; `size_col` maps a column to
    marker size.  Returns a _PlotlyChartBundle.
    Raises RuntimeError if both `groups_col` and `color_col` are given.
    """
    pl_data = []
    if groups_col is not None and color_col is not None:
        raise RuntimeError('Only one of "groups_col" or "color_col" should be provided when calling this function')
    if tooltip_cols is None:
        tooltip_cols = []
    # breakdown_col drives both the grouping/colouring and the legend/colorbar title.
    breakdown_col = groups_col or color_col
    if breakdown_col is not None:
        if breakdown_col not in tooltip_cols:
            tooltip_cols.insert(0, breakdown_col)
        if auto_break_legend_or_color_title and len(breakdown_col) >= 10:
            # Split a long multi-word title roughly in half onto two lines.
            split_leg = breakdown_col.split()
            if len(split_leg) > 1:
                mid_point = len(split_leg) // 2
                breakdown_col = ' '.join(split_leg[:mid_point]) + '<br>' + ' '.join(split_leg[mid_point:])
    if groups_col is not None:
        groups_available = set(df[groups_col])
        sorted_groups = group_order if group_order is not None else sorted(groups_available)
    layout = reduce(dict_merge, [{}, default_layout, layout]) #overwrite default_layout with given layout
    # x_col / y_col may each be a column name or an explicit value sequence.
    if isinstance(x_col, basestring):
        xvals = df[x_col]
    else:
        xvals = x_col
    if isinstance(y_col, basestring):
        yvals = df[y_col]
    else:
        yvals = y_col
    layout['width'] = width
    layout['height'] = height
    def _process_group(grp, grp_vals):
        # Build one trace for the rows selected by the boolean mask grp_vals
        # and append it to pl_data.
        line_dict = {
            'x': xvals[grp_vals].values,
            'y': yvals[grp_vals].values,
            'mode': 'markers',
            'type': scatter_type,
            'name': grp,
            'marker': { 'size': 7 }
        }
        if tooltip_cols:
            group_tooltips_df = df[tooltip_cols][grp_vals]
            line_dict['text'] = ['<br>'.join(['%s: %s' % (ttc, val) for ttc, val in row.to_dict().items()])
                                 for i, row in group_tooltips_df.iterrows()]
        line_dict = dict_merge(line_dict, series_dict)
        marker_dict = line_dict['marker']
        if grp in group_colours:
            marker_dict['color'] = group_colours[grp]
        if color_col is not None:
            marker_dict['color'] = df[color_col].values
            if legend_or_color_title:
                marker_dict['colorbar'] = {'title': '<b>%s</b>' % breakdown_col}
            if df[color_col].max() > 0 and df[color_col].min() < 0:
                # Centre a diverging colour scale on zero.
                marker_dict['cmid'] = 0
        if size_col is not None:
            marker_dict['size'] = df[size_col].to_list()
        if x_order:
            # Reorder the points to follow the explicit x ordering.
            indexes = [x_order.index(x) for x in line_dict['x']]
            line_dict['x'] = [v for (i,v) in sorted(zip(indexes, line_dict['x']))]
            line_dict['y'] = [v for (i,v) in sorted(zip(indexes, line_dict['y']))]
            if 'text' in line_dict:
                line_dict['text'] = [v for (i,v) in sorted(zip(indexes, line_dict['text']))]
        pl_data.append(line_dict)
    if groups_col is not None:
        for grp in sorted_groups:
            if grp in groups_available:
                grp_vals = df[groups_col] == grp
                _process_group(grp, grp_vals)
    else:
        # Single unnamed series covering every row.
        _process_group(grp = 'Values',
                       grp_vals = numpy.repeat(True, len(df)))
    data_layout = {'data': pl_data, 'layout': layout}
    bundle = _PlotlyChartBundle(data_layout,
                                width = width,
                                height = height,
                                config = default_config)
    if auto_axis_title:
        bundle.set_xlabel(x_col, replace_existing = False)
        bundle.set_ylabel(y_col, replace_existing = False)
    layout = bundle.data_layout['layout']
    # BUG FIX: the guard previously tested the misspelled key 'legent_title'
    # while setting 'legend_title', so a legend title supplied by the caller
    # in `layout` was always overwritten.
    if legend_or_color_title and 'legend_title' not in layout and breakdown_col is not None:
        layout['legend_title'] = '<b>%s</b>' % breakdown_col
    return bundle
def chart(dataframe, layout = dict(), column_settings = dict(), all_columns_settings = dict(), x_and_y = True,
dropna = True, width = 800, height = 500, text_dataframe = dict(), custom_chart_data = [], col_level_separator = ': '):
"""Generic plot from data in a DataFrame. Can be used for e.g. lines, bars and histograms.
The DataFrame to work on should be passed as `dataframe`
Every column in `dataframe` is plotted as a separate series (e.g. a separate line or bar group)
The index of dataframe is used for x-axis values
`layout` can contain extra Plotly layout info like chart title, axes titles etc
e.g. {'title': 'chart title', 'xaxis': {'title': 'x title'}}
`all_columns_settings` can be used to specify an appearance setting for all columns,
e.g. {'type': 'bar'} makes all of the columns display | |
complex numbers::
>>> mp.dps = 50
>>> erf(0.5)
0.52049987781304653768274665389196452873645157575796
>>> mp.dps = 25
>>> erf(1+j)
(1.316151281697947644880271 + 0.1904534692378346862841089j)
Evaluation is supported for large arguments::
>>> mp.dps = 25
>>> erf('1e1000')
1.0
>>> erf('-1e1000')
-1.0
>>> erf('1e-1000')
1.128379167095512573896159e-1000
>>> erf('1e7j')
(0.0 + 8.593897639029319267398803e+43429448190317j)
>>> erf('1e7+1e7j')
(0.9999999858172446172631323 + 3.728805278735270407053139e-8j)
**Related functions**
See also :func:`~mpmath.erfc`, which is more accurate for large `x`,
and :func:`~mpmath.erfi` which gives the antiderivative of
`\exp(t^2)`.
The Fresnel integrals :func:`~mpmath.fresnels` and :func:`~mpmath.fresnelc`
are also related to the error function.
"""
erfc = r"""
Computes the complementary error function,
`\mathrm{erfc}(x) = 1-\mathrm{erf}(x)`.
This function avoids cancellation that occurs when naively
computing the complementary error function as ``1-erf(x)``::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> 1 - erf(10)
0.0
>>> erfc(10)
2.08848758376254e-45
:func:`~mpmath.erfc` works accurately even for ludicrously large
arguments::
>>> erfc(10**10)
4.3504398860243e-43429448190325182776
Complex arguments are supported::
>>> erfc(500+50j)
(1.19739830969552e-107492 + 1.46072418957528e-107491j)
"""
erfi = r"""
Computes the imaginary error function, `\mathrm{erfi}(x)`.
The imaginary error function is defined in analogy with the
error function, but with a positive sign in the integrand:
.. math ::
\mathrm{erfi}(x) = \frac{2}{\sqrt \pi} \int_0^x \exp(t^2) \,dt
Whereas the error function rapidly converges to 1 as `x` grows,
the imaginary error function rapidly diverges to infinity.
The functions are related as
`\mathrm{erfi}(x) = -i\,\mathrm{erf}(ix)` for all complex
numbers `x`.
**Examples**
Basic values and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> erfi(0)
0.0
>>> erfi(1)
1.65042575879754
>>> erfi(-1)
-1.65042575879754
>>> erfi(inf)
+inf
>>> erfi(-inf)
-inf
Note the symmetry between erf and erfi::
>>> erfi(3j)
(0.0 + 0.999977909503001j)
>>> erf(3)
0.999977909503001
>>> erf(1+2j)
(-0.536643565778565 - 5.04914370344703j)
>>> erfi(2+1j)
(-5.04914370344703 - 0.536643565778565j)
Large arguments are supported::
>>> erfi(1000)
1.71130938718796e+434291
>>> erfi(10**10)
7.3167287567024e+43429448190325182754
>>> erfi(-10**10)
-7.3167287567024e+43429448190325182754
>>> erfi(1000-500j)
(2.49895233563961e+325717 + 2.6846779342253e+325717j)
>>> erfi(100000j)
(0.0 + 1.0j)
>>> erfi(-100000j)
(0.0 - 1.0j)
"""
erfinv = r"""
Computes the inverse error function, satisfying
.. math ::
\mathrm{erf}(\mathrm{erfinv}(x)) =
\mathrm{erfinv}(\mathrm{erf}(x)) = x.
This function is defined only for `-1 \le x \le 1`.
**Examples**
Special values include::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> erfinv(0)
0.0
>>> erfinv(1)
+inf
>>> erfinv(-1)
-inf
The domain is limited to the standard interval::
>>> erfinv(2)
Traceback (most recent call last):
...
ValueError: erfinv(x) is defined only for -1 <= x <= 1
It is simple to check that :func:`~mpmath.erfinv` computes inverse values of
:func:`~mpmath.erf` as promised::
>>> erf(erfinv(0.75))
0.75
>>> erf(erfinv(-0.995))
-0.995
:func:`~mpmath.erfinv` supports arbitrary-precision evaluation::
>>> mp.dps = 50
>>> x = erf(2)
>>> x
0.99532226501895273416206925636725292861089179704006
>>> erfinv(x)
2.0
A definite integral involving the inverse error function::
>>> mp.dps = 15
>>> quad(erfinv, [0, 1])
0.564189583547756
>>> 1/sqrt(pi)
0.564189583547756
The inverse error function can be used to generate random numbers
with a Gaussian distribution (although this is a relatively
inefficient algorithm)::
>>> nprint([erfinv(2*rand()-1) for n in range(6)]) # doctest: +SKIP
[-0.586747, 1.10233, -0.376796, 0.926037, -0.708142, -0.732012]
"""
npdf = r"""
``npdf(x, mu=0, sigma=1)`` evaluates the probability density
function of a normal distribution with mean value `\mu`
and variance `\sigma^2`.
Elementary properties of the probability distribution can
be verified using numerical integration::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> quad(npdf, [-inf, inf])
1.0
>>> quad(lambda x: npdf(x, 3), [3, inf])
0.5
>>> quad(lambda x: npdf(x, 3, 2), [3, inf])
0.5
See also :func:`~mpmath.ncdf`, which gives the cumulative
distribution.
"""
ncdf = r"""
``ncdf(x, mu=0, sigma=1)`` evaluates the cumulative distribution
function of a normal distribution with mean value `\mu`
and variance `\sigma^2`.
See also :func:`~mpmath.npdf`, which gives the probability density.
Elementary properties include::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> ncdf(pi, mu=pi)
0.5
>>> ncdf(-inf)
0.0
>>> ncdf(+inf)
1.0
The cumulative distribution is the integral of the density
function having identical mu and sigma::
>>> mp.dps = 15
>>> diff(ncdf, 2)
0.053990966513188
>>> npdf(2)
0.053990966513188
>>> diff(lambda x: ncdf(x, 1, 0.5), 0)
0.107981933026376
>>> npdf(0, 1, 0.5)
0.107981933026376
"""
expint = r"""
:func:`~mpmath.expint(n,z)` gives the generalized exponential integral
or En-function,
.. math ::
\mathrm{E}_n(z) = \int_1^{\infty} \frac{e^{-zt}}{t^n} dt,
where `n` and `z` may both be complex numbers. The case with `n = 1` is
also given by :func:`~mpmath.e1`.
**Examples**
Evaluation at real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> expint(1, 6.25)
0.0002704758872637179088496194
>>> expint(-3, 2+3j)
(0.00299658467335472929656159 + 0.06100816202125885450319632j)
>>> expint(2+3j, 4-5j)
(0.001803529474663565056945248 - 0.002235061547756185403349091j)
At negative integer values of `n`, `E_n(z)` reduces to a
rational-exponential function::
>>> f = lambda n, z: fac(n)*sum(z**k/fac(k-1) for k in range(1,n+2))/\
... exp(z)/z**(n+2)
>>> n = 3
>>> z = 1/pi
>>> expint(-n,z)
584.2604820613019908668219
>>> f(n,z)
584.2604820613019908668219
>>> n = 5
>>> expint(-n,z)
115366.5762594725451811138
>>> f(n,z)
115366.5762594725451811138
"""
e1 = r"""
Computes the exponential integral `\mathrm{E}_1(z)`, given by
.. math ::
\mathrm{E}_1(z) = \int_z^{\infty} \frac{e^{-t}}{t} dt.
This is equivalent to :func:`~mpmath.expint` with `n = 1`.
**Examples**
Two ways to evaluate this function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> e1(6.25)
0.0002704758872637179088496194
>>> expint(1,6.25)
0.0002704758872637179088496194
The E1-function is essentially the same as the Ei-function (:func:`~mpmath.ei`)
with negated argument, except for an imaginary branch cut term::
>>> e1(2.5)
0.02491491787026973549562801
>>> -ei(-2.5)
0.02491491787026973549562801
>>> e1(-2.5)
(-7.073765894578600711923552 - 3.141592653589793238462643j)
>>> -ei(2.5)
-7.073765894578600711923552
"""
ei = r"""
Computes the exponential integral or Ei-function, `\mathrm{Ei}(x)`.
The exponential integral is defined as
.. math ::
\mathrm{Ei}(x) = \int_{-\infty\,}^x \frac{e^t}{t} \, dt.
When the integration range includes `t = 0`, the exponential
integral is interpreted as providing the Cauchy principal value.
For real `x`, the Ei-function behaves roughly like
`\mathrm{Ei}(x) \approx \exp(x) + \log(|x|)`.
The Ei-function is related to the more general family of exponential
integral functions denoted by `E_n`, which are available as :func:`~mpmath.expint`.
**Basic examples**
Some basic values and limits are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> ei(0)
-inf
>>> ei(1)
1.89511781635594
>>> ei(inf)
+inf
>>> ei(-inf)
0.0
For `x < 0`, the defining integral can be evaluated
numerically as a reference::
>>> ei(-4)
-0.00377935240984891
>>> quad(lambda t: exp(t)/t, [-inf, -4])
-0.00377935240984891
:func:`~mpmath.ei` supports complex arguments and arbitrary
precision evaluation::
>>> mp.dps = 50
>>> ei(pi)
10.928374389331410348638445906907535171566338835056
>>> mp.dps = 25
>>> ei(3+4j)
(-4.154091651642689822535359 + 4.294418620024357476985535j)
**Related functions**
The exponential integral is closely related to the logarithmic
integral. See :func:`~mpmath.li` for additional information.
The exponential integral is related to the hyperbolic
and trigonometric integrals (see :func:`~mpmath.chi`, :func:`~mpmath.shi`,
:func:`~mpmath.ci`, :func:`~mpmath.si`) similarly to how the ordinary
exponential function is related to the hyperbolic and
trigonometric functions::
>>> mp.dps = 15
>>> ei(3)
9.93383257062542
>>> chi(3) + shi(3)
9.93383257062542
>>> chop(ci(3j) - j*si(3j) - pi*j/2)
9.93383257062542
Beware that logarithmic corrections, as in the last example
above, are required to obtain the correct branch in general.
For details, see [1].
The exponential integral is also a special case of the
hypergeometric function `\,_2F_2`::
>>> z = 0.6
>>> z*hyper([1,1],[2,2],z) + (ln(z)-ln(1/z))/2 + euler
0.769881289937359
>>> ei(z)
0.769881289937359
**References**
1. Relations between Ei and other functions:
http://functions.wolfram.com/GammaBetaErf/ExpIntegralEi/27/01/
2. Abramowitz & Stegun, section 5:
http://people.math.sfu.ca/~cbm/aands/page_228.htm
3. Asymptotic expansion for Ei:
http://mathworld.wolfram.com/En-Function.html
"""
li = r"""
Computes the logarithmic integral or li-function
`\mathrm{li}(x)`, defined by
.. math ::
\mathrm{li}(x) = \int_0^x \frac{1}{\log t} \, dt
The logarithmic integral has a singularity at `x = 1`.
Alternatively, ``li(x, offset=True)`` computes the offset
logarithmic integral (used in number theory)
.. math ::
\mathrm{Li}(x) = \int_2^x \frac{1}{\log t} \, dt.
These two functions are related via the simple identity
`\mathrm{Li}(x) = \mathrm{li}(x) - \mathrm{li}(2)`.
The logarithmic integral should also not be confused with
the polylogarithm (also denoted by Li), which is implemented
as :func:`~mpmath.polylog`.
**Examples**
Some basic values and limits::
>>> from mpmath import *
>>> mp.dps = 30; mp.pretty = True
>>> li(0)
0.0
>>> li(1)
-inf
>>> li(1)
-inf
>>> li(2)
1.04516378011749278484458888919
>>> findroot(li, 2)
1.45136923488338105028396848589
>>> li(inf)
+inf
>>> li(2, offset=True)
0.0
>>> li(1, offset=True)
-inf
>>> li(0, offset=True)
-1.04516378011749278484458888919
>>> li(10, offset=True)
5.12043572466980515267839286347
The logarithmic integral can be evaluated for arbitrary
complex arguments::
>>> mp.dps = 20
>>> li(3+4j)
(3.1343755504645775265 + 2.6769247817778742392j)
The logarithmic integral is related to the exponential integral::
>>> ei(log(3))
2.1635885946671919729
>>> li(3)
2.1635885946671919729
The logarithmic integral grows like `O(x/\log(x))`::
>>> mp.dps = 15
>>> x = 10**100
>>> x/log(x)
4.34294481903252e+97
>>> li(x)
4.3619719871407e+97
The prime number theorem states that the number of primes less
than `x` is asymptotic to `\mathrm{Li}(x)` (equivalently
`\mathrm{li}(x)`). For example, it is known that there are
exactly 1,925,320,391,606,803,968,923 prime numbers less than
`10^{23}` [1]. The logarithmic integral provides a very
accurate estimate::
>>> li(10**23, offset=True)
1.92532039161405e+21
A definite integral is::
>>> quad(li, [0, 1])
-0.693147180559945
>>> -ln(2)
-0.693147180559945
**References**
1. http://mathworld.wolfram.com/PrimeCountingFunction.html
2. http://mathworld.wolfram.com/LogarithmicIntegral.html
"""
ci = r"""
Computes the cosine integral,
.. math ::
| |
will serve as the App Mesh proxy.
"""
return pulumi.get(self, "container_name")
@property
@pulumi.getter
def properties(self) -> Optional[Mapping[str, str]]:
"""
Set of network configuration parameters to provide the Container Network Interface (CNI) plugin, specified a key-value mapping.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
Proxy type. The default value is `APPMESH`. The only supported value is `APPMESH`.
"""
return pulumi.get(self, "type")
@pulumi.output_type
class TaskDefinitionRuntimePlatform(dict):
    """Runtime platform (CPU architecture and OS family) of an ECS task definition."""

    @staticmethod
    def __key_warning(key: str):
        # Translate camelCase dict keys to the snake_case property names.
        camel_to_snake = {
            "cpuArchitecture": "cpu_architecture",
            "operatingSystemFamily": "operating_system_family",
        }
        suggest = camel_to_snake.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TaskDefinitionRuntimePlatform. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        TaskDefinitionRuntimePlatform.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        TaskDefinitionRuntimePlatform.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 cpu_architecture: Optional[str] = None,
                 operating_system_family: Optional[str] = None):
        """
        :param str cpu_architecture: Must be set to either `X86_64` or `ARM64`; see [cpu architecture](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#runtime-platform)
        :param str operating_system_family: If the `requires_compatibilities` is `FARGATE` this field is required; must be set to a valid option from the [operating system family in the runtime platform](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#runtime-platform) setting
        """
        # Store only the fields that were actually provided.
        for attr_name, attr_value in (
                ("cpu_architecture", cpu_architecture),
                ("operating_system_family", operating_system_family)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="cpuArchitecture")
    def cpu_architecture(self) -> Optional[str]:
        """
        Must be set to either `X86_64` or `ARM64`; see [cpu architecture](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#runtime-platform)
        """
        return pulumi.get(self, "cpu_architecture")

    @property
    @pulumi.getter(name="operatingSystemFamily")
    def operating_system_family(self) -> Optional[str]:
        """
        If the `requires_compatibilities` is `FARGATE` this field is required; must be set to a valid option from the [operating system family in the runtime platform](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#runtime-platform) setting
        """
        return pulumi.get(self, "operating_system_family")
@pulumi.output_type
class TaskDefinitionVolume(dict):
    """A data volume attached to an ECS task definition."""

    @staticmethod
    def __key_warning(key: str):
        # Translate camelCase dict keys to the snake_case property names.
        camel_to_snake = {
            "dockerVolumeConfiguration": "docker_volume_configuration",
            "efsVolumeConfiguration": "efs_volume_configuration",
            "fsxWindowsFileServerVolumeConfiguration": "fsx_windows_file_server_volume_configuration",
            "hostPath": "host_path",
        }
        suggest = camel_to_snake.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TaskDefinitionVolume. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        TaskDefinitionVolume.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        TaskDefinitionVolume.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 name: str,
                 docker_volume_configuration: Optional['outputs.TaskDefinitionVolumeDockerVolumeConfiguration'] = None,
                 efs_volume_configuration: Optional['outputs.TaskDefinitionVolumeEfsVolumeConfiguration'] = None,
                 fsx_windows_file_server_volume_configuration: Optional['outputs.TaskDefinitionVolumeFsxWindowsFileServerVolumeConfiguration'] = None,
                 host_path: Optional[str] = None):
        """
        :param str name: Name of the volume. This name is referenced in the `sourceVolume`
               parameter of container definition in the `mountPoints` section.
        :param 'TaskDefinitionVolumeDockerVolumeConfigurationArgs' docker_volume_configuration: Configuration block to configure a docker volume. Detailed below.
        :param 'TaskDefinitionVolumeEfsVolumeConfigurationArgs' efs_volume_configuration: Configuration block for an EFS volume. Detailed below.
        :param 'TaskDefinitionVolumeFsxWindowsFileServerVolumeConfigurationArgs' fsx_windows_file_server_volume_configuration: Configuration block for an FSX Windows File Server volume. Detailed below.
        :param str host_path: Path on the host container instance that is presented to the container. If not set, ECS will create a nonpersistent data volume that starts empty and is deleted after the task has finished.
        """
        # 'name' is required; every other field is stored only when provided.
        pulumi.set(__self__, "name", name)
        for attr_name, attr_value in (
                ("docker_volume_configuration", docker_volume_configuration),
                ("efs_volume_configuration", efs_volume_configuration),
                ("fsx_windows_file_server_volume_configuration", fsx_windows_file_server_volume_configuration),
                ("host_path", host_path)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the volume. This name is referenced in the `sourceVolume`
        parameter of container definition in the `mountPoints` section.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="dockerVolumeConfiguration")
    def docker_volume_configuration(self) -> Optional['outputs.TaskDefinitionVolumeDockerVolumeConfiguration']:
        """
        Configuration block to configure a docker volume. Detailed below.
        """
        return pulumi.get(self, "docker_volume_configuration")

    @property
    @pulumi.getter(name="efsVolumeConfiguration")
    def efs_volume_configuration(self) -> Optional['outputs.TaskDefinitionVolumeEfsVolumeConfiguration']:
        """
        Configuration block for an EFS volume. Detailed below.
        """
        return pulumi.get(self, "efs_volume_configuration")

    @property
    @pulumi.getter(name="fsxWindowsFileServerVolumeConfiguration")
    def fsx_windows_file_server_volume_configuration(self) -> Optional['outputs.TaskDefinitionVolumeFsxWindowsFileServerVolumeConfiguration']:
        """
        Configuration block for an FSX Windows File Server volume. Detailed below.
        """
        return pulumi.get(self, "fsx_windows_file_server_volume_configuration")

    @property
    @pulumi.getter(name="hostPath")
    def host_path(self) -> Optional[str]:
        """
        Path on the host container instance that is presented to the container. If not set, ECS will create a nonpersistent data volume that starts empty and is deleted after the task has finished.
        """
        return pulumi.get(self, "host_path")
@pulumi.output_type
class TaskDefinitionVolumeDockerVolumeConfiguration(dict):
    """Docker-volume-specific configuration for an ECS task definition volume."""

    @staticmethod
    def __key_warning(key: str):
        # Translate camelCase dict keys to the snake_case property names.
        camel_to_snake = {"driverOpts": "driver_opts"}
        suggest = camel_to_snake.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in TaskDefinitionVolumeDockerVolumeConfiguration. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        TaskDefinitionVolumeDockerVolumeConfiguration.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        TaskDefinitionVolumeDockerVolumeConfiguration.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 autoprovision: Optional[bool] = None,
                 driver: Optional[str] = None,
                 driver_opts: Optional[Mapping[str, str]] = None,
                 labels: Optional[Mapping[str, str]] = None,
                 scope: Optional[str] = None):
        """
        :param bool autoprovision: If this value is `true`, the Docker volume is created if it does not already exist. *Note*: This field is only used if the scope is `shared`.
        :param str driver: Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement.
        :param Mapping[str, str] driver_opts: Map of Docker driver specific options.
        :param Mapping[str, str] labels: Map of custom metadata to add to your Docker volume.
        :param str scope: Scope for the Docker volume, which determines its lifecycle, either `task` or `shared`. Docker volumes that are scoped to a `task` are automatically provisioned when the task starts and destroyed when the task stops. Docker volumes that are scoped as `shared` persist after the task stops.
        """
        # All fields are optional; store only those that were provided.
        for attr_name, attr_value in (
                ("autoprovision", autoprovision),
                ("driver", driver),
                ("driver_opts", driver_opts),
                ("labels", labels),
                ("scope", scope)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def autoprovision(self) -> Optional[bool]:
        """
        If this value is `true`, the Docker volume is created if it does not already exist. *Note*: This field is only used if the scope is `shared`.
        """
        return pulumi.get(self, "autoprovision")

    @property
    @pulumi.getter
    def driver(self) -> Optional[str]:
        """
        Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement.
        """
        return pulumi.get(self, "driver")

    @property
    @pulumi.getter(name="driverOpts")
    def driver_opts(self) -> Optional[Mapping[str, str]]:
        """
        Map of Docker driver specific options.
        """
        return pulumi.get(self, "driver_opts")

    @property
    @pulumi.getter
    def labels(self) -> Optional[Mapping[str, str]]:
        """
        Map of custom metadata to add to your Docker volume.
        """
        return pulumi.get(self, "labels")

    @property
    @pulumi.getter
    def scope(self) -> Optional[str]:
        """
        Scope for the Docker volume, which determines its lifecycle, either `task` or `shared`. Docker volumes that are scoped to a `task` are automatically provisioned when the task starts and destroyed when the task stops. Docker volumes that are scoped as `shared` persist after the task stops.
        """
        return pulumi.get(self, "scope")
@pulumi.output_type
class TaskDefinitionVolumeEfsVolumeConfiguration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "fileSystemId":
suggest = "file_system_id"
elif key == "authorizationConfig":
suggest = "authorization_config"
elif key == "rootDirectory":
suggest = "root_directory"
elif key == "transitEncryption":
suggest = "transit_encryption"
elif key == "transitEncryptionPort":
suggest = "transit_encryption_port"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TaskDefinitionVolumeEfsVolumeConfiguration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TaskDefinitionVolumeEfsVolumeConfiguration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TaskDefinitionVolumeEfsVolumeConfiguration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
file_system_id: str,
authorization_config: Optional['outputs.TaskDefinitionVolumeEfsVolumeConfigurationAuthorizationConfig'] = None,
root_directory: Optional[str] = None,
transit_encryption: Optional[str] = None,
transit_encryption_port: Optional[int] = None):
"""
        :param str file_system_id: The Amazon EFS file system ID to use.
        :param 'TaskDefinitionVolumeEfsVolumeConfigurationAuthorizationConfigArgs' authorization_config: Configuration block for authorization for the Amazon EFS file system detailed below.
        :param str root_directory: The directory within the Amazon EFS file system to mount as the root directory inside the host.
:param str transit_encryption: Whether or not to enable encryption for Amazon EFS data in transit between the Amazon ECS host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization is used. Valid values: `ENABLED`, `DISABLED`. If this parameter is omitted, the default value of `DISABLED` is used.
:param int transit_encryption_port: Port to use for transit encryption. If you do not specify a transit encryption port, it will use the port selection strategy that the | |
<filename>nipype/interfaces/base/traits_extension.py
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Traits extension
................
This module contains Trait classes that we've pulled from the
traits source and fixed due to various bugs. File and Directory are
redefined as the release version had dependencies on TraitsUI, which
we do not want Nipype to depend on. At least not yet.
Undefined class was missing the __len__ operator, causing edit_traits
and configure_traits to fail on List objects. Even though we don't
require TraitsUI, this bug was the only thing preventing us from
popping up GUIs which users like.
These bugs have been in Traits v3.3.0 and v3.2.1. We have reported
all of these bugs and they've been fixed in enthought svn repository
(usually by <NAME>).
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import str, bytes
import os
import collections
# perform all external trait imports here
from traits import __version__ as traits_version
import traits.api as traits
from traits.trait_handlers import TraitDictObject, TraitListObject
from traits.trait_errors import TraitError
from traits.trait_base import _Undefined, class_of
from traits.api import BaseUnicode
from traits.api import Unicode
from future import standard_library
def _traits_version_tuple(version_str):
    """Parse a dotted version string into a comparable tuple of three ints.

    Non-numeric suffixes within a component (e.g. '0b1', '.dev0') are ignored
    and missing components count as 0.  Comparing tuples avoids the
    lexicographic-string bug where e.g. '3.10.0' < '3.7.0' evaluates to True.
    """
    parts = []
    for chunk in version_str.split('.')[:3]:
        digits = ''
        for ch in chunk:
            if not ch.isdigit():
                break
            digits += ch
        parts.append(int(digits) if digits else 0)
    while len(parts) < 3:
        parts.append(0)
    return tuple(parts)


# BUG FIX: the version strings used to be compared lexicographically, which
# mis-orders multi-digit components; compare numeric tuples instead.
if _traits_version_tuple(traits_version) < (3, 7, 0):
    raise ImportError('Traits version 3.7.0 or higher must be installed')

standard_library.install_aliases()
class Str(Unicode):
    """Unicode-based replacement for the default ``traits.Str`` (which is bytes-based)."""


# Monkeypatch Str and DictStrStr for Python 2 compatibility:
# make the traits module itself expose the unicode-safe variants.
traits.Str = Str
# Dict trait mapping str-like keys to str-like values (accepts bytes or str).
DictStrStr = traits.Dict((bytes, str), (bytes, str))
traits.DictStrStr = DictStrStr
class BaseFile(BaseUnicode):
    """ Defines a trait whose value must be the name of a file.
    """

    # A description of the type of value this trait accepts:
    info_text = 'a file name'

    def __init__(self, value='', filter=None, auto_set=False,
                 entries=0, exists=False, **metadata):
        """ Creates a File trait.

        Parameters
        ----------
        value : string
            The default value for the trait
        filter : string
            A wildcard string to filter filenames in the file dialog box used by
            the attribute trait editor.
        auto_set : boolean
            Indicates whether the file editor updates the trait value after
            every key stroke.
        exists : boolean
            Indicates whether the trait value must be an existing file or
            not.

        Default Value
        -------------
        *value* or ''
        """
        self.filter = filter
        self.auto_set = auto_set
        self.entries = entries
        self.exists = exists

        if exists:
            self.info_text = 'an existing file name'

        super(BaseFile, self).__init__(value, **metadata)

    def validate(self, object, name, value):
        """ Validates that a specified value is valid for this trait.

        The parent class handles type validation; this adds the optional
        existence check.  Raises TraitError if ``exists`` is set and the
        path does not point at a file.
        """
        validated_value = super(BaseFile, self).validate(object, name, value)
        if not self.exists:
            return validated_value
        if os.path.isfile(value):
            return validated_value
        # BUG FIX: a trailing ``self.error(object, name, value)`` used to
        # follow this raise; it was unreachable dead code and was removed.
        raise TraitError(
            args='The trait \'{}\' of {} instance is {}, but the path '
            ' \'{}\' does not exist.'.format(name, class_of(object),
                                             self.info_text, value))
class File(BaseFile):
    """
    Trait whose value must name a file.  The C-level fast validator is
    deliberately not installed, so validation always goes through
    :class:`BaseFile` (which supports the ``exists`` check).
    """

    def __init__(self, value='', filter=None, auto_set=False,
                 entries=0, exists=False, **metadata):
        """Create a ``File`` trait.

        Parameters
        ----------
        value : string
            Default value for the trait.
        filter : string
            Wildcard string used to filter filenames in the file-dialog
            editor.
        auto_set : boolean
            Whether the file editor updates the trait value on every
            keystroke.
        exists : boolean
            Whether the value must point at an existing file.

        Default Value
        -------------
        *value* or ''
        """
        # Everything is delegated to BaseFile; no fast validator is set up.
        super(File, self).__init__(value, filter, auto_set, entries, exists,
                                   **metadata)
# -------------------------------------------------------------------------------
# 'BaseDirectory' and 'Directory' traits:
# -------------------------------------------------------------------------------
class BaseDirectory(BaseUnicode):
    """
    Trait whose value must be the name of a directory.
    """

    # Description used in error messages for this trait:
    info_text = 'a directory name'

    def __init__(self, value='', auto_set=False, entries=0,
                 exists=False, **metadata):
        """Create a ``BaseDirectory`` trait.

        Parameters
        ----------
        value : string
            Default value for the trait.
        auto_set : boolean
            Whether the directory editor updates the trait value on every
            keystroke.
        exists : boolean
            Whether the value must point at an existing directory.

        Default Value
        -------------
        *value* or ''
        """
        self.exists = exists
        self.auto_set = auto_set
        self.entries = entries

        if exists:
            self.info_text = 'an existing directory name'

        super(BaseDirectory, self).__init__(value, **metadata)

    def validate(self, object, name, value):
        """Validate that *value* is acceptable for this trait."""
        if isinstance(value, (str, bytes)):
            # Accept any string when existence is not required, otherwise
            # the path must actually be a directory.
            if not self.exists or os.path.isdir(value):
                return value
            raise TraitError(
                args='The trait \'{}\' of {} instance is {}, but the path '
                ' \'{}\' does not exist.'.format(name,
                                                 class_of(object), self.info_text, value))
        # Non-string values are rejected through the standard trait error.
        self.error(object, name, value)
class Directory(BaseDirectory):
    """
    Trait whose value must name a directory.  The C-level fast validator is
    deliberately not installed, so validation always goes through
    :class:`BaseDirectory` (which supports the ``exists`` check).
    """

    def __init__(self, value='', auto_set=False, entries=0,
                 exists=False, **metadata):
        """Create a ``Directory`` trait.

        Parameters
        ----------
        value : string
            Default value for the trait.
        auto_set : boolean
            Whether the directory editor updates the trait value on every
            keystroke.
        exists : boolean
            Whether the value must point at an existing directory.

        Default Value
        -------------
        *value* or ''
        """
        # Everything is delegated to BaseDirectory; no fast validator is set.
        super(Directory, self).__init__(value, auto_set, entries, exists,
                                        **metadata)
# Mapping of neuroimaging format name -> list of extension pairs.
# Each element consists of:
#  - uncompressed (tuple[0]) extension (a tuple when the format uses paired files)
#  - compressed (tuple[1]) extension
img_fmt_types = {
    'nifti1': [('.nii', '.nii.gz'),
               (('.hdr', '.img'), ('.hdr', '.img.gz'))],
    'mgh': [('.mgh', '.mgz'), ('.mgh', '.mgh.gz')],
    'nifti2': [('.nii', '.nii.gz')],
    'cifti2': [('.nii', '.nii.gz')],
    'gifti': [('.gii', '.gii.gz')],
    'dicom': [('.dcm', '.dcm'), ('.IMA', '.IMA'), ('.tar', '.tar.gz')],
    # BUG FIX: the nrrd entries were missing their leading dots ('nrrd',
    # 'nhdr'), which made the endswith()-based validation looser than
    # intended (e.g. a file named 'foo_nrrd' would have been accepted).
    'nrrd': [('.nrrd', '.nrrd'), ('.nhdr', '.nhdr')],
    'afni': [('.HEAD', '.HEAD'), ('.BRIK', '.BRIK')]
}
class ImageFile(File):
    """ Defines a trait of specific neuroimaging files """

    def __init__(self, value='', filter=None, auto_set=False, entries=0,
                 exists=False, types=None, allow_compressed=True, **metadata):
        """ Trait handles neuroimaging files.

        Parameters
        ----------
        types : list
            Strings of file format types accepted
        allow_compressed : boolean
            Indicates whether the compressed variants of the formats are
            also accepted
        """
        # BUG FIX: 'types' previously used a mutable default argument ([]),
        # which is shared between all calls; use None as the sentinel.
        self.types = types if types is not None else []
        self.allow_compressed = allow_compressed
        super(ImageFile, self).__init__(value, filter, auto_set, entries,
                                        exists, **metadata)

    def grab_exts(self):
        """Return the list of file extensions allowed by ``self.types``.

        Raises AttributeError for a format name missing from img_fmt_types.
        """
        exts = set()
        for fmt in self.types:
            if fmt not in img_fmt_types:
                raise AttributeError('Information has not been added for format'
                                     ' type {} yet. Supported formats include: '
                                     '{}'.format(fmt,
                                                 ', '.join(img_fmt_types.keys())))
            for pair in img_fmt_types[fmt]:
                # pair[0] is the uncompressed extension (possibly a tuple of
                # paired files); pair[-1] is the compressed variant.
                uncompressed = pair[0]
                exts.update(uncompressed if isinstance(uncompressed, tuple)
                            else (uncompressed,))
                if self.allow_compressed:
                    compressed = pair[-1]
                    exts.update(compressed if isinstance(compressed, tuple)
                                else (compressed,))
        return list(exts)

    def validate(self, object, name, value):
        """ Validates that a specified value is valid for this trait.

        Beyond File's validation, checks the filename against the allowed
        extensions when ``self.types`` is non-empty.
        """
        validated_value = super(ImageFile, self).validate(object, name, value)
        if validated_value and self.types:
            self._exts = self.grab_exts()
            if not any(validated_value.endswith(x) for x in self._exts):
                raise TraitError(
                    args="{} is not included in allowed types: {}".format(
                        validated_value, ', '.join(self._exts)))
        return validated_value
"""
The functions that pop-up the Traits GUIs, edit_traits and
configure_traits, were failing because all of our inputs default to
Undefined deep and down in traits/ui/wx/list_editor.py it checks for
the len() of the elements of the list. The _Undefined class in traits
does not define the __len__ method and would error. I tried defining
our own Undefined and even subclassing Undefined, but both of those
failed with a TraitError in our initializer when we assign the
Undefined to the inputs because of an incompatible type:
TraitError: The 'vertical_gradient' trait of a BetInputSpec instance must be a float, but a value of <undefined> <class 'nipype.interfaces.traits._Undefined'> was specified.
So... in order to keep the same type but add the missing method, I
monkey patched.
"""
def length(self):
    """``__len__`` implementation for traits' ``_Undefined``: an undefined
    value behaves like an empty container, so GUI list editors can call
    ``len()`` on it without raising (see the module-level note above)."""
    return 0
##########################################################################
# Apply monkeypatch here: give _Undefined the __len__ it lacks upstream.
_Undefined.__len__ = length
##########################################################################
# Shared singleton sentinel assigned to every unset input/output value.
Undefined = _Undefined()
def isdefined(object):
    """Return True if *object* is not the traits ``Undefined`` sentinel."""
    return not isinstance(object, _Undefined)
def has_metadata(trait, metadata, value=None, recursive=True):
    '''
    Checks if a given trait has a metadata (and optionally if it is set to particular value).

    :param trait: trait (or handler) object to inspect
    :param metadata: metadata key to look for in ``trait._metadata``
    :param value: if not None, the metadata entry must equal this value
    :param recursive: also inspect inner traits and handlers
    :return: True if matching metadata was found anywhere
    '''
    count = 0
    if hasattr(trait, "_metadata") and metadata in list(trait._metadata.keys()) and (trait._metadata[metadata] == value or value is None):
        count += 1
    if recursive:
        if hasattr(trait, 'inner_traits'):
            for inner_trait in trait.inner_traits():
                # BUG FIX: ``recursive`` used to be passed positionally as
                # ``value`` here, so nested traits were matched against
                # True/False instead of the requested metadata value.
                count += has_metadata(inner_trait.trait_type, metadata, value, recursive)
        if hasattr(trait, 'handlers') and trait.handlers is not None:
            for handler in trait.handlers:
                count += has_metadata(handler, metadata, value, recursive)
    return count > 0
class | |
<reponame>bogdandm/attrs-api-client<gh_stars>100-1000
import argparse
import configparser
import importlib
import itertools
import json
import os.path
import re
import sys
from collections import defaultdict
from datetime import datetime
from pathlib import Path
from typing import Any, Callable, Dict, Generator, Iterable, List, Tuple, Type, Union
try:
import ruamel.yaml as yaml
except ImportError:
try:
import yaml
except ImportError:
yaml = None
from . import __version__ as VERSION
from .dynamic_typing import ModelMeta, register_datetime_classes
from .generator import MetadataGenerator
from .models import ModelsStructureType
from .models.attr import AttrsModelCodeGenerator
from .models.base import GenericModelCodeGenerator, generate_code
from .models.dataclasses import DataclassModelCodeGenerator
from .models.pydantic import PydanticModelCodeGenerator
from .models.structure import compose_models, compose_models_flat
from .registry import (
ModelCmp, ModelFieldsEquals, ModelFieldsNumberMatch, ModelFieldsPercentMatch, ModelRegistry
)
from .utils import convert_args
STRUCTURE_FN_TYPE = Callable[[Dict[str, ModelMeta]], ModelsStructureType]
bool_js_style = lambda s: {"true": True, "false": False}.get(s, None)
class Cli:
    """Command-line front-end: parses arguments, loads JSON data and renders model code."""

    # Maps --merge policy names to ModelCmp factories (with CLI-string argument converters).
    MODEL_CMP_MAPPING = {
        "percent": convert_args(ModelFieldsPercentMatch, lambda s: float(s) / 100),
        "number": convert_args(ModelFieldsNumberMatch, int),
        "exact": ModelFieldsEquals
    }

    # Maps -s/--structure choices to model-composition functions.
    STRUCTURE_FN_MAPPING: Dict[str, STRUCTURE_FN_TYPE] = {
        "nested": compose_models,
        "flat": compose_models_flat
    }

    # Maps -f/--framework choices to code-generator classes (with kwarg converters).
    MODEL_GENERATOR_MAPPING: Dict[str, Type[GenericModelCodeGenerator]] = {
        "base": convert_args(GenericModelCodeGenerator),
        "attrs": convert_args(AttrsModelCodeGenerator, meta=bool_js_style),
        "dataclasses": convert_args(DataclassModelCodeGenerator, meta=bool_js_style,
                                    post_init_converters=bool_js_style),
        "pydantic": convert_args(PydanticModelCodeGenerator),
    }
    def __init__(self):
        """Initialize CLI state with defaults; real values are filled in by parse_args()."""
        self.initialized = False  # becomes True once set_args() has completed
        self.models_data: Dict[str, Iterable[dict]] = {}  # model name -> lazy JSON data (-m/-l)
        self.enable_datetime: bool = False  # --datetime
        self.strings_converters: bool = False  # --strings-converters
        self.max_literals: int = -1  # --max-strings-literals
        self.merge_policy: List[ModelCmp] = []  # --merge
        self.structure_fn: STRUCTURE_FN_TYPE = None  # -s
        self.model_generator: Type[GenericModelCodeGenerator] = None  # -f & --code-generator
        self.model_generator_kwargs: Dict[str, Any] = None  # --code-generator-kwargs
        self.argparser = self._create_argparser()
    def parse_args(self, args: List[str] = None):
        """
        Parse a list of command line arguments and store the results on self.

        :param args: (Optional) List of arguments; defaults to sys.argv
        :return: None
        """
        parser = self.argparser
        namespace = parser.parse_args(args)
        # Extract args
        # -m: pairs of (model name, flattened iterable of paths expanded from patterns)
        models: List[Tuple[str, Iterable[Path]]] = [
            (model_name, itertools.chain(*map(_process_path, paths)))
            for model_name, *paths in namespace.model or ()
        ]
        # -l: pairs of (model name, (lookup expression, file path))
        models_lists: List[Tuple[str, Tuple[str, Path]]] = [
            (model_name, (lookup, Path(path)))
            for model_name, lookup, path in namespace.list or ()
        ]
        # NOTE: 'parser' is re-bound here from the argparser to the file-loader callable.
        parser = getattr(FileLoaders, namespace.input_format)
        self.output_file = namespace.output
        self.enable_datetime = namespace.datetime
        disable_unicode_conversion = namespace.disable_unicode_conversion
        self.strings_converters = namespace.strings_converters
        self.max_literals = namespace.max_strings_literals
        # "name_arg" strings become ["name", "arg"] policy lists.
        merge_policy = [m.split("_") if "_" in m else m for m in namespace.merge]
        structure = namespace.structure
        framework = namespace.framework
        code_generator = namespace.code_generator
        code_generator_kwargs_raw: List[str] = namespace.code_generator_kwargs
        dict_keys_regex: List[str] = namespace.dict_keys_regex
        dict_keys_fields: List[str] = namespace.dict_keys_fields
        self.validate(models, models_lists, merge_policy, framework, code_generator)
        self.setup_models_data(models, models_lists, parser)
        self.set_args(merge_policy, structure, framework, code_generator, code_generator_kwargs_raw,
                      dict_keys_regex, dict_keys_fields, disable_unicode_conversion)
    def run(self):
        """
        Execute the pipeline: generate metadata from the loaded JSON data,
        register/merge models, compose the structure and render the code.

        :return: Generated code, or a message naming the output file
        """
        if self.enable_datetime:
            register_datetime_classes()
        generator = MetadataGenerator(
            dict_keys_regex=self.dict_keys_regex,
            dict_keys_fields=self.dict_keys_fields
        )
        registry = ModelRegistry(*self.merge_policy)
        # Feed every dataset into the registry under its model name.
        for name, data in self.models_data.items():
            meta = generator.generate(*data)
            registry.process_meta_data(meta, name)
        # Merge structurally-similar models, then give the survivors names.
        registry.merge_models(generator)
        registry.generate_names()
        structure = self.structure_fn(registry.models_map)
        output = self.version_string + \
                 generate_code(structure, self.model_generator, class_generator_kwargs=self.model_generator_kwargs)
        if self.output_file:
            with open(self.output_file, "w", encoding="utf-8") as f:
                f.write(output)
            return f"Output is written to {self.output_file}"
        else:
            return output
@property
def version_string(self):
return (
'r"""\n'
f'generated by json2python-models v{VERSION} at {datetime.now().ctime()}\n'
f'command: {" ".join(sys.argv)}\n'
'"""\n'
)
def validate(self, models, models_list, merge_policy, framework, code_generator):
"""
Validate parsed args
:param models: List of pairs (model name, list of filesystem path)
:param models_list: List of pairs (model name, list of lookup expr and filesystem path)
:param merge_policy: List of merge policies. Each merge policy is either string or string and policy arguments
:param framework: Framework name (predefined code generator)
:param code_generator: Code generator import string
:return:
"""
names = {name for name, _ in models_list}
if len(names) != len(models_list):
raise ValueError("Model names under -l flag should be unique")
for m in merge_policy:
if isinstance(m, list):
if m[0] not in self.MODEL_CMP_MAPPING:
raise ValueError(f"Invalid merge policy '{m[0]}', choices are {self.MODEL_CMP_MAPPING.keys()}")
elif m not in self.MODEL_CMP_MAPPING:
raise ValueError(f"Invalid merge policy '{m}', choices are {self.MODEL_CMP_MAPPING.keys()}")
if framework == 'custom' and code_generator is None:
raise ValueError("You should specify --code-generator to support custom generator")
elif framework != 'custom' and code_generator is not None:
raise ValueError("--code-generator argument has no effect without '--framework custom' argument")
def setup_models_data(
self,
models: Iterable[Tuple[str, Iterable[Path]]],
models_lists: Iterable[Tuple[str, Tuple[str, Path]]],
parser: 'FileLoaders.T'
):
"""
Initialize lazy loaders for models data
"""
models_dict: Dict[str, List[Iterable[dict]]] = defaultdict(list)
for model_name, paths in models:
models_dict[model_name].append(parser(path) for path in paths)
for model_name, (lookup, path) in models_lists:
models_dict[model_name].append(iter_json_file(parser(path), lookup))
self.models_data = {
model_name: itertools.chain(*list_of_gen)
for model_name, list_of_gen in models_dict.items()
}
def set_args(
self,
merge_policy: List[Union[List[str], str]],
structure: str,
framework: str,
code_generator: str,
code_generator_kwargs_raw: List[str],
dict_keys_regex: List[str],
dict_keys_fields: List[str],
disable_unicode_conversion: bool
):
"""
Convert CLI args to python representation and set them to appropriate object attributes
"""
self.merge_policy.clear()
for merge in merge_policy:
if isinstance(merge, str):
name = merge
args = ()
else:
name = merge[0]
args = merge[1:]
self.merge_policy.append(self.MODEL_CMP_MAPPING[name](*args))
self.structure_fn = self.STRUCTURE_FN_MAPPING[structure]
if framework != "custom":
self.model_generator = self.MODEL_GENERATOR_MAPPING[framework]
else:
module, cls = code_generator.rsplit('.', 1)
m = importlib.import_module(module)
self.model_generator = getattr(m, cls)
self.model_generator_kwargs = dict(
post_init_converters=self.strings_converters,
convert_unicode=not disable_unicode_conversion,
max_literals=self.max_literals
)
if code_generator_kwargs_raw:
for item in code_generator_kwargs_raw:
if item[0] == '"':
item = item[1:]
if item[-1] == '"':
item = item[:-1]
name, value = item.split("=", 1)
self.model_generator_kwargs[name] = value
self.dict_keys_regex = [re.compile(rf"^{r}$") for r in dict_keys_regex] if dict_keys_regex else ()
self.dict_keys_fields = dict_keys_fields or ()
self.initialized = True
@classmethod
def _create_argparser(cls) -> argparse.ArgumentParser:
    """
    Build the command-line interface for the converter.

    Choice lists are taken from the class-level mappings
    (MODEL_GENERATOR_MAPPING, STRUCTURE_FN_MAPPING) so subclasses can
    extend them without touching this method.

    :return: a configured, not-yet-parsed ArgumentParser
    """
    # RawTextHelpFormatter keeps the hand-formatted (newline-laden) help
    # strings below intact.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        description="Convert given json files into Python models."
    )
    parser.add_argument(
        "-m", "--model",
        nargs="+", action="append", metavar=("<Model name>", "<JSON files>"),
        help="Model name and its JSON data as path or unix-like path pattern.\n"
             "'*', '**' or '?' patterns symbols are supported.\n\n"
    )
    parser.add_argument(
        "-l", "--list",
        nargs=3, action="append", metavar=("<Model name>", "<JSON key>", "<JSON file>"),
        help="Like -m but given json file should contain list of model data.\n"
             "If this file contains dict with nested list than you can pass\n"
             "<JSON key> to lookup. Deep lookups are supported by dot-separated path.\n"
             "If no lookup needed pass '-' as <JSON key>\n\n"
             "I.e. for file that contains dict {\"a\": {\"b\": [model_data, ...]}} you should\n"
             "pass 'a.b' as <JSON key>.\n\n"
    )
    parser.add_argument(
        "-i", "--input-format",
        default="json",
        choices=['json', 'yaml', 'ini'],
        help="Input files parser ('PyYaml' is required to parse yaml files)\n\n"
    )
    parser.add_argument(
        "-o", "--output",
        metavar="FILE", default="",
        help="Path to output file\n\n"
    )
    parser.add_argument(
        "-f", "--framework",
        default="base",
        choices=list(cls.MODEL_GENERATOR_MAPPING.keys()) + ["custom"],
        help="Model framework for which python code is generated.\n"
             "'base' (default) mean no framework so code will be generated without any decorators\n"
             "and additional meta-data.\n"
             "If you pass 'custom' you should specify --code-generator argument\n\n"
    )
    parser.add_argument(
        "-s", "--structure",
        default="flat",
        choices=list(cls.STRUCTURE_FN_MAPPING.keys()),
        help="Models composition style. By default nested models become nested Python classes.\n\n"
    )
    parser.add_argument(
        "--datetime",
        action="store_true",
        help="Enable datetime/date/time strings parsing.\n"
             "Warn.: This can lead to 6-7 times slowdown on large datasets.\n"
             "       Be sure that you really need this option.\n\n"
    )
    parser.add_argument(
        "--strings-converters",
        action="store_true",
        help="Enable generation of string types converters (i.e. IsoDatetimeString or BooleanString).\n\n"
    )
    parser.add_argument(
        "--max-strings-literals",
        type=int,
        default=GenericModelCodeGenerator.DEFAULT_MAX_LITERALS,
        metavar='NUMBER',
        help="Generate Literal['foo', 'bar'] when field have less than NUMBER string constants as values.\n"
             f"Pass 0 to disable. By default NUMBER={GenericModelCodeGenerator.DEFAULT_MAX_LITERALS}"
             f" (some generator classes could override it)\n\n"
    )
    parser.add_argument(
        "--disable-unicode-conversion", "--no-unidecode",
        action="store_true",
        help="Disabling unicode conversion in fields and class names.\n\n"
    )
    default_percent = f"{ModelFieldsPercentMatch.DEFAULT * 100:.0f}"
    default_number = f"{ModelFieldsNumberMatch.DEFAULT:.0f}"
    # NOTE: argparse %-expands help strings, so a literal per-cent sign
    # must be written as '%%' below.
    parser.add_argument(
        "--merge",
        default=["percent", "number"],
        nargs="+",
        help=(
            f"Merge policy settings. Default is 'percent_{default_percent} number_{default_number}' (percent of field match\n"
            "or number of fields match).\n"
            "Possible values are:\n"
            "'percent[_<percent>]' - two models had a certain percentage of matched field names.\n"
            f"    Default percent is {default_percent}%%. "
            "Custom value could be i.e. 'percent_95'.\n"
            "'number[_<number>]' - two models had a certain number of matched field names.\n"
            f"    Default number of fields is {default_number}.\n"
            "'exact' - two models should have exact same field names to merge.\n\n"
        )
    )
    parser.add_argument(
        "--dict-keys-regex", "--dkr",
        nargs="+", metavar="RegEx",
        help="List of regular expressions (Python syntax).\n"
             "If all keys of some dict are match one of them\n"
             "then this dict will be marked as dict field but not nested model.\n"
             "Note: ^ and $ tokens will be added automatically but you have to\n"
             "escape other special characters manually.\n"
    )
    parser.add_argument(
        "--dict-keys-fields", "--dkf",
        nargs="+", metavar="FIELD NAME",
        help="List of model fields names that will be marked as dict fields\n\n"
    )
    parser.add_argument(
        "--code-generator",
        help="Absolute import path to GenericModelCodeGenerator subclass.\n"
             "Works in pair with '-f custom'\n\n"
    )
    parser.add_argument(
        "--code-generator-kwargs",
        metavar="NAME=VALUE",
        nargs="*", type=str,
        help="List of code generator arguments (for __init__ method).\n"
             "Each argument should be in following format:\n"
             "    argument_name=value or \"argument_name=value with space\"\n"
             "Boolean values should be passed in JS style: true | false"
             "\n\n"
    )
    return parser
def main():
import os
if os.getenv("TRAVIS", None) or os.getenv("FORCE_COVERAGE", None):
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Zinc grammar specification.
# (C) 2016 VRT Systems
#
# vim: set ts=4 sts=4 et tw=78 sw=4 si:
import datetime
import logging
import re
import sys
import iso8601
import pyparsing as pp
import six
# Bring in special Project Haystack types and time zones
from .datatypes import Quantity, Coordinate, Uri, Bin, MARKER, NA, REMOVE, Ref, XStr
from .grid import Grid
# Bring in our sortable dict class to preserve order
from .sortabledict import SortableDict
# Bring in version handling
from .version import Version, VER_2_0, VER_3_0
from .zoneinfo import timezone
# Logging instance for reporting debug info
LOG = logging.getLogger(__name__)

# All grids start with the version string.
VERSION_RE = re.compile(r'^ver:"(([^"\\]|\\[\\"bfnrt$])+)"')

NEWLINE_RE = re.compile(r'\r?\n')

# Character number regex; for exceptions.
# Raw string: '\(' and '\d' in a plain literal are invalid escape
# sequences and trigger SyntaxWarning on modern Python.
CHAR_NUM_RE = re.compile(r' *\(at char \d+\),')
def reformat_exception(ex_msg, line_num=None):
    """
    Normalise a pyparsing error message for presentation.

    Strips the ' (at char N),' clutter and, when ``line_num`` is given,
    rewrites pyparsing's 'line:1' reference to the real grid line.

    :param ex_msg: exception/message object convertible to text
    :param line_num: 1-based line number in the original grid, if known
    :return: the cleaned-up message string
    """
    # Debug print() calls removed: a library parser must not write to stdout.
    msg = CHAR_NUM_RE.sub(u'', six.text_type(ex_msg))
    if line_num is not None:
        return msg.replace(u'line:1', u'line:%d' % line_num)
    return msg
# Convenience function, we want whitespace left alone.
def _leave_ws(cls, *args, **kwargs):
return cls(*args, **kwargs).leaveWhitespace()
# Versions of the pyparsing types that leave our whitespace alone!
Empty = lambda *a, **kwa: _leave_ws(pp.Empty, *a, **kwa)
Regex = lambda *a, **kwa: _leave_ws(pp.Regex, *a, **kwa)
Literal = lambda *a, **kwa: _leave_ws(pp.Literal, *a, **kwa)
CaselessLiteral = lambda *a, **kwa: _leave_ws(pp.CaselessLiteral, *a, **kwa)
Word = lambda *a, **kwa: _leave_ws(pp.Word, *a, **kwa)
Optional = lambda *a, **kwa: _leave_ws(pp.Optional, *a, **kwa)
Suppress = lambda *a, **kwa: _leave_ws(pp.Suppress, *a, **kwa)
Combine = lambda *a, **kwa: _leave_ws(pp.Combine, *a, **kwa)
And = lambda *a, **kwa: _leave_ws(pp.And, *a, **kwa)
Or = lambda *a, **kwa: _leave_ws(pp.Or, *a, **kwa)
ZeroOrMore = lambda *a, **kwa: _leave_ws(pp.ZeroOrMore, *a, **kwa)
OneOrMore = lambda *a, **kwa: _leave_ws(pp.OneOrMore, *a, **kwa)
Group = lambda *a, **kwa: _leave_ws(pp.Group, *a, **kwa)
DelimitedList = lambda *a, **kwa: _leave_ws(pp.delimitedList, *a, **kwa)
Forward = lambda *a, **kwa: _leave_ws(pp.Forward, *a, **kwa)
class ZincParseException(ValueError):
    """
    Exception thrown when a grid cannot be parsed successfully.  If known,
    the line and column for the grid are given.
    """

    def __init__(self, message, grid_str, line, col):
        """
        :param message: human-readable description of the parse failure
        :param grid_str: full text of the grid that failed to parse
        :param line: 1-based line number of the failure
        :param col: 1-based column number of the failure
        """
        self.grid_str = grid_str
        self.line = line
        self.col = col

        try:
            # If we know the line and column, point it out in the message.
            grid_str_lines = grid_str.split('\n')
            width = max([len(l) for l in grid_str_lines])
            linefmt = u'%%-%ds' % width
            rowfmt = u'%4d%s' + linefmt + u'%s'

            formatted_lines = [
                rowfmt % (
                    num,
                    ' >' if (line == num) else '| ',
                    line_str,
                    '< ' if (line == num) else ' |'
                )
                for (num, line_str)
                in enumerate(grid_str.split('\n'), 1)
            ]
            # Insert a '.^.' pointer line directly beneath the failing row.
            formatted_lines.insert(line,
                                   (u'     | ' + linefmt + u' |')
                                   % (((col - 2) * u' ') + '.^.')
                                   )

            # Border it for readability
            formatted_lines.insert(0, u'    .' + (u'-' * (2 + width)) + u'.')
            formatted_lines.append(u'    \'' + (u'-' * (2 + width)) + u'\'')

            # Append to message
            message += u'\n%s' % u'\n'.join(formatted_lines)
        except Exception:  # pragma: no cover
            # We should not get here; a formatting error must not mask the
            # original parse failure.  Narrowed from a bare `except:` so
            # KeyboardInterrupt/SystemExit still propagate.
            LOG.exception('Exception encountered formatting log message')

        super(ZincParseException, self).__init__(message)
class NearestMatch(object):
    """
    This class returns the nearest matching grammar for the given version.
    """

    def __init__(self, known_grammars):
        # Mapping of Version -> grammar; also serves as the lookup cache.
        self._known_grammars = known_grammars

    def __getitem__(self, ver):
        """
        Retrieve the grammar that closest matches the version string given.
        """
        if ver in self._known_grammars:
            return self._known_grammars[ver]
        # Fall back to the closest known version and cache the answer so
        # subsequent lookups for this version are direct hits.
        nearest_ver = Version.nearest(ver)
        grammar = self._known_grammars[nearest_ver]
        self._known_grammars[ver] = grammar
        return grammar
class GenerateMatch(object):
    """
    This class tries to generate a matching grammar based on the version input given.
    """

    def __init__(self, generator_fn):
        # Factory invoked once per previously-unseen version.
        self._generator_fn = generator_fn
        self._known_grammars = {}

    def __getitem__(self, ver):
        # Generate-on-miss, then serve every later lookup from the cache.
        if ver not in self._known_grammars:
            self._known_grammars[ver] = self._generator_fn(ver)
        return self._known_grammars[ver]
def _unescape(s, uri=False):
"""
Iterative parser for string escapes.
"""
out = ''
while len(s) > 0:
c = s[0]
if c == '\\':
# Backslash escape
esc_c = s[1]
if esc_c in ('u', 'U'):
# Unicode escape
out += six.unichr(int(s[2:6], base=16))
s = s[6:]
continue
else:
if esc_c == 'b':
out += '\b'
elif esc_c == 'f':
out += '\f'
elif esc_c == 'n':
out += '\n'
elif esc_c == 'r':
out += '\r'
elif esc_c == 't':
out += '\t'
else:
if uri and (esc_c == '#'):
# \# is passed through with backslash.
out += '\\'
# Pass through
out += esc_c
s = s[2:]
continue
else:
out += c
s = s[1:]
return out
# Grammar according to
#   latest: http://project-haystack.org/doc/Zinc
#   "2.0": https://web.archive.org/web/20141012013653/http://project-haystack.org:80/doc/Zinc
#   "3.0": https://web.archive.org/web/20160805064015/http://project-haystack.org:80/doc/Zinc

# Rudimentary elements
hs_digit = Regex(r'\d')
# Digit runs may contain '_' separators; the parse action strips them.
hs_digits = Regex(r'[0-9_]+').setParseAction(
    lambda toks: [''.join([t.replace('_', '') for t in toks[0]])])
hs_alphaLo = Regex(r'[a-z]')
hs_alphaHi = Regex(r'[A-Z]')
hs_alpha = Regex(r'[a-zA-Z]')
hs_valueSep = Regex(r' *, *').setName('valueSep')
hs_rowSep = Regex(r' *\n *').setName('rowSep')
hs_plusMinus = Or([Literal('+'), Literal('-')])

# Forward declaration of data types.  Resolved further down once the
# version-specific scalar/grid productions are defined; NearestMatch maps an
# arbitrary requested version onto the closest grammar we have.
hs_scalar_2_0 = Forward()
hs_scalar_3_0 = Forward()
hs_scalar = NearestMatch({
    VER_2_0: hs_scalar_2_0,
    VER_3_0: hs_scalar_3_0
})
hs_grid_2_0 = Forward()
hs_grid_3_0 = Forward()
hs_grid = NearestMatch({
    VER_2_0: hs_grid_2_0,
    VER_3_0: hs_grid_3_0
})
# Co-ordinates: C(<lat>,<lng>) with optionally-signed decimal degrees.
# An empty degree component parses as 0.0 ('toks[0] or '0'').
hs_coordDeg = Combine(And([
    Optional(Literal('-')),
    Optional(hs_digits),
    Optional(And([Literal('.'), hs_digits]))
])).setParseAction(lambda toks: [float(toks[0] or '0')])
hs_coord = And([Suppress(Literal('C(')),
                hs_coordDeg,
                Suppress(hs_valueSep),
                hs_coordDeg,
                Suppress(Literal(')'))]).setParseAction(
    lambda toks: [Coordinate(toks[0], toks[1])])

# Dates and times
# Numeric zone offset: 'z'/'Z' or +HH:MM / -HH:MM.
hs_tzHHMMOffset = Combine(Or([
    CaselessLiteral('z'),
    And([hs_plusMinus, Regex(r'\d\d:\d\d')])]
))
hs_tzName = Regex(r'[A-Z][a-zA-Z0-9_\-]*')
hs_tzUTCGMT = Or([Literal('UTC'), Literal('GMT')])
hs_tzUTCOffset = Combine(And([
    hs_tzUTCGMT, Optional(
        Or([Literal('0'),
            And([hs_plusMinus, OneOrMore(hs_digit)]
                )]
           ))]))
hs_timeZoneName = Or([hs_tzUTCOffset, hs_tzName])
hs_dateSep = CaselessLiteral('T')
# YYYY-MM-DD
hs_date_str = Combine(And([
    hs_digit, hs_digit, hs_digit, hs_digit,
    Literal('-'),
    hs_digit, hs_digit,
    Literal('-'),
    hs_digit, hs_digit]))
hs_date = hs_date_str.copy().setParseAction(
    lambda toks: [datetime.datetime.strptime(toks[0], '%Y-%m-%d').date()])
# hh:mm:ss with optional fractional seconds.
hs_time_str = Combine(And([
    hs_digit, hs_digit,
    Literal(':'),
    hs_digit, hs_digit,
    Literal(':'),
    hs_digit, hs_digit,
    Optional(And([
        Literal('.'),
        OneOrMore(hs_digit)]))
]))
def _parse_time(toks):
time_str = toks[0]
time_fmt = '%H:%M:%S'
if '.' in time_str:
time_fmt += '.%f'
return [datetime.datetime.strptime(time_str, time_fmt).time()]
hs_time = hs_time_str.copy().setParseAction(_parse_time)
# ISO8601 timestamp: date 'T' time, optional numeric zone offset.
# Upper-cased before parsing since the grammar accepts lower-case 't'/'z'.
hs_isoDateTime = Combine(And([
    hs_date_str,
    hs_dateSep,
    hs_time_str,
    Optional(hs_tzHHMMOffset)
])).setParseAction(lambda toks: [iso8601.parse_date(toks[0].upper())])
def _parse_datetime(toks):
# Made up of parts: ISO8601 Date/Time, time zone label
isodt = toks[0]
if len(toks) > 1:
tzname = toks[1]
else:
tzname = None
if (isodt.tzinfo is None) and bool(tzname): # pragma: no cover
# This technically shouldn't happen according to Zinc specs
return [timezone(tzname).localise(isodt)]
elif bool(tzname):
try:
tz = timezone(tzname)
return [isodt.astimezone(tz)]
except: # pragma: no cover
# Unlikely to occur, might do though if Project Haystack changes
# its timezone list or if a system doesn't recognise a particular
# timezone.
return [isodt] # Failed, leave alone
else:
return [isodt]
# Full date-time: ISO timestamp optionally followed by ' <zone name>'.
hs_dateTime = And([
    hs_isoDateTime,
    Optional(And([
        Suppress(Literal(' ')),
        hs_timeZoneName
    ]))
]).setParseAction(_parse_datetime)

# Quantities and raw numeric values
# A unit char is a letter, one of %_/$, or any char above U+007F.
hs_unitChar = Or([
    hs_alpha,
    Word(u'%_/$' + u''.join([
        six.unichr(c)
        for c in range(0x0080, 0xffff)
    ]), exact=1)
])
hs_unit = Combine(OneOrMore(hs_unitChar))
hs_exp = Combine(And([
    CaselessLiteral('e'),
    Optional(hs_plusMinus),
    hs_digits
]))
hs_decimal = Combine(And([
    Optional(Literal('-')),
    hs_digits,
    Optional(And([
        Literal('.'),
        hs_digits
    ])),
    Optional(hs_exp)
])).setParseAction(lambda toks: [float(toks[0])])
hs_quantity = And([hs_decimal, hs_unit]).setParseAction(
    lambda toks: [Quantity(toks[0], unit=toks[1])])
# Quantity must be tried before bare decimal so a trailing unit is consumed.
hs_number = Or([
    hs_quantity,
    hs_decimal,
    Or([
        Literal('INF'),
        Literal('-INF'),
        Literal('NaN')
    ]).setParseAction(lambda toks: [float(toks[0])])
])
# URIs: backtick-quoted; escapes resolved with the uri=True rule.
hs_uriChar = Regex(r"([^\x00-\x1f\\`]|\\[bfnrt\\:/?"
                   + r"#\[\]@&=;`]|\\[uU][0-9a-fA-F]{4})")
hs_uri = Combine(And([
    Suppress(Literal('`')),
    ZeroOrMore(hs_uriChar),
    Suppress(Literal('`'))
])).setParseAction(lambda toks: [Uri(_unescape(toks[0], uri=True))])

# Strings: double-quoted with backslash escapes.
hs_strChar = Regex(r"([^\x00-\x1f\\\"]|\\[bfnrt\\\"$]|\\[uU][0-9a-fA-F]{4})")
hs_str = Combine(And([
    Suppress(Literal('"')),
    ZeroOrMore(hs_strChar),
    Suppress(Literal('"'))
])).setParseAction(lambda toks: [_unescape(toks[0], uri=False)])

# References: @id with an optional quoted display string.
hs_refChar = Or([hs_alpha, hs_digit, Word('_:-.~', exact=1)])
hs_ref = And([
    Suppress(Literal('@')),
    Combine(ZeroOrMore(hs_refChar)),
    Optional(And([
        Suppress(Literal(' ')),
        hs_str
    ]))
]).setParseAction(lambda toks: [
    Ref(toks[0], toks[1] if len(toks) > 1 else None)
])

# Bins
hs_binChar = Regex(r"[\x20-\x27\x2a-\x7f]")
hs_bin = Combine(And([
    Suppress(Literal('Bin(')),
    Combine(ZeroOrMore(hs_binChar)),
    Suppress(Literal(')'))
])).setParseAction(lambda toks: [Bin(toks[0])])

# Haystack 3.0 XStr(...): <Type>("<value>")
hs_xstr = And([
    Regex(r"[a-zA-Z0-9_]+"),
    Suppress(Literal('(')),
    hs_str,
    Suppress(Literal(')'))
]).setParseAction(lambda toks: [XStr(toks[0], toks[1])])

# Booleans: bare 'T' / 'F'.
hs_bool = Word('TF', min=1, max=1, exact=1).setParseAction(
    lambda toks: [toks[0] == 'T'])

# Singleton values
hs_remove = Literal('R').setParseAction(
    lambda toks: [REMOVE]).setName('remove')
hs_marker = Literal('M').setParseAction(
    lambda toks: [MARKER]).setName('marker')
hs_null = Literal('N').setParseAction(
    lambda toks: [None]).setName('null')
hs_na = Literal('NA').setParseAction(
    lambda toks: [NA]).setName('na')
# Lists, these will probably be in Haystack 4.0, so let's not
# assume a version. There are three cases:
# - Empty list: [ {optional whitespace} ]
# - List *with* trailing comma: [ 1, 2, 3, ]
# - List without trailing comma: [ 1, 2, 3 ]
#
# We need to handle this trailing separator case. That for now means
# that a NULL within a list *MUST* be explicitly given using the 'N'
# literal: we cannot support implicit NULLs as they are ambiguous.
hs_list = GenerateMatch(
    lambda ver: Group(Or([
        Suppress(Regex(r'[ *]')),
        And([
            Suppress(Regex(r'\[ *')),
            Optional(DelimitedList(
                hs_scalar[ver],
                delim=hs_valueSep)),
            Suppress(Optional(hs_valueSep)),
            Suppress(Regex(r' *\]'))
        ])
    ])).setParseAction(lambda toks: toks.asList()))

# Tag IDs
hs_id = Regex(r'[a-z][a-zA-Z0-9_]*').setName('id')

# Grid building blocks
# A cell may be empty (implicit NULL) or any version-appropriate scalar.
hs_cell = GenerateMatch(
    lambda ver: Or([Empty().copy().setParseAction(lambda toks: [None]),
                    hs_scalar[ver]]).setName('cell'))

# Dict
# There are three cases:
# - Empty dict: { {optional whitespace} }
# - map with marker: { m }
# - dict: { k:1 }
#
hs_tagmarker = hs_id
hs_tagpair = GenerateMatch(
    lambda ver: And([hs_id,
                     Suppress(Regex(r': *')),
                     hs_scalar[ver]
                     ])
    .setParseAction(lambda toks: tuple(toks[:2]))
    .setName('tagPair'))
hs_tag = GenerateMatch(
    lambda ver: Or([hs_tagmarker, hs_tagpair[ver]])
    .setName('tag'))
hs_tags = GenerateMatch(
    lambda ver: ZeroOrMore(Or([hs_tag[ver],
                               Suppress(Regex(r'[ *]'))]))
    .setName('tags'))
def to_dict(tokenlist):
result = {}
i = 0
it | |
Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize)
outputFile = './%s%s' %(output_tag, output_format)
plt.savefig(outputFile, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile))
plt.close()
if (paper_plot == 1):
fig, ax = plt.subplots(nrows=1, ncols=3, sharex=False, sharey=True, figsize=(16, 6))
delta_fontsize = 0
caps = 5
ewidth = 1.5
for model_number in range(0, len(SnapList)):
for count in range(len(SnapList[model_number])):
w = np.where((counts_array[model_number][count] > 0))[0]
ax[count].plot(bin_middle_array[model_number][count][w], counts_array[model_number][count][w]
/ normalization_array[model_number], color = PlotScripts.colors[model_number],
linestyle = PlotScripts.linestyles[model_number], rasterized = True,
label = r"$\mathbf{SAGE}$", linewidth = PlotScripts.global_linewidth)
tick_locs = np.arange(6.0, 12.0)
ax[count].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs], fontsize = PlotScripts.global_fontsize)
ax[count].set_xlim([6.8, 10.3])
ax[count].tick_params(which = 'both', direction='in',
width = PlotScripts.global_tickwidth)
ax[count].tick_params(which = 'major', length = PlotScripts.global_ticklength)
ax[count].tick_params(which = 'minor', length = PlotScripts.global_ticklength-2)
ax[count].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
fontsize = PlotScripts.global_labelsize - delta_fontsize)
ax[count].xaxis.set_minor_locator(plt.MultipleLocator(0.25))
#ax[count].set_xticks(np.arange(6.0, 12.0))
for axis in ['top','bottom','left','right']: # Adjust axis thickness.
ax[count].spines[axis].set_linewidth(PlotScripts.global_axiswidth)
# Since y-axis is shared, only need to do this once.
ax[0].set_yscale('log', nonposy='clip')
ax[0].set_yticklabels([r"$\mathbf{10^{-5}}$",r"$\mathbf{10^{-5}}$",r"$\mathbf{10^{-4}}$", r"$\mathbf{10^{-3}}$",
r"$\mathbf{10^{-2}}$",r"$\mathbf{10^{-1}}$"])
ax[0].set_ylim([1e-5, 1e-1])
#ax[0].set_ylabel(r'\mathbf{$\log_{10} \Phi\ [\mathrm{Mpc}^{-3}\: \mathrm{dex}^{-1}]}$',
ax[0].set_ylabel(r'$\mathbf{log_{10} \: \Phi\ [Mpc^{-3}\: dex^{-1}]}$',
fontsize = PlotScripts.global_labelsize - delta_fontsize)
Obs.Get_Data_SMF()
PlotScripts.Plot_SMF_z6(ax[0], errorwidth=ewidth, capsize=caps)
PlotScripts.Plot_SMF_z7(ax[1], errorwidth=ewidth, capsize=caps)
PlotScripts.Plot_SMF_z8(ax[2], errorwidth=ewidth, capsize=caps)
####
ax[0].text(0.7, 0.9, r"$\mathbf{z = 6}$", transform = ax[0].transAxes, fontsize = PlotScripts.global_fontsize - delta_fontsize)
ax[1].text(0.7, 0.9, r"$\mathbf{z = 7}$", transform = ax[1].transAxes, fontsize = PlotScripts.global_fontsize - delta_fontsize)
ax[2].text(0.7, 0.9, r"$\mathbf{z = 8}$", transform = ax[2].transAxes, fontsize = PlotScripts.global_fontsize - delta_fontsize)
#leg = ax[0,0].legend(loc=2, bbox_to_anchor = (0.2, -0.5), numpoints=1, labelspacing=0.1)
leg = ax[0].legend(loc='lower left', numpoints=1, labelspacing=0.1)
leg.draw_frame(False) # Don't want a box frame
for t in leg.get_texts(): # Reduce the size of the text
t.set_fontsize(PlotScripts.global_legendsize - 2)
plt.tight_layout()
outputFile = "{0}_paper{1}".format(output_tag, output_format)
plt.savefig(outputFile, bbox_inches='tight') # Save the figure
print('Saved file to {0}'.format(outputFile))
plt.close()
##
def plot_fesc_galaxy(SnapList, PlotSnapList, simulation_norm,
mean_galaxy_fesc, std_galaxy_fesc, N_galaxy_fesc,
mean_halo_fesc, std_halo_fesc, N_halo_fesc,
ResolutionLimit_mean, model_tags, paper_plots,
mass_global, fesc_global, Ngamma_global, output_tag):
"""
Plots the escape fraction as a function of stellar/halo mass.
Parallel compatible.
Accepts 3D arrays of the escape fraction binned into Stellar Mass bins to plot the escape fraction for multiple models.
Mass units are log(Msun)
Parameters
---------
SnapList : Nested array, SnapList[model_number0] = [snapshot0_model0, ..., snapshotN_model0], with length equal to the number of models.
Snapshots for each model.
simulation_norm : array with length equal to the number of models.
Denotes which simulation each model uses.
0 : MySim
1 : Mini-Millennium
2 : Tiamat (down to z = 5)
3 : Extended Tiamat (down to z = 1.6ish).
4 : Britton's Simulation
5 : Kali
mean_galaxy_fesc, std_galaxy_fesc, N_galaxy_fesc : Nested 3-dimensional array, mean_galaxy_fesc[model_number0][snapshot0] = [bin0_meanfesc, ..., binN_meanfesc], with length equal to the number of models.
Mean/Standard deviation for fesc in each stellar mass bin, for each [model_number] and [snapshot_number]. N_galaxy_fesc is the number of galaxies placed into each mass bin.
mean_halo_fesc, std_halo_fesc, N_halo_fesc Nested 3-dimensional array, mean_halo_fesc[model_number0][snapshot0] = [bin0_meanfesc, ..., binN_meanfesc], with length equal to the number of models.
Identical to previous except using the halo virial mass for the binning rather than stellar mass.
ResolutionLimit_mean : array of floats with the same shape as mean_galaxy_fesc.
This is the mean stellar mass for a halo with len (number of N-body simulation particles) between 'stellar_mass_halolen_lower' and 'stellar_mass_halolen_upper'.
model_tags : array of strings with length equal to the number of models.
Strings that contain the tag for each model. Will be placed on the plot.
paper_plots: Integer.
Flag to denote whether we should plot a full, 4 panel plot for the
RSAGE paper.
output_tag : string
Name of the file that will be generated.
Returns
-------
No returns.
Generates and saves the plot (named via output_tag).
Units
-----
Mass units are log(Msun).
"""
def adjust_stellarmass_plot(ax):
    """Style the single-panel fesc-vs-stellar-mass axis.

    Sets labels, axis limits, tick locators/formatting, spine widths and
    the legend on the supplied matplotlib axis (modified in place).
    """
    #ax.axhline(0.20, 0, 100, color ='k', linewidth = PlotScripts.global_linewidth, linestyle = '-.')
    #ax.text(7.8, 0.22, r"$f_\mathrm{esc, base}$", color = 'k',
    #        size = PlotScripts.global_fontsize)
    ax.set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
                  size = PlotScripts.global_fontsize)
    ax.set_ylabel(r'$\mathbf{\langle f_{esc}\rangle_{M_*}}$',
                  size = PlotScripts.global_labelsize)
    ax.set_xlim([6.8, 10])
    ax.set_ylim([0.05, 0.45])
    #ax.axhline(0.35, 0, 100, color ='k', linewidth = PlotScripts.global_linewidth, linestyle = '-.')
    #ax.text(9.1, 0.37, r"$f_\mathrm{esc} = 0.35$", color = 'k',
    #        size = PlotScripts.global_fontsize)
    # Minor ticks every 0.25 dex in mass and 0.05 in fesc.
    ax.xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
    ax.yaxis.set_minor_locator(mtick.MultipleLocator(0.05))
    ax.tick_params(which = 'both', direction='in', width =
                   PlotScripts.global_tickwidth)
    ax.tick_params(which = 'major', length = PlotScripts.global_ticklength)
    ax.tick_params(which = 'minor', length = PlotScripts.global_ticklength-2)
    for axis in ['top','bottom','left','right']: # Adjust axis thickness.
        ax.spines[axis].set_linewidth(PlotScripts.global_axiswidth)
    # Bold tick labels; the label lists must line up with the default
    # tick locations at these axis limits.
    tick_locs = np.arange(6.0, 11.0)
    ax.set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
                       fontsize = PlotScripts.global_fontsize)
    tick_locs = np.arange(0.0, 0.80, 0.10)
    ax.set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
                       fontsize = PlotScripts.global_fontsize)
    '''
    labels = ax.yaxis.get_ticklabels()
    locs = ax.yaxis.get_ticklocs()
    for label, loc in zip(labels, locs):
        print("{0} {1}".format(label, loc))
    '''
    leg = ax.legend(loc="upper right", numpoints=1, labelspacing=0.1)
    leg.draw_frame(False)  # Don't want a box frame
    for t in leg.get_texts():  # Reduce the size of the text
        t.set_fontsize('medium')
def adjust_paper_plots(ax, model_tags):
    """Style the 2x2 fesc-vs-stellar-mass grid for the paper figure.

    Applies shared labels/limits/tick styling to all four panels and
    stamps each panel with its model tag.

    NOTE(review): reads `delta_fontsize` from the enclosing function's
    scope, so it must only be called after that variable is assigned.
    """
    ax[1,0].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
                       size = PlotScripts.global_fontsize)
    ax[1,1].set_xlabel(r'$\mathbf{log_{10} \: M_{*} \:[M_{\odot}]}$',
                       size = PlotScripts.global_fontsize)
    ax[0,0].set_ylabel(r'$\mathbf{\langle f_{esc}\rangle_{M_*}}$',
                       size = PlotScripts.global_labelsize)
    ax[1,0].set_ylabel(r'$\mathbf{\langle f_{esc}\rangle_{M_*}}$',
                       size = PlotScripts.global_labelsize)
    # Walk the four panels in (row, col) order.
    ax_x = [0, 0, 1, 1]
    ax_y = [0, 1, 0, 1]
    for count, (x, y) in enumerate(zip(ax_x, ax_y)):
        ax[x,y].set_xlim([4.8, 10.4])
        ax[x,y].set_ylim([0.00, 0.68])
        ax[x,y].yaxis.set_major_locator(mtick.MultipleLocator(0.1))
        ax[x,y].xaxis.set_major_locator(mtick.MultipleLocator(1.0))
        ax[x,y].yaxis.set_minor_locator(mtick.MultipleLocator(0.05))
        ax[x,y].xaxis.set_minor_locator(mtick.MultipleLocator(0.25))
        ax[x,y].tick_params(which = 'both', direction='in', width =
                            PlotScripts.global_tickwidth)
        ax[x,y].tick_params(which = 'major', length = PlotScripts.global_ticklength)
        ax[x,y].tick_params(which = 'minor',
                            length = PlotScripts.global_ticklength - 2)
        for axis in ['top','bottom','left','right']: # Adjust axis thickness.
            ax[x,y].spines[axis].set_linewidth(PlotScripts.global_axiswidth)
        print(model_tags[count])
        label = model_tags[count]
        # Model tag in the top-left corner of each panel.
        ax[x,y].text(0.05, 0.65, label, transform = ax[x,y].transAxes, fontsize = PlotScripts.global_fontsize - delta_fontsize)
    # Bold tick labels on the shared axes only.
    tick_locs = np.arange(4.0, 11.0)
    ax[1,0].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
                            fontsize = PlotScripts.global_fontsize)
    ax[1,1].set_xticklabels([r"$\mathbf{%d}$" % x for x in tick_locs],
                            fontsize = PlotScripts.global_fontsize)
    tick_locs = np.arange(-0.1, 0.80, 0.10)
    ax[0,0].set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
                            fontsize = PlotScripts.global_fontsize)
    ax[1,0].set_yticklabels([r"$\mathbf{%.2f}$" % x for x in tick_locs],
                            fontsize = PlotScripts.global_fontsize)
    # Debug dump of the resulting tick label/location pairs.
    print("x")
    labels = ax[1,0].xaxis.get_ticklabels()
    locs = ax[1,0].xaxis.get_ticklocs()
    for label, loc in zip(labels, locs):
        print("{0} {1}".format(label, loc))
    print("y")
    labels = ax[1,0].yaxis.get_ticklabels()
    locs = ax[1,0].yaxis.get_ticklocs()
    for label, loc in zip(labels, locs):
        print("{0} {1}".format(label, loc))
print("Plotting fesc as a function of stellar mass.")
## Array initialization ##
master_mean_fesc_stellar, master_std_fesc_stellar, master_N_fesc_stellar, master_bin_middle_stellar = \
collect_across_tasks(mean_galaxy_fesc, std_galaxy_fesc, N_galaxy_fesc,
SnapList, PlotSnapList, True, m_gal_low, m_gal_high)
if rank == 0:
if paper_plots == 0:
fig = plt.figure()
ax1 = fig.add_subplot(111)
else:
fig, ax = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(16, 6))
fig2, ax2 = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(16, 6))
delta_fontsize = 0
caps = 5
ewidth = 1.5
count_x = 0
for count, model_number in enumerate(range(0, len(SnapList))):
if count == 2:
count_x += 1
print("There were a total of {0} galaxies over the entire redshift range.".format(sum(N_halo_fesc[model_number])))
## Normalization for each model. ##
if (simulation_norm[model_number] == 0):
AllVars.Set_Params_Mysim()
elif (simulation_norm[model_number] == 1):
AllVars.Set_Params_MiniMill()
elif (simulation_norm[model_number] == 2):
AllVars.Set_Params_Tiamat()
elif (simulation_norm[model_number] == 3):
AllVars.Set_Params_Tiamat_extended()
elif (simulation_norm[model_number] == 4):
AllVars.Set_Params_Britton()
elif(simulation_norm[model_number] == 5):
AllVars.Set_Params_Kali()
plot_count = 0
for snapshot_idx in range(0, len(SnapList[model_number])):
if (SnapList[model_number][snapshot_idx] == PlotSnapList[model_number][plot_count]):
if (model_number == 0):
label = r"$\mathbf{z = " + \
str(int(round(AllVars.SnapZ[SnapList[model_number][snapshot_idx]]))) +\
"}$"
else:
label = ""
## Plots as a function of stellar mass ##
w = np.where((master_N_fesc_stellar[model_number][snapshot_idx] < 4))[0] # If there are no galaxies in the bin we don't want to plot.
master_mean_fesc_stellar[model_number][snapshot_idx][w] = np.nan
if paper_plots == 0:
print(master_mean_fesc_stellar[model_number][snapshot_idx])
ax1.plot(master_bin_middle_stellar[model_number][snapshot_idx],
master_mean_fesc_stellar[model_number][snapshot_idx],
color = PlotScripts.colors[plot_count],
ls = PlotScripts.linestyles[model_number],
rasterized = True, label = label,
lw = PlotScripts.global_linewidth)
else:
ax[count_x, count%2].plot(master_bin_middle_stellar[model_number][snapshot_idx],
master_mean_fesc_stellar[model_number][snapshot_idx],
color = PlotScripts.colors[plot_count],
ls = PlotScripts.linestyles[0],
rasterized = True, label = label,
lw = PlotScripts.global_linewidth)
#w = np.random.randint(0,
# len(mass_global[model_number][snapshot_idx][0]),
# size=500)
#sc = ax2[count_x, count%2].scatter(mass_global[model_number][snapshot_idx][0][w],
# fesc_global[model_number][snapshot_idx][0][w],
# c=np.log10(Ngamma_global[model_number][snapshot_idx][0][w]*1.0e50),
# alpha = 0.5,cmap='plasma')
#plt.colorbar(sc)
#ax2[count_x, count%2].hexbin(mass_global[model_number][snapshot_idx],
# fesc_global[model_number][snapshot_idx],
# C=Ngamma_global[model_number][snapshot_idx])
plot_count += 1
if (plot_count | |
# zaza/openstack/utilities/series_upgrade.py
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of functions for testing series upgrade."""
import asyncio
import collections
import copy
import concurrent
import logging
import os
import time
from zaza import model
from zaza.charm_lifecycle import utils as cl_utils
import zaza.openstack.utilities.generic as os_utils
# Subordinate applications that must be excluded from pause/resume
# handling during series upgrade.
SUBORDINATE_PAUSE_RESUME_BLACKLIST = [
    "cinder-ceph",
]
def app_config(charm_name, is_async=True):
    """Return a dict with the upgrade config for an application.

    :param charm_name: Name of the charm about to upgrade
    :type charm_name: str
    :param is_async: Whether the upgrade functions should be async
    :type is_async: bool
    :returns: A dictionary of the upgrade config for the application
    :rtype: Dict
    """
    if is_async:
        default_upgrade = async_series_upgrade_application
        secondary_first_upgrade = async_series_upgrade_non_leaders_first
    else:
        default_upgrade = series_upgrade_application
        secondary_first_upgrade = series_upgrade_non_leaders_first
    default = {
        'origin': 'openstack-origin',
        'pause_non_leader_subordinate': True,
        'pause_non_leader_primary': True,
        'upgrade_function': default_upgrade,
        'post_upgrade_functions': []}
    # The factory must deep-copy `default`: handing out the shared dict for
    # unknown charms would alias its mutable 'post_upgrade_functions' list
    # across every caller, so one caller's mutation would leak into all
    # later lookups.
    _app_settings = collections.defaultdict(
        lambda: copy.deepcopy(default))
    ceph = {
        'origin': "source",
        'pause_non_leader_primary': False,
        'pause_non_leader_subordinate': False,
    }
    # Charm-specific overrides applied on top of a copy of `default`.
    exceptions = {
        'rabbitmq-server': {
            'origin': 'source',
            'pause_non_leader_subordinate': False, },
        'percona-cluster': {'origin': 'source', },
        'nova-compute': {
            'pause_non_leader_primary': False,
            'pause_non_leader_subordinate': False, },
        'ceph': ceph,
        'ceph-mon': ceph,
        'ceph-osd': ceph,
        'designate-bind': {'origin': None, },
        'tempest': {'origin': None, },
        'memcached': {
            'origin': None,
            'pause_non_leader_primary': False,
            'pause_non_leader_subordinate': False,
        },
        'vault': {
            'origin': None,
            'pause_non_leader_primary': False,
            'pause_non_leader_subordinate': True,
            'post_upgrade_functions': [
                ('zaza.openstack.charm_tests.vault.setup.'
                 'mojo_unseal_by_unit')]
        },
        'mongodb': {
            'origin': None,
            'upgrade_function': secondary_first_upgrade,
        }
    }
    for key, value in exceptions.items():
        _app_settings[key] = copy.deepcopy(default)
        _app_settings[key].update(value)
    return _app_settings[charm_name]
def run_post_upgrade_functions(post_upgrade_functions):
    """Resolve and call each function named in the supplied list.

    Each entry is resolved via ``cl_utils.get_class`` and then invoked
    with no arguments. A ``None`` or empty list is a no-op.

    :param post_upgrade_functions: Dotted paths of functions to run
    :type post_upgrade_functions: [str, str, ...] or None
    """
    for func_path in (post_upgrade_functions or []):
        logging.info("Running {}".format(func_path))
        cl_utils.get_class(func_path)()
def series_upgrade_non_leaders_first(
        application, from_series="trusty",
        to_series="xenial",
        origin='openstack-origin',
        completed_machines=None,
        pause_non_leader_primary=False,
        pause_non_leader_subordinate=False,
        files=None,
        workaround_script=None,
        post_upgrade_functions=None
):
    """Series upgrade non leaders first.

    Wrap all the functionality to handle series upgrade for charms
    which must have non leaders upgraded first.

    :param application: Name of application to upgrade series
    :type application: str
    :param from_series: The series from which to upgrade
    :type from_series: str
    :param to_series: The series to which to upgrade
    :type to_series: str
    :param origin: The configuration setting variable name for changing origin
                   source. (openstack-origin or source)
    :type origin: str
    :param completed_machines: List of completed machines which do no longer
                               require series upgrade. Appended to in place as
                               machines complete.
    :type completed_machines: list
    :param pause_non_leader_primary: Whether the non-leader applications should
                                     be paused
    :type pause_non_leader_primary: bool
    :param pause_non_leader_subordinate: Whether the non-leader subordinate
                                         hacluster applications should be
                                         paused
    :type pause_non_leader_subordinate: bool
    :param files: Workaround files to scp to unit under upgrade
    :type files: list
    :param workaround_script: Workaround script to run during series upgrade
    :type workaround_script: str
    :returns: None
    :rtype: None
    """
    # Fix: the former `completed_machines=[]` default was a shared mutable
    # object, so completions leaked between calls that relied on the default.
    if completed_machines is None:
        completed_machines = []
    status = model.get_status().applications[application]
    leader = None
    non_leaders = []
    for unit in status["units"]:
        if status["units"][unit].get("leader"):
            leader = unit
        else:
            non_leaders.append(unit)
    # Pause the non-leaders
    for unit in non_leaders:
        if pause_non_leader_subordinate:
            if status["units"][unit].get("subordinates"):
                for subordinate in status["units"][unit]["subordinates"]:
                    _app = subordinate.split('/')[0]
                    if _app in SUBORDINATE_PAUSE_RESUME_BLACKLIST:
                        logging.info("Skipping pausing {} - blacklisted"
                                     .format(subordinate))
                    else:
                        logging.info("Pausing {}".format(subordinate))
                        model.run_action(
                            subordinate, "pause", action_params={})
        if pause_non_leader_primary:
            logging.info("Pausing {}".format(unit))
            model.run_action(unit, "pause", action_params={})
    # Series upgrade the non-leaders first
    for unit in non_leaders:
        machine = status["units"][unit]["machine"]
        if machine not in completed_machines:
            logging.info("Series upgrade non-leader unit: {}"
                         .format(unit))
            series_upgrade(unit, machine,
                           from_series=from_series, to_series=to_series,
                           origin=origin,
                           post_upgrade_functions=post_upgrade_functions)
            run_post_upgrade_functions(post_upgrade_functions)
            completed_machines.append(machine)
        else:
            logging.info("Skipping unit: {}. Machine: {} already upgraded. "
                         .format(unit, machine))
            model.block_until_all_units_idle()
    # Series upgrade the leader
    machine = status["units"][leader]["machine"]
    logging.info("Series upgrade leader: {}".format(leader))
    if machine not in completed_machines:
        series_upgrade(leader, machine,
                       from_series=from_series, to_series=to_series,
                       origin=origin,
                       workaround_script=workaround_script,
                       files=files,
                       post_upgrade_functions=post_upgrade_functions)
        completed_machines.append(machine)
    else:
        # Fix: previously logged the stale loop variable `unit` (NameError
        # when there were no non-leaders); the leader is what is skipped.
        logging.info("Skipping unit: {}. Machine: {} already upgraded."
                     .format(leader, machine))
    model.block_until_all_units_idle()
async def async_series_upgrade_non_leaders_first(
    application,
    from_series="trusty",
    to_series="xenial",
    origin='openstack-origin',
    completed_machines=None,
    pause_non_leader_primary=False,
    pause_non_leader_subordinate=False,
    files=None,
    workaround_script=None,
    post_upgrade_functions=None
):
    """Series upgrade non leaders first.

    Wrap all the functionality to handle series upgrade for charms
    which must have non leaders upgraded first.

    :param application: Name of application to upgrade series
    :type application: str
    :param from_series: The series from which to upgrade
    :type from_series: str
    :param to_series: The series to which to upgrade
    :type to_series: str
    :param origin: The configuration setting variable name for changing origin
                   source. (openstack-origin or source)
    :type origin: str
    :param completed_machines: List of completed machines which do no longer
                               require series upgrade. Appended to in place as
                               machines complete.
    :type completed_machines: list
    :param pause_non_leader_primary: Whether the non-leader applications should
                                     be paused
    :type pause_non_leader_primary: bool
    :param pause_non_leader_subordinate: Whether the non-leader subordinate
                                         hacluster applications should be
                                         paused
    :type pause_non_leader_subordinate: bool
    :param files: Workaround files to scp to unit under upgrade
    :type files: list
    :param workaround_script: Workaround script to run during series upgrade
    :type workaround_script: str
    :returns: None
    :rtype: None
    """
    # Fix: the former `completed_machines=[]` default was a shared mutable
    # object, so completions leaked between calls that relied on the default.
    if completed_machines is None:
        completed_machines = []
    status = (await model.async_get_status()).applications[application]
    leader = None
    non_leaders = []
    for unit in status["units"]:
        if status["units"][unit].get("leader"):
            leader = unit
        else:
            non_leaders.append(unit)
    # Pause the non-leaders
    for unit in non_leaders:
        if pause_non_leader_subordinate:
            if status["units"][unit].get("subordinates"):
                for subordinate in status["units"][unit]["subordinates"]:
                    _app = subordinate.split('/')[0]
                    if _app in SUBORDINATE_PAUSE_RESUME_BLACKLIST:
                        logging.info("Skipping pausing {} - blacklisted"
                                     .format(subordinate))
                    else:
                        logging.info("Pausing {}".format(subordinate))
                        await model.async_run_action(
                            subordinate, "pause", action_params={})
        if pause_non_leader_primary:
            logging.info("Pausing {}".format(unit))
            await model.async_run_action(unit, "pause", action_params={})
    # Series upgrade the non-leaders first
    for unit in non_leaders:
        machine = status["units"][unit]["machine"]
        if machine not in completed_machines:
            logging.info("Series upgrade non-leader unit: {}"
                         .format(unit))
            await async_series_upgrade(
                unit, machine,
                from_series=from_series, to_series=to_series,
                origin=origin,
                post_upgrade_functions=post_upgrade_functions)
            run_post_upgrade_functions(post_upgrade_functions)
            completed_machines.append(machine)
        else:
            logging.info("Skipping unit: {}. Machine: {} already upgraded. "
                         .format(unit, machine))
    await model.async_block_until_all_units_idle()
    # Series upgrade the leader
    machine = status["units"][leader]["machine"]
    logging.info("Series upgrade leader: {}".format(leader))
    if machine not in completed_machines:
        await async_series_upgrade(
            leader, machine,
            from_series=from_series, to_series=to_series,
            origin=origin,
            workaround_script=workaround_script,
            files=files,
            post_upgrade_functions=post_upgrade_functions)
        completed_machines.append(machine)
    else:
        # Fix: previously logged the stale loop variable `unit` (NameError
        # when there were no non-leaders); the leader is what is skipped.
        logging.info("Skipping unit: {}. Machine: {} already upgraded."
                     .format(leader, machine))
    await model.async_block_until_all_units_idle()
def series_upgrade_application(application, pause_non_leader_primary=True,
pause_non_leader_subordinate=True,
from_series="trusty", to_series="xenial",
origin='openstack-origin',
completed_machines=[],
files=None, workaround_script=None,
post_upgrade_functions=None):
"""Series upgrade application.
Wrap all the functionality to handle series upgrade for a given
application. Including pausing non-leader units.
:param application: Name of application to upgrade series
:type application: str
:param pause_non_leader_primary: Whether the non-leader applications should
be paused
:type pause_non_leader_primary: bool
:param pause_non_leader_subordinate: Whether the non-leader subordinate
hacluster applications should be
paused
:type pause_non_leader_subordinate: bool
:param from_series: The series from which to upgrade
:type from_series: str
:param to_series: The series to which to upgrade
:type to_series: str
:param origin: The configuration setting variable name for changing origin
source. (openstack-origin or source)
:type origin: str
:param completed_machines: List of completed machines which do no longer
require series upgrade.
:type completed_machines: list
:param files: Workaround files to scp to unit under upgrade
:type files: list
:param workaround_script: Workaround script to run during series upgrade
:type workaround_script: str
:returns: None
:rtype: None
"""
status = model.get_status().applications[application]
# For some applications (percona-cluster) the leader unit must upgrade
# first. For API applications the non-leader haclusters must be paused
# before upgrade. Finally, for some applications this is arbitrary but
# generalized.
leader = None
non_leaders = []
for unit in status["units"]:
if status["units"][unit].get("leader"):
leader = unit
else:
non_leaders.append(unit)
# Pause the non-leaders
for unit in non_leaders:
if pause_non_leader_subordinate:
if status["units"][unit].get("subordinates"):
for subordinate in status["units"][unit]["subordinates"]:
_app = subordinate.split('/')[0]
if _app in SUBORDINATE_PAUSE_RESUME_BLACKLIST:
logging.info("Skipping pausing {} - blacklisted"
.format(subordinate))
else:
logging.info("Pausing {}".format(subordinate))
model.run_action(
subordinate, "pause", action_params={})
if pause_non_leader_primary:
logging.info("Pausing {}".format(unit))
model.run_action(unit, "pause", action_params={})
machine = status["units"][leader]["machine"]
# Series upgrade the leader
logging.info("Series upgrade leader: {}".format(leader))
if machine not in completed_machines:
series_upgrade(leader, machine,
from_series=from_series, to_series=to_series,
origin=origin, workaround_script=workaround_script,
files=files,
post_upgrade_functions=post_upgrade_functions)
completed_machines.append(machine)
else:
logging.info("Skipping unit: {}. Machine: {} already upgraded."
"But setting origin on the application {}"
.format(unit, machine, application))
logging.info("Set origin on {}".format(application))
os_utils.set_origin(application, origin)
model.block_until_all_units_idle()
# Series upgrade the non-leaders
for unit in non_leaders:
machine = status["units"][unit]["machine"]
if machine not in completed_machines:
logging.info("Series upgrade non-leader unit: {}"
.format(unit))
series_upgrade(unit, machine,
from_series=from_series, to_series=to_series,
origin=origin, workaround_script=workaround_script,
files=files,
post_upgrade_functions=post_upgrade_functions)
completed_machines.append(machine)
else:
logging.info("Skipping unit: {}. Machine: {} already upgraded. "
"But setting origin on the application {}"
.format(unit, machine, application))
logging.info("Set origin on {}".format(application))
| |
' database: [20]')
    @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
    @patch.object(openstack, 'juju_log')
    @patch('charmhelpers.contrib.openstack.utils.status_set')
    @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set',
           return_value=False)
    def test_set_os_workload_status_complete_ports_not_open(
            self, is_unit_paused_set, status_set, log, port_has_listener):
        """A required port without a listener blocks the workload status."""
        configs = MagicMock()
        configs.complete_contexts.return_value = []
        required_interfaces = {}
        ports = [50, 60, 70]
        # Only port 60 has no listener.
        port_has_listener.side_effect = [True, False, True]
        openstack.set_os_workload_status(
            configs, required_interfaces, ports=ports)
        status_set.assert_called_with(
            'blocked',
            'Ports which should be open, but are not: 60')
    @patch.object(openstack, 'juju_log')
    @patch('charmhelpers.contrib.openstack.utils.status_set')
    @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set',
           return_value=True)
    def test_set_os_workload_status_paused_simple(
            self, is_unit_paused_set, status_set, log):
        """A paused unit with nothing to check reports maintenance."""
        configs = MagicMock()
        configs.complete_contexts.return_value = []
        required_interfaces = {}
        openstack.set_os_workload_status(configs, required_interfaces)
        status_set.assert_called_with(
            'maintenance',
            "Paused. Use 'resume' action to resume normal service.")
    @patch('charmhelpers.contrib.openstack.utils.service_running')
    @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
    @patch.object(openstack, 'juju_log')
    @patch('charmhelpers.contrib.openstack.utils.status_set')
    @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set',
           return_value=True)
    def test_set_os_workload_status_paused_services_check(
            self, is_unit_paused_set, status_set, log,
            port_has_listener, service_running):
        """Paused unit with all services stopped stays in maintenance."""
        configs = MagicMock()
        configs.complete_contexts.return_value = []
        required_interfaces = {}
        services = [
            {'service': 'database', 'ports': [10, 20]},
            {'service': 'identity', 'ports': [30]},
        ]
        # No ports listening, neither service running: properly paused.
        port_has_listener.return_value = False
        service_running.side_effect = [False, False]
        openstack.set_os_workload_status(
            configs, required_interfaces, services=services)
        status_set.assert_called_with(
            'maintenance',
            "Paused. Use 'resume' action to resume normal service.")
    @patch('charmhelpers.contrib.openstack.utils.service_running')
    @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
    @patch.object(openstack, 'juju_log')
    @patch('charmhelpers.contrib.openstack.utils.status_set')
    @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set',
           return_value=True)
    def test_set_os_workload_status_paused_services_fail(
            self, is_unit_paused_set, status_set, log,
            port_has_listener, service_running):
        """A service still running on a paused unit blocks the status."""
        configs = MagicMock()
        configs.complete_contexts.return_value = []
        required_interfaces = {}
        services = [
            {'service': 'database', 'ports': [10, 20]},
            {'service': 'identity', 'ports': [30]},
        ]
        port_has_listener.return_value = False
        # Fail the identity service
        service_running.side_effect = [False, True]
        openstack.set_os_workload_status(
            configs, required_interfaces, services=services)
        status_set.assert_called_with(
            'blocked',
            "Services should be paused but these services running: identity")
    @patch('charmhelpers.contrib.openstack.utils.service_running')
    @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
    @patch.object(openstack, 'juju_log')
    @patch('charmhelpers.contrib.openstack.utils.status_set')
    @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set',
           return_value=True)
    def test_set_os_workload_status_paused_services_ports_fail(
            self, is_unit_paused_set, status_set, log,
            port_has_listener, service_running):
        """An open service port on a paused unit blocks the status."""
        configs = MagicMock()
        configs.complete_contexts.return_value = []
        required_interfaces = {}
        services = [
            {'service': 'database', 'ports': [10, 20]},
            {'service': 'identity', 'ports': [30]},
        ]
        # make the service 20 port be still listening.
        port_has_listener.side_effect = [False, True, False]
        service_running.return_value = False
        openstack.set_os_workload_status(
            configs, required_interfaces, services=services)
        status_set.assert_called_with(
            'blocked',
            "Services should be paused but these service:ports are open:"
            " database: [20]")
    @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
    @patch.object(openstack, 'juju_log')
    @patch('charmhelpers.contrib.openstack.utils.status_set')
    @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set',
           return_value=True)
    def test_set_os_workload_status_paused_ports_check(
            self, is_unit_paused_set, status_set, log,
            port_has_listener):
        """Paused unit with all standalone ports closed stays maintenance."""
        configs = MagicMock()
        configs.complete_contexts.return_value = []
        required_interfaces = {}
        ports = [50, 60, 70]
        port_has_listener.side_effect = [False, False, False]
        openstack.set_os_workload_status(
            configs, required_interfaces, ports=ports)
        status_set.assert_called_with(
            'maintenance',
            "Paused. Use 'resume' action to resume normal service.")
    @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
    @patch.object(openstack, 'juju_log')
    @patch('charmhelpers.contrib.openstack.utils.status_set')
    @patch('charmhelpers.contrib.openstack.utils.is_unit_paused_set',
           return_value=True)
    def test_set_os_workload_status_paused_ports_fail(
            self, is_unit_paused_set, status_set, log,
            port_has_listener):
        """An open standalone port on a paused unit blocks the status."""
        configs = MagicMock()
        configs.complete_contexts.return_value = []
        required_interfaces = {}
        # fail port 70 to make it seem to be running
        ports = [50, 60, 70]
        port_has_listener.side_effect = [False, False, True]
        openstack.set_os_workload_status(
            configs, required_interfaces, ports=ports)
        status_set.assert_called_with(
            'blocked',
            "Services should be paused but "
            "these ports which should be closed, but are open: 70")
    @patch('charmhelpers.contrib.openstack.utils.service_running')
    @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
    def test_check_actually_paused_simple_services(
            self, port_has_listener, service_running):
        """check_actually_paused() returns (None, None) for stopped services."""
        services = ['database', 'identity']
        port_has_listener.return_value = False
        service_running.return_value = False
        state, message = openstack.check_actually_paused(
            services)
        self.assertEquals(state, None)
        self.assertEquals(message, None)
    @patch('charmhelpers.contrib.openstack.utils.service_running')
    @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
    def test_check_actually_paused_simple_services_fail(
            self, port_has_listener, service_running):
        """A running service (identity) yields a blocked state + message."""
        services = ['database', 'identity']
        port_has_listener.return_value = False
        service_running.side_effect = [False, True]
        state, message = openstack.check_actually_paused(
            services)
        self.assertEquals(state, 'blocked')
        self.assertEquals(
            message,
            "Services should be paused but these services running: identity")
    @patch('charmhelpers.contrib.openstack.utils.service_running')
    @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
    def test_check_actually_paused_services_dict(
            self, port_has_listener, service_running):
        """Dict-form services with everything stopped pass the check."""
        services = [
            {'service': 'database', 'ports': [10, 20]},
            {'service': 'identity', 'ports': [30]},
        ]
        # Neither service is running and no port has a listener.
        port_has_listener.return_value = False
        service_running.return_value = False
        state, message = openstack.check_actually_paused(
            services)
        self.assertEquals(state, None)
        self.assertEquals(message, None)
    @patch('charmhelpers.contrib.openstack.utils.service_running')
    @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
    def test_check_actually_paused_services_dict_fail(
            self, port_has_listener, service_running):
        """Dict-form services with identity still running → blocked."""
        services = [
            {'service': 'database', 'ports': [10, 20]},
            {'service': 'identity', 'ports': [30]},
        ]
        # The identity service is still running (2nd side_effect value).
        port_has_listener.return_value = False
        service_running.side_effect = [False, True]
        state, message = openstack.check_actually_paused(
            services)
        self.assertEquals(state, 'blocked')
        self.assertEquals(
            message,
            "Services should be paused but these services running: identity")
    @patch('charmhelpers.contrib.openstack.utils.service_running')
    @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
    def test_check_actually_paused_services_dict_ports_fail(
            self, port_has_listener, service_running):
        """Dict-form services with a port still listening → blocked."""
        services = [
            {'service': 'database', 'ports': [10, 20]},
            {'service': 'identity', 'ports': [30]},
        ]
        # Port 20 (database) still has a listener.
        port_has_listener.side_effect = [False, True, False]
        service_running.return_value = False
        state, message = openstack.check_actually_paused(
            services)
        self.assertEquals(state, 'blocked')
        self.assertEquals(message,
                          'Services should be paused but these service:ports'
                          ' are open: database: [20]')
@patch('charmhelpers.contrib.openstack.utils.service_running')
@patch('charmhelpers.contrib.openstack.utils.port_has_listener')
def test_check_actually_paused_ports_okay(
self, port_has_listener, service_running):
port_has_listener.side_effect = [False, False, False]
service_running.return_value = False
ports = [50, 60, 70]
state, message = openstack.check_actually_paused(
ports=ports)
self.assertEquals(state, None)
self.assertEquals(state, None)
    @patch('charmhelpers.contrib.openstack.utils.service_running')
    @patch('charmhelpers.contrib.openstack.utils.port_has_listener')
    def test_check_actually_paused_ports_fail(
            self, port_has_listener, service_running):
        """A standalone port with a listener (60) → blocked."""
        port_has_listener.side_effect = [False, True, False]
        service_running.return_value = False
        ports = [50, 60, 70]
        state, message = openstack.check_actually_paused(
            ports=ports)
        self.assertEquals(state, 'blocked')
        self.assertEquals(message,
                          'Services should be paused but these ports '
                          'which should be closed, but are open: 60')
@staticmethod
def _unit_paused_helper(hook_data_mock):
# HookData()() returns a tuple (kv, delta_config, delta_relation)
# but we only want kv in the test.
kv = MagicMock()
@contextlib.contextmanager
def hook_data__call__():
yield (kv, True, False)
hook_data__call__.return_value = (kv, True, False)
hook_data_mock.return_value = hook_data__call__
return kv
    @patch('charmhelpers.contrib.openstack.utils.unitdata.HookData')
    def test_set_unit_paused(self, hook_data):
        """set_unit_paused() stores unit-paused=True in the kv store."""
        kv = self._unit_paused_helper(hook_data)
        openstack.set_unit_paused()
        kv.set.assert_called_once_with('unit-paused', True)
    @patch('charmhelpers.contrib.openstack.utils.unitdata.HookData')
    def test_set_unit_upgrading(self, hook_data):
        """set_unit_upgrading() stores unit-upgrading=True in the kv store."""
        kv = self._unit_paused_helper(hook_data)
        openstack.set_unit_upgrading()
        kv.set.assert_called_once_with('unit-upgrading', True)
    @patch('charmhelpers.contrib.openstack.utils.unitdata.HookData')
    def test_clear_unit_paused(self, hook_data):
        """clear_unit_paused() stores unit-paused=False in the kv store."""
        kv = self._unit_paused_helper(hook_data)
        openstack.clear_unit_paused()
        kv.set.assert_called_once_with('unit-paused', False)
    @patch('charmhelpers.contrib.openstack.utils.unitdata.HookData')
    def test_clear_unit_upgrading(self, hook_data):
        """clear_unit_upgrading() stores unit-upgrading=False in the kv store."""
        kv = self._unit_paused_helper(hook_data)
        openstack.clear_unit_upgrading()
        kv.set.assert_called_once_with('unit-upgrading', False)
    @patch('charmhelpers.contrib.openstack.utils.unitdata.HookData')
    def test_is_unit_paused_set(self, hook_data):
        """is_unit_paused_set() reflects the stored unit-paused flag."""
        kv = self._unit_paused_helper(hook_data)
        kv.get.return_value = True
        r = openstack.is_unit_paused_set()
        kv.get.assert_called_once_with('unit-paused')
        self.assertEquals(r, True)
        kv.get.return_value = False
        r = openstack.is_unit_paused_set()
        self.assertEquals(r, False)
    @patch('charmhelpers.contrib.openstack.utils.unitdata.HookData')
    def test_is_unit_upgrading_set(self, hook_data):
        """is_unit_upgrading_set() reflects the stored unit-upgrading flag."""
        kv = self._unit_paused_helper(hook_data)
        kv.get.return_value = True
        r = openstack.is_unit_upgrading_set()
        kv.get.assert_called_once_with('unit-upgrading')
        self.assertEquals(r, True)
        kv.get.return_value = False
        r = openstack.is_unit_upgrading_set()
        self.assertEquals(r, False)
    @patch('charmhelpers.contrib.openstack.utils.service_pause')
    @patch('charmhelpers.contrib.openstack.utils.set_unit_paused')
    def test_pause_unit_okay(self, set_unit_paused, service_pause):
        """pause_unit() pauses every service and records the paused flag."""
        services = ['service1', 'service2']
        service_pause.side_effect = [True, True]
        openstack.pause_unit(None, services=services)
        set_unit_paused.assert_called_once_with()
        self.assertEquals(service_pause.call_count, 2)
    @patch('charmhelpers.contrib.openstack.utils.service_pause')
    @patch('charmhelpers.contrib.openstack.utils.set_unit_paused')
    def test_pause_unit_service_fails(self, set_unit_paused, service_pause):
        """pause_unit() raises when a service fails to stop."""
        services = ['service1', 'service2']
        service_pause.side_effect = [True, True]
        openstack.pause_unit(None, services=services)
        set_unit_paused.assert_called_once_with()
        self.assertEquals(service_pause.call_count, 2)
        # Fail the 2nd service
        service_pause.side_effect = [True, False]
        try:
            openstack.pause_unit(None, services=services)
            # Reaching here means no exception was raised; the raise below
            # deliberately fails the except-branch assertion.
            raise Exception("pause_unit should have raised Exception")
        except Exception as e:
            self.assertEquals(e.args[0],
                              "Couldn't pause: service2 didn't stop cleanly.")
    @patch('charmhelpers.contrib.openstack.utils.service_pause')
    @patch('charmhelpers.contrib.openstack.utils.set_unit_paused')
    def test_pausee_unit_service_charm_func(
            self, set_unit_paused, service_pause):
        """pause_unit() calls charm_func and raises when it returns a message.

        NOTE(review): "pausee" looks like a typo for "pause"; left as-is
        because renaming would change the test ID.
        """
        services = ['service1', 'service2']
        service_pause.return_value = True
        charm_func = MagicMock()
        charm_func.return_value = None
        openstack.pause_unit(None, services=services, charm_func=charm_func)
        charm_func.assert_called_once_with()
        # fail the charm_func
        charm_func.return_value = "Custom charm failed"
        try:
            openstack.pause_unit(
                None, services=services, charm_func=charm_func)
            raise Exception("pause_unit should have raised Exception")
        except Exception as e:
            self.assertEquals(e.args[0],
                              "Couldn't pause: Custom charm failed")
    @patch('charmhelpers.contrib.openstack.utils.service_pause')
    @patch('charmhelpers.contrib.openstack.utils.set_unit_paused')
    def test_pause_unit_assess_status_func(
            self, set_unit_paused, service_pause):
        """pause_unit() calls assess_status_func and raises on its message."""
        services = ['service1', 'service2']
        service_pause.return_value = True
        assess_status_func = MagicMock()
        assess_status_func.return_value = None
        openstack.pause_unit(assess_status_func, services=services)
        assess_status_func.assert_called_once_with()
        # fail the assess_status_func
        assess_status_func.return_value = "assess_status_func failed"
        try:
            openstack.pause_unit(assess_status_func, services=services)
            raise Exception("pause_unit should have raised Exception")
        except Exception as e:
            self.assertEquals(e.args[0],
                              "Couldn't pause: assess_status_func failed")
    @patch('charmhelpers.contrib.openstack.utils.service_resume')
    @patch('charmhelpers.contrib.openstack.utils.clear_unit_paused')
    def test_resume_unit_okay(self, clear_unit_paused, service_resume):
        """resume_unit() resumes every service and clears the paused flag."""
        services = ['service1', 'service2']
        service_resume.side_effect = [True, True]
        openstack.resume_unit(None, services=services)
        clear_unit_paused.assert_called_once_with()
        self.assertEquals(service_resume.call_count, 2)
    @patch('charmhelpers.contrib.openstack.utils.service_resume')
    @patch('charmhelpers.contrib.openstack.utils.clear_unit_paused')
    def test_resume_unit_service_fails(
            self, clear_unit_paused, service_resume):
        """resume_unit() raises when a service fails to start."""
        services = ['service1', 'service2']
        service_resume.side_effect = [True, True]
        openstack.resume_unit(None, services=services)
        clear_unit_paused.assert_called_once_with()
        self.assertEquals(service_resume.call_count, 2)
        # Fail the 2nd service
        service_resume.side_effect = [True, False]
        try:
            openstack.resume_unit(None, services=services)
            raise Exception("resume_unit should have raised Exception")
        except Exception as e:
            self.assertEquals(
                e.args[0], "Couldn't resume: service2 didn't start cleanly.")
    @patch('charmhelpers.contrib.openstack.utils.service_resume')
    @patch('charmhelpers.contrib.openstack.utils.clear_unit_paused')
    def test_resume_unit_service_charm_func(
            self, clear_unit_paused, service_resume):
        """resume_unit() calls charm_func and raises when it returns a message."""
        services = ['service1', 'service2']
        service_resume.return_value = True
        charm_func = MagicMock()
        charm_func.return_value = None
        openstack.resume_unit(None, services=services, charm_func=charm_func)
        charm_func.assert_called_once_with()
        # fail the charm_func
        charm_func.return_value = "Custom charm failed"
        try:
            openstack.resume_unit(
                None, services=services, charm_func=charm_func)
            raise Exception("resume_unit should have raised Exception")
        except Exception as e:
            self.assertEquals(e.args[0],
                              "Couldn't resume: Custom charm failed")
    @patch('charmhelpers.contrib.openstack.utils.service_resume')
    @patch('charmhelpers.contrib.openstack.utils.clear_unit_paused')
    def test_resume_unit_assess_status_func(
            self, clear_unit_paused, service_resume):
        """resume_unit() calls assess_status_func and raises on its message."""
        services = ['service1', 'service2']
        service_resume.return_value = True
        assess_status_func = MagicMock()
        assess_status_func.return_value = None
        openstack.resume_unit(assess_status_func, services=services)
        assess_status_func.assert_called_once_with()
        # fail the assess_status_func
        assess_status_func.return_value = "assess_status_func failed"
        try:
            openstack.resume_unit(assess_status_func, services=services)
            raise Exception("resume_unit should have raised Exception")
        except Exception as e:
            self.assertEquals(e.args[0],
                              "Couldn't resume: assess_status_func failed")
    @patch('charmhelpers.contrib.openstack.utils.status_set')
    @patch('charmhelpers.contrib.openstack.utils.'
           '_determine_os_workload_status')
    def test_make_assess_status_func(self, _determine_os_workload_status,
                                     status_set):
        """The returned func sets status; non-ok states return the message."""
        _determine_os_workload_status.return_value = ('active', 'fine')
        f = openstack.make_assess_status_func('one', 'two', three='three')
        r = f()
        self.assertEquals(r, None)
        _determine_os_workload_status.assert_called_once_with(
            'one', 'two', three='three')
        status_set.assert_called_once_with('active', 'fine')
        # return something other than 'active' or 'maintenance'
        _determine_os_workload_status.return_value = ('broken', 'damaged')
        r = f()
        self.assertEquals(r, 'damaged')
    @patch.object(openstack, 'restart_on_change_helper')
    @patch.object(openstack, 'is_unit_paused_set')
    def test_pausable_restart_on_change(
            self, is_unit_paused_set, restart_on_change_helper):
        """The decorator skips the restart helper while the unit is paused."""
        @openstack.pausable_restart_on_change({})
        def test_func():
            pass
        # test with pause: restart_on_change_helper should not be called.
        is_unit_paused_set.return_value = True
        test_func()
        self.assertEquals(restart_on_change_helper.call_count, 0)
        # test without pause: restart_on_change_helper should be called.
        is_unit_paused_set.return_value = False
        test_func()
        self.assertEquals(restart_on_change_helper.call_count, 1)
    @patch.object(openstack, 'juju_log')
    @patch.object(openstack, 'action_set')
    @patch.object(openstack, 'action_fail')
    @patch.object(openstack, 'openstack_upgrade_available')
    @patch('charmhelpers.contrib.openstack.utils.config')
    def test_openstack_upgrade(self, config, openstack_upgrade_available,
                               action_fail, action_set, log):
        """A successful action-managed upgrade reports success, no failure."""
        def do_openstack_upgrade(configs):
            pass
        openstack_upgrade_available.return_value = True
        # action-managed-upgrade=True
        config.side_effect = [True]
        openstack.do_action_openstack_upgrade('package-xyz',
                                              do_openstack_upgrade,
                                              None)
        self.assertTrue(openstack_upgrade_available.called)
        msg = ('success, upgrade completed.')
        action_set.assert_called_with({'outcome': msg})
        self.assertFalse(action_fail.called)
@patch.object(openstack, 'juju_log')
@patch.object(openstack, 'action_set')
@patch.object(openstack, 'action_fail')
@patch.object(openstack, 'openstack_upgrade_available')
| |
getTransmitterCWFrequencyRes(cls):
return None if not cls.transmitterSupportsCW() else cls.txType.toneGenType.frqRes
##
# \brief Gets the CW tone generator amplitude range for transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWAmplitudeRange()
@classmethod
def getTransmitterCWAmplitudeRange(cls):
return (0.0,0.0) if not cls.transmitterSupportsCW() else cls.txType.toneGenType.ampRange
##
# \brief Gets the CW tone generator amplitude resolution for transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWAmplitudeRes()
@classmethod
def getTransmitterCWAmplitudeRes(cls):
return None if not cls.transmitterSupportsCW() else cls.txType.toneGenType.ampRes
##
# \brief Gets the CW tone generator phase range for transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWPhaseRange()
@classmethod
def getTransmitterCWPhaseRange(cls):
return (0.0,0.0) if not cls.transmitterSupportsCW() else cls.txType.toneGenType.phaseRange
##
# \brief Gets the CW tone generator phase resolution for transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWPhaseRes()
@classmethod
def getTransmitterCWPhaseRes(cls):
return None if not cls.transmitterSupportsCW() else cls.txType.toneGenType.phaseRes
##
# \brief Gets whether transmitters on the radio support sweep functions
# during continuous-wave (CW) tone generation.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWPhaseRes()
@classmethod
def transmitterSupportsCWSweep(cls):
return cls.transmitterSupportsCW() and cls.txType.toneGenType.sweepCmd is not None
##
# \brief Gets the CW tone generator sweep start frequency range for
# transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepStartRange()
@classmethod
def getTransmitterCWSweepStartRange(cls):
return (0.0,0.0) if not cls.transmitterSupportsCWSweep() \
else cls.txType.toneGenType.startRange
##
# \brief Gets the CW tone generator sweep start frequency resolution for
# transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepStartRes()
@classmethod
def getTransmitterCWSweepStartRes(cls):
return None if not cls.transmitterSupportsCWSweep() \
else cls.txType.toneGenType.startRes
##
# \brief Gets the CW tone generator sweep stop frequency range for
# transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepStopRange()
@classmethod
def getTransmitterCWSweepStopRange(cls):
return (0.0,0.0) if not cls.transmitterSupportsCWSweep() \
else cls.txType.toneGenType.stopRange
##
# \brief Gets the CW tone generator sweep stop frequency resolution for
# transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepStopRes()
@classmethod
def getTransmitterCWSweepStopRes(cls):
return None if not cls.transmitterSupportsCWSweep() \
else cls.txType.toneGenType.stopRes
##
# \brief Gets the CW tone generator sweep step frequency range for
# transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepStepRange()
@classmethod
def getTransmitterCWSweepStepRange(cls):
return (0.0,0.0) if not cls.transmitterSupportsCWSweep() \
else cls.txType.toneGenType.stepRange
##
# \brief Gets the CW tone generator sweep step frequency resolution for
# transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepStepRes()
@classmethod
def getTransmitterCWSweepStepRes(cls):
return None if not cls.transmitterSupportsCWSweep() \
else cls.txType.toneGenType.stepRes
##
# \brief Gets the CW tone generator sweep dwell time range for
# transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepDwellRange()
@classmethod
def getTransmitterCWSweepDwellRange(cls):
return (0.0,0.0) if not cls.transmitterSupportsCWSweep() \
else cls.txType.toneGenType.dwellRange
##
# \brief Gets the CW tone generator sweep dwell time resolution for
# transmitters on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getTransmitterCWSweepDwellRes()
@classmethod
def getTransmitterCWSweepDwellRes(cls):
return None if not cls.transmitterSupportsCWSweep() \
else cls.txType.toneGenType.dwellRes
##
# \brief Gets the number of wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNumWbduc()
@classmethod
def getNumWbduc(cls):
return len(cls.getWbducIndexRange())
##
# \brief Gets the index range for the wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducIndexRange()
@classmethod
def getWbducIndexRange(cls):
return list(range(cls.wbducIndexBase, cls.wbducIndexBase + cls.numWbduc, 1))
##
# \brief Gets the frequency offset range for the wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducFrequencyRange()
@classmethod
def getWbducFrequencyRange(cls):
return (0.0,0.0) if cls.wbducType is None else cls.wbducType.frqRange
##
# \brief Gets the frequency resolution for wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducFrequencyRes()
@classmethod
def getWbducFrequencyRes(cls):
return 0.0 if cls.wbducType is None else cls.wbducType.frqRes
##
# \brief Gets the frequency unit for wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducFrequencyUnit()
@classmethod
def getWbducFrequencyUnit(cls):
return 0.0 if cls.wbducType is None else cls.wbducType.frqUnits
##
# \brief Gets the attenuation range for the wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducAttenuationRange()
@classmethod
def getWbducAttenuationRange(cls):
return (0.0,0.0) if cls.wbducType is None else cls.wbducType.attRange
##
# \brief Gets the attenuation resolution for wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducAttenuationRes()
@classmethod
def getWbducAttenuationRes(cls):
return 0.0 if cls.wbducType is None else cls.wbducType.attRes
##
# \brief Gets the allowed rate set for the wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducRateSet()
@classmethod
def getWbducRateSet(cls):
ducObj = cls.wbducType
return ducObj.rateSet if ducObj is not None else {}
##
# \brief Gets the allowed rate list for the wideband DUCs on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducRateList()
@classmethod
def getWbducRateList(cls):
ducObj = cls.wbducType
if ducObj is not None:
return [ducObj.rateSet[k] for k in sorted(ducObj.rateSet.keys())]
else:
return []
##
# \brief Gets whether or not the wideband DUCs on the radio support loading
# sample snapshots.
#
# \copydetails CyberRadioDriver::IRadio::wbducSupportsSnapshotLoad()
@classmethod
def wbducSupportsSnapshotLoad(cls):
return (cls.wbducType is not None and cls.wbducType.snapshotLoadCmd is not None)
##
# \brief Gets whether or not the wideband DUCs on the radio support
# transmitting sample snapshots.
#
# \copydetails CyberRadioDriver::IRadio::wbducSupportsSnapshotTransmit()
@classmethod
def wbducSupportsSnapshotTransmit(cls):
return (cls.wbducType is not None and cls.wbducType.snapshotTxCmd is not None)
##
# \brief Gets the index range for the DDC groups on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbddcGroupIndexRange()
@classmethod
def getDdcGroupIndexRange(cls, wideband):
return cls.getWbddcGroupIndexRange() if wideband else cls.getNbddcGroupIndexRange()
##
# \brief Gets the number of wideband DDC groups on the radio.
# \copydetails CyberRadioDriver::IRadio::getNumWbddcGroups()
@classmethod
def getNumWbddcGroups(cls):
return len(cls.getWbddcGroupIndexRange())
##
# \brief Gets the index range for the wideband DDC groups on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbddcGroupIndexRange()
@classmethod
def getWbddcGroupIndexRange(cls):
return list(range(cls.wbddcGroupIndexBase, cls.wbddcGroupIndexBase + cls.numWbddcGroups, 1))
##
# \brief Gets the number of narrowband DDC groups on the radio.
# \copydetails CyberRadioDriver::IRadio::getNumNbddcGroups()
@classmethod
def getNumNbddcGroups(cls):
return len(cls.getNbddcGroupIndexRange())
##
# \brief Gets the index range for the narrowband DDC groups on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNbddcGroupIndexRange()
@classmethod
def getNbddcGroupIndexRange(cls):
return list(range(cls.nbddcGroupIndexBase, cls.nbddcGroupIndexBase + cls.numNbddcGroups, 1))
##
# \brief Gets the number of combined DDC groups on the radio.
# \copydetails CyberRadioDriver::IRadio::getNumCombinedDdcGroups()
@classmethod
def getNumCombinedDdcGroups(cls):
return len(cls.getCombinedDdcGroupIndexRange())
##
# \brief Gets the index range for the combined DDC groups on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getCombinedDdcGroupIndexRange()
@classmethod
def getCombinedDdcGroupIndexRange(cls):
return list(range(cls.cddcGroupIndexBase, cls.cddcGroupIndexBase + cls.numCddcGroups, 1))
##
# \brief Gets the number of wideband DUC groups on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getNumWbducGroups()
@classmethod
def getNumWbducGroups(cls):
return len(cls.getWbducGroupIndexRange())
##
# \brief Gets the index range for the wideband DUC groups on the radio.
#
# \copydetails CyberRadioDriver::IRadio::getWbducGroupIndexRange()
@classmethod
def getWbducGroupIndexRange(cls):
return list(range(cls.wbducGroupIndexBase, cls.wbducGroupIndexBase + cls.numWbducGroups, 1))
    # ------------- Deprecated/Helper Methods ----------------- #
    ##
    # \internal
    # \brief Define this object's string representation.
    #
    # \return The radio's name attribute.
    def __str__(self):
        return self.name
##
# \internal
# \brief Helper function that returns an index list.
def _getIndexList(self,objIndex,objDict):
if objIndex is None:
return list(objDict.keys())
elif type(objIndex) is int:
return [objIndex,] if objIndex in list(objDict.keys()) else []
elif type(objIndex) is list:
return [i for i in objIndex if i in list(objDict.keys())]
else:
return []
##
# \internal
# \brief Helper function that "normalizes" an input configuration dictionary
# section by doing the following:
# <ul>
# <li> Ensuring that keys for any enumerated entries are integers
# <li> Expanding sub-dictionaries with the special "all" key
# <li> Performing specialization for individual entries
#
# \param configDict The incoming configuration dictionary.
# \param entryIndexList The list of entry indices (used in expanding "all" keys).
# \return The new configuration dictionary.
def _normalizeConfigDictSection(self, configDict, entryIndexList):
newConfigDict = {}
# Fix keys in config dictionary
convertKeys = []
invalidKeys = []
for key in configDict:
try:
tmp = int(key)
if tmp != key:
convertKeys.append(key)
except:
if key != configKeys.ALL:
invalidKeys.append(key)
for key in invalidKeys:
configDict.pop(key)
for key in convertKeys:
configDict[int(key)] = configDict.pop(key)
if configKeys.ALL in configDict:
tmpDict = configDict.pop(configKeys.ALL)
for entryNum in entryIndexList:
newConfigDict[entryNum] = copy.deepcopy(tmpDict)
for entryNum in configDict:
if entryNum in newConfigDict:
self._dictUpdate(newConfigDict[entryNum], \
configDict[entryNum], \
newConfigDict[entryNum], \
list(configDict[entryNum].keys()))
else:
newConfigDict[entryNum] = copy.deepcopy(configDict[entryNum])
return newConfigDict
##
# \internal
# \brief Helper function that "normalizes" an input configuration dictionary
# by doing the following:
# <ul>
# <li> Ensuring that keys for component enumerations are integers
# <li> Expanding sub-dictionaries with | |
<gh_stars>0
#!/usr/bin/env python
# Copyright (c) 2011-2020, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
Zonal Statistics
^^^^^^^^^^^^^^^^
This module supports you in computing statistics over spatial zones. A typical
application would be to compute mean areal precipitation for a catchment by
using precipitation estimates from a radar grid in polar coordinates or from
precipitation estimates in a Cartesian grid.
The general usage is similar to the :mod:`wradlib.ipol` and
:mod:`wradlib.adjust`:
You have to create an instance of a class (derived from
:class:`~wradlib.zonalstats.ZonalDataBase`) by using
the spatial information of your source and target objects (e.g. radar bins and
catchment polygons). The Zonal Data within this object can be saved e.g. as an
ESRI Shapefile.
This object is then called with another class to compute zonal statistics for
your target objects by calling the class instance with an array of values
(one for each source object).
Typically, creating the instance of the ZonalData class will be computationally
expensive, but only has to be done once (as long as the geometries do
not change).
Calling the objects with actual data, however, will be very fast.
.. note:: Right now we only support a limited set of 2-dimensional zonal
statistics. In the future, we plan to extend this to three dimensions.
.. currentmodule:: wradlib.zonalstats
.. autosummary::
:nosignatures:
:toctree: generated/
{}
"""
__all__ = [
"DataSource",
"ZonalDataBase",
"ZonalDataPoint",
"ZonalDataPoly",
"ZonalStatsBase",
"ZonalStatsPoly",
"ZonalStatsPoint",
"mask_from_bbox",
"get_bbox",
"grid_centers_to_vertices",
"get_clip_mask",
]
__doc__ = __doc__.format("\n ".join(__all__))
import os
import tempfile
import warnings
import numpy as np
from matplotlib import patches
from matplotlib.path import Path
from osgeo import gdal, ogr
from scipy import spatial
from wradlib import georef, io
ogr.UseExceptions()
gdal.UseExceptions()
# check windows
isWindows = os.name == "nt"
class DataSource(object):
    """ DataSource class for handling ogr/gdal vector data
    DataSource creates an in-memory (vector) ogr DataSource object with
    one layer for point or polygon geometries.
    Parameters
    ----------
    data : sequence of source points (shape Nx2) or polygons (shape NxMx2) or
        ESRI Shapefile filename containing source points/polygons
    srs : object
        ogr.SpatialReferenceSystem SRS describing projection of given data
    Warning
    -------
    Writing shapefiles with the wrong locale settings can have impact on the
    type of the decimal. If problem arise use LC_NUMERIC=C in your environment.
    Examples
    --------
    See \
    :ref:`/notebooks/zonalstats/wradlib_zonalstats_classes.ipynb#DataSource`.
    """
    def __init__(self, data=None, srs=None, name="layer", source=0, **kwargs):
        self._srs = srs
        self._name = name
        if data is not None:
            # Try to interpret `data` as raw geometry arrays first; a
            # TypeError means it is (assumed to be) a vector-file path.
            try:
                self._ds = self._check_src(data)
            except TypeError:
                self.load_vector(data, source=source)
            self._create_spatial_index()
        else:
            self._ds = None
    @property
    def ds(self):
        """ Returns DataSource
        """
        self._check_ds()
        return self._ds
    @ds.setter
    def ds(self, value):
        self._ds = value
    def _check_ds(self):
        """ Raise ValueError if empty DataSource
        """
        if self._ds is None:
            raise ValueError("Trying to access empty Datasource.")
    @property
    def data(self):
        """ Returns DataSource geometries as numpy ndarrays
        Note
        ----
        This may be slow, because it extracts all source polygons
        """
        lyr = self.ds.GetLayer()
        lyr.ResetReading()
        # Clear any previously-set filters so *all* features are returned.
        lyr.SetSpatialFilter(None)
        lyr.SetAttributeFilter(None)
        return self._get_data()
    def _get_data(self):
        """ Returns DataSource geometries as numpy ndarrays
        """
        lyr = self.ds.GetLayer()
        sources = []
        for feature in lyr:
            geom = feature.GetGeometryRef()
            poly = georef.vector.ogr_to_numpy(geom)
            sources.append(poly)
        return np.array(sources)
    def get_data_by_idx(self, idx):
        """ Returns DataSource geometries as numpy ndarrays from given index
        Parameters
        ----------
        idx : sequence of int
            indices
        """
        lyr = self.ds.GetLayer()
        lyr.ResetReading()
        lyr.SetSpatialFilter(None)
        lyr.SetAttributeFilter(None)
        sources = []
        for i in idx:
            feature = lyr.GetFeature(i)
            geom = feature.GetGeometryRef()
            poly = georef.vector.ogr_to_numpy(geom)
            sources.append(poly)
        return np.array(sources)
    def get_data_by_att(self, attr=None, value=None):
        """Returns DataSource geometries filtered by given attribute/value
        Parameters
        ----------
        attr : string
            attribute name
        value : string
            attribute value
        """
        lyr = self.ds.GetLayer()
        lyr.ResetReading()
        lyr.SetSpatialFilter(None)
        # NOTE(review): value is interpolated unquoted into the SQL-like
        # filter; string-typed attribute values may need quoting -- confirm.
        lyr.SetAttributeFilter("{0}={1}".format(attr, value))
        return self._get_data()
    def get_data_by_geom(self, geom=None):
        """Returns DataSource geometries filtered by given OGR geometry
        Parameters
        ----------
        geom : OGR.Geometry object
        """
        lyr = self.ds.GetLayer()
        lyr.ResetReading()
        lyr.SetAttributeFilter(None)
        lyr.SetSpatialFilter(geom)
        return self._get_data()
    def _create_spatial_index(self):
        """Creates spatial index file .qix
        """
        # Drop-then-create so a stale index never lingers.
        sql1 = "DROP SPATIAL INDEX ON {}".format(self._name)
        sql2 = "CREATE SPATIAL INDEX ON {}".format(self._name)
        self.ds.ExecuteSQL(sql1)
        self.ds.ExecuteSQL(sql2)
    def _create_table_index(self, col):
        """Creates attribute index files
        """
        sql1 = "DROP INDEX ON {}".format(self._name)
        sql2 = "CREATE INDEX ON {} USING {}".format(self._name, col)
        self.ds.ExecuteSQL(sql1)
        self.ds.ExecuteSQL(sql2)
    def _check_src(self, src):
        """Basic check of source elements (sequence of points or polygons).
        - array cast of source elements
        - create ogr_src datasource/layer holding src points/polygons
        - transforming source grid points/polygons to ogr.geometries
            on ogr.layer
        """
        # NOTE(review): only the unique temp-file *name* is reused (under the
        # GDAL /vsimem virtual filesystem); the NamedTemporaryFile object
        # itself is discarded immediately.
        tmpfile = tempfile.NamedTemporaryFile(mode="w+b").name
        ogr_src = io.gdal.gdal_create_dataset(
            "ESRI Shapefile", os.path.join("/vsimem", tmpfile), gdal_type=gdal.OF_VECTOR
        )
        # Raises TypeError for non-array input (see __init__ fallback).
        src = np.array(src)
        # create memory datasource, layer and create features
        if src.ndim == 2:
            geom_type = ogr.wkbPoint
            # no Polygons, just Points
        else:
            geom_type = ogr.wkbPolygon
        fields = [("index", ogr.OFTInteger)]
        georef.vector.ogr_create_layer(
            ogr_src, self._name, srs=self._srs, geom_type=geom_type, fields=fields
        )
        georef.vector.ogr_add_feature(ogr_src, src, name=self._name)
        return ogr_src
    def dump_vector(self, filename, driver="ESRI Shapefile", remove=True):
        """Output layer to OGR Vector File
        Parameters
        ----------
        filename : string
            path to shape-filename
        driver : string
            driver string
        remove : bool
            if True removes existing output file
        """
        ds_out = io.gdal.gdal_create_dataset(
            driver, filename, gdal_type=gdal.OF_VECTOR, remove=remove
        )
        georef.vector.ogr_copy_layer(self.ds, 0, ds_out)
        # flush everything
        del ds_out
    def load_vector(self, filename, source=0, driver="ESRI Shapefile"):
        """Read Layer from OGR Vector File
        Parameters
        ----------
        filename : string
            path to shape-filename
        source : int or string
            number or name of wanted layer, defaults to 0
        driver : string
            driver string
        """
        # Work on an in-memory copy so the on-disk file is never mutated.
        tmpfile = tempfile.NamedTemporaryFile(mode="w+b").name
        self.ds = io.gdal.gdal_create_dataset(
            "ESRI Shapefile", os.path.join("/vsimem", tmpfile), gdal_type=gdal.OF_VECTOR
        )
        # get input file handles
        ds_in, tmp_lyr = io.gdal.open_vector(filename, driver=driver, layer=source)
        # copy layer
        ogr_src_lyr = self.ds.CopyLayer(tmp_lyr, self._name)
        # get spatial reference object
        srs = ogr_src_lyr.GetSpatialRef()
        if srs is not None:
            self._srs = ogr_src_lyr.GetSpatialRef()
        # flush everything
        del ds_in
    def dump_raster(
        self, filename, driver="GTiff", attr=None, pixel_size=1.0, remove=True, **kwargs
    ):
        """Output layer to GDAL Rasterfile
        Parameters
        ----------
        filename : string
            path to shape-filename
        driver : string
            GDAL Raster Driver
        attr : string
            attribute to burn into raster
        pixel_size : float
            pixel Size in source units
        remove : bool
            if True removes existing output file
        Keyword Arguments
        -----------------
        silent : bool
            If True no ProgressBar is shown. Defaults to False.
        """
        silent = kwargs.pop("silent", False)
        # Progress callback is also disabled on Windows (see isWindows above).
        progress = None if (silent or isWindows) else gdal.TermProgress
        layer = self.ds.GetLayer()
        layer.ResetReading()
        x_min, x_max, y_min, y_max = layer.GetExtent()
        cols = int((x_max - x_min) / pixel_size)
        rows = int((y_max - y_min) / pixel_size)
        # Todo: at the moment, always writing floats
        ds_out = io.gdal.gdal_create_dataset(
            "MEM", "", cols, rows, 1, gdal_type=gdal.GDT_Float32
        )
        ds_out.SetGeoTransform((x_min, pixel_size, 0, y_max, 0, -pixel_size))
        proj = layer.GetSpatialRef()
        if proj is None:
            proj = self._srs
        ds_out.SetProjection(proj.ExportToWkt())
        band = ds_out.GetRasterBand(1)
        band.FlushCache()
        if attr is not None:
            gdal.RasterizeLayer(
                ds_out,
                [1],
                layer,
                burn_values=[0],
                options=["ATTRIBUTE={0}".format(attr), "ALL_TOUCHED=TRUE"],
                callback=progress,
            )
        else:
            gdal.RasterizeLayer(
                ds_out,
                [1],
                layer,
                burn_values=[1],
                options=["ALL_TOUCHED=TRUE"],
                callback=progress,
            )
        io.gdal.write_raster_dataset(filename, ds_out, driver, remove=remove)
        del ds_out
    def set_attribute(self, name, values):
        """Add/Set given Attribute with given values
        Parameters
        ----------
        name : string
            Attribute Name
        values : :class:`numpy:numpy.ndarray`
            Values to fill in attributes
        """
        lyr = self.ds.GetLayerByIndex(0)
        lyr.ResetReading()
        # todo: automatically check for value type
        defn = lyr.GetLayerDefn()
        # Create the field only if it does not exist yet.
        if defn.GetFieldIndex(name) == -1:
            lyr.CreateField(ogr.FieldDefn(name, ogr.OFTReal))
        for i, item in enumerate(lyr):
            item.SetField(name, values[i])
            lyr.SetFeature(item)
    def get_attributes(self, attrs, filt=None):
        """Read attributes
        Parameters
        ----------
        attrs : list
            Attribute Names to retrieve
        filt : tuple
            (attname,value) for Attribute Filter
        """
        lyr = self.ds.GetLayer()
        lyr.ResetReading()
        if filt is not None:
            lyr.SetAttributeFilter("{0}={1}".format(*filt))
        # One result list per requested attribute, in the order given.
        ret = [[] for _ in attrs]
        for ogr_src in lyr:
            for i, att in enumerate(attrs):
                ret[i].append(ogr_src.GetField(att))
        return ret
    def get_geom_properties(self, props, filt=None):
        """Read properties
        Parameters
        ----------
        props : list
            Property Names to retrieve
        filt : tuple
            (attname,value) for Attribute Filter
        """
        lyr = self.ds.GetLayer()
        lyr.ResetReading()
        if filt is not None:
            lyr.SetAttributeFilter("{0}={1}".format(*filt))
        ret = [[] for _ in props]
        for ogr_src in lyr:
            for i, prop in enumerate(props):
                # `prop` names a geometry method (e.g. "Area"); call it.
                ret[i].append(getattr(ogr_src.GetGeometryRef(), prop)())
        return ret
    def get_attrs_and_props(self, attrs=None, props=None, filt=None):
        """Read properties and attributes
        Parameters
        ----------
        attrs : list
            Attribute Names to retrieve
        props : list
            Property Names to retrieve
        filt : tuple
            (attname,value) for Attribute Filter
        """
        lyr = self.ds.GetLayer()
        lyr.ResetReading()
        if filt is not None:
            lyr.SetAttributeFilter("{0}={1}".format(*filt))
        # NOTE(review): despite the None defaults, attrs and props must both
        # be lists here -- None would raise TypeError below. Confirm callers.
        ret_props = [[] for _ in props]
        ret_attrs = [[] for _ in attrs]
        for ogr_src in lyr:
            for i, att in enumerate(attrs):
                ret_attrs[i].append(ogr_src.GetField(att))
            for i, prop in enumerate(props):
                ret_props[i].append(getattr(ogr_src.GetGeometryRef(), prop)())
        return ret_attrs, ret_props
class ZonalDataBase(object):
"""Base class for managing 2-dimensional zonal data.
For target polygons from either source points or source polygons.
Provides the basic design for all other classes.
| |
has over lap with tasks
while True:
doorbell = self.dp_sched_mail_box.when_any()
yield doorbell
# rejected or permitted tasks
dp_processed_task_idx = []
# ensure the scheduler is really lazy to process getter, wait for all quota incremented
assert (
self.env.peek() != self.env.now
or self.env._queue[0][1] == LazyAnyFilterQueue.LAZY
)
# ignore fake door bell, listen again
if len(self.dp_sched_mail_box.items) == 0:
continue
# HACK, avoid calling slow get()
msgs, self.dp_sched_mail_box.items = self.dp_sched_mail_box.items, []
new_arrival_tid = []
incremented_quota_idx = set()
msgs_amount = len(msgs)
for m in msgs:
if isinstance(m, int):
tid = m
# assert m in self.dp_waiting_tasks.items
idx = self.dp_waiting_tasks.items.index(tid, -msgs_amount - 10)
new_arrival_tid.append((idx, tid))
else:
assert isinstance(m, list)
incremented_quota_idx.update(m)
this_epoch_unused_quota = [
block['dp_quota'].level for block in self.block_dp_storage.items
]
# new task arrived
for _, new_task_id in new_arrival_tid:
assert self.task_state[new_task_id]['dominant_resource_share'] is None
has_quota_increment = len(incremented_quota_idx) > 0
# update DRS of tasks if its demands has any incremented quota, or new comming tasks.
quota_incre_upper_bound = (
max(incremented_quota_idx) if has_quota_increment else -1
)
quota_incre_lower_bound = (
min(incremented_quota_idx) if has_quota_increment else -1
)
# cal DRS
if not self.is_rdp:
self._cal_drs_dp_L_Inf(new_arrival_tid)
else:
assert self.is_rdp
self._cal_drs_rdp_a_all2()
permit_dp_task_order = None
# optimization for no new quota case
if (not has_quota_increment) and len(new_arrival_tid) != 0:
new_arrival_drs = (
self.task_state[t[1]]['dominant_resource_share']
for t in new_arrival_tid
)
permit_dp_task_order = list(zip(new_arrival_drs, new_arrival_tid))
hq.heapify(permit_dp_task_order)
else:
assert has_quota_increment
waiting_task_drs = (
self.task_state[t]['dominant_resource_share']
for t in self.dp_waiting_tasks.items
)
permit_dp_task_order = list(
zip(waiting_task_drs, enumerate(self.dp_waiting_tasks.items))
)
hq.heapify(permit_dp_task_order)
# iterate over tasks ordered by DRS, match quota, allocate.
permitted_task_ids = set()
dp_rejected_task_ids = set()
permitted_blk_ids = set()
should_grant_top_small = self.env.config[
'resource_master.dp_policy.dpf_family.grant_top_small'
]
are_leading_tasks_ok = True
if not self.is_rdp:
self._dpf_best_effort_dp_sched(
are_leading_tasks_ok,
dp_processed_task_idx,
permit_dp_task_order,
permitted_blk_ids,
permitted_task_ids,
should_grant_top_small,
this_epoch_unused_quota,
)
# reject tasks after allocation
# only reject task on retired blocks
if has_quota_increment: # either dpft or dpfn
self._dpf_check_remaining_dp_n_reject(
dp_processed_task_idx,
dp_rejected_task_ids,
permitted_task_ids,
this_epoch_unused_quota,
)
else: # is_rdp
self.best_effort_rdp_sched_n_commit_reject(
dp_processed_task_idx,
dp_rejected_task_ids,
permit_dp_task_order,
permitted_blk_ids,
permitted_task_ids,
)
# dequeue all permitted and rejected waiting tasks
# HACK avoid calling dp_waiting_tasks.get()
dp_processed_task_idx.sort(reverse=True)
for i in dp_processed_task_idx:
self.debug(
self.dp_waiting_tasks.items[i], "task get dequeued from wait queue"
)
del self.dp_waiting_tasks.items[i]
    def _dpf_check_remaining_dp_n_reject(
        self,
        dp_processed_task_idx,
        dp_rejected_task_ids,
        permitted_task_ids,
        this_epoch_unused_quota,
    ):
        """Reject waiting (non-permitted) DP tasks whose demand can no longer
        be satisfied from retired blocks' remaining quota.

        Mutates dp_processed_task_idx and dp_rejected_task_ids in place;
        fails the task's dp_permitted_event with DpBlockRetiredError.
        Pure-DP mode only (not RDP).
        """
        # reject tasks after allocation
        # only reject task on retired blocks
        assert not self.is_rdp
        for idx, t_id in enumerate(self.dp_waiting_tasks.items):
            should_reject = None
            if t_id not in permitted_task_ids:
                this_task = self.task_state[t_id]
                this_request = this_task["resource_request"]
                task_demand_block_idx = this_request['block_idx']
                task_demand_epsilon = this_request['epsilon']
                # HACK, only check old and new items for rejection performance:
                # only the task's oldest and newest demanded blocks are
                # examined, not every block in between.
                old_demand_b_idx = task_demand_block_idx[0]
                old_item = self.block_dp_storage.items[old_demand_b_idx]
                new_demand_b_idx = task_demand_block_idx[-1]
                # check oldest item; this will check and reject for dpft
                if old_item["retire_event"].triggered:
                    assert old_item["retire_event"].ok
                    b = old_demand_b_idx
                    if this_epoch_unused_quota[b] < task_demand_epsilon:
                        should_reject = True
                # check latest item (non-dpft policies only, and only when the
                # newest block differs from the oldest)
                elif (
                    not self.is_dp_policy_dpft and new_demand_b_idx != old_demand_b_idx
                ):
                    new_item = self.block_dp_storage.items[new_demand_b_idx]
                    if new_item["retire_event"] and new_item["retire_event"].triggered:
                        b = new_demand_b_idx
                        if this_epoch_unused_quota[b] < task_demand_epsilon:
                            should_reject = True
            if should_reject:
                this_task["dp_permitted_event"].fail(DpBlockRetiredError())
                dp_rejected_task_ids.add(t_id)
                dp_processed_task_idx.append(idx)
    def best_effort_rdp_sched_n_commit_reject(
        self,
        dp_processed_task_idx,
        dp_rejected_task_ids,
        permit_dp_task_order,
        permitted_blk_ids,
        permitted_task_ids,
    ):
        """RDP scheduling pass: walk tasks in DRS priority order, permit and
        immediately commit those whose every demanded block has at least one
        satisfiable alpha order; reject those blocked on a retired block.

        All collection arguments are mutated in place.
        """
        for drs, t in permit_dp_task_order:
            t_idx, t_id = t
            this_task = self.task_state[t_id]
            this_request = this_task["resource_request"]
            task_demand_block_idx = this_request['block_idx']
            task_demand_e_rdp = this_request['e_rdp']
            violated_blk, is_quota_insufficient_all = self.is_all_block_quota_sufficient(task_demand_block_idx, task_demand_e_rdp)
            # task is permitted
            if not is_quota_insufficient_all :# every block satisfiable at some alpha order
                drs = this_task['dominant_resource_share']
                self.debug(t_id, "DP permitted, Dominant resource share: %.3f" % drs)
                this_task["dp_permitted_event"].succeed()
                permitted_task_ids.add(t_id)
                permitted_blk_ids.update(task_demand_block_idx)
                # need to update consumption for following rejection
                self._commit_rdp_allocation(task_demand_block_idx, task_demand_e_rdp)
                this_task["dp_committed_event"].succeed()
                this_task['is_dp_granted'] = True
                dp_processed_task_idx.append(t_idx)
            else: # is_quota_insufficient_all
                # Only reject when the violated block has already retired;
                # otherwise leave the task waiting for future quota unlocks.
                if self.block_dp_storage.items[violated_blk]["retire_event"].triggered:
                    assert self.block_dp_storage.items[violated_blk]["retire_event"].ok
                    dp_rejected_task_ids.add(t_id)
                    this_task["dp_permitted_event"].fail(
                        DpBlockRetiredError(
                            "block %d retired, insufficient unlocked rdp left" % violated_blk
                        )
                    )
                    this_task['is_dp_granted'] = False
                    dp_processed_task_idx.append(t_idx)
        return
def is_all_block_quota_sufficient(self, task_demand_block_idx, task_demand_e_rdp):
for b in task_demand_block_idx:
for j, e_d in enumerate(task_demand_e_rdp):
if (
e_d
<= self.block_dp_storage.items[b]['rdp_quota_balance'][j]
):
break
else:
return b, True
else:
return None, False
    def _dpf_best_effort_dp_sched(
        self,
        are_leading_tasks_ok,
        dp_processed_task_idx,
        permit_dp_task_order,
        permitted_blk_ids,
        permitted_task_ids,
        should_grant_top_small,
        this_epoch_unused_quota,
    ):
        """Pure-DP scheduling pass: walk tasks in DRS priority order and
        permit each task whose epsilon demand fits the unused quota of every
        demanded block (within sim.numerical_delta tolerance).

        When should_grant_top_small is set, stop at the first task that does
        not fit (strict priority). Mutates this_epoch_unused_quota,
        permitted_task_ids, permitted_blk_ids and dp_processed_task_idx.
        """
        for drs, t in permit_dp_task_order:
            t_idx, t_id = t
            # Strict-priority mode: once a higher-priority task failed to fit,
            # grant nothing further this epoch.
            if should_grant_top_small and (not are_leading_tasks_ok):
                break
            this_task = self.task_state[t_id]
            this_request = this_task["resource_request"]
            task_demand_block_idx = this_request['block_idx']
            task_demand_epsilon = this_request['epsilon']
            for b_idx in task_demand_block_idx:
                if (
                    this_epoch_unused_quota[b_idx]
                    + self.env.config['sim.numerical_delta']
                    < task_demand_epsilon
                ):
                    are_leading_tasks_ok = False
                    break
            # task is permitted (for-else: no block failed the quota check)
            else:
                drs = this_task['dominant_resource_share']
                self.debug(t_id, "DP permitted, Dominant resource share: %.3f" % drs)
                # Deduct the granted epsilon from each demanded block's quota.
                for i in task_demand_block_idx:
                    this_epoch_unused_quota[i] -= task_demand_epsilon
                this_task["dp_permitted_event"].succeed()
                permitted_task_ids.add(t_id)
                permitted_blk_ids.update(task_demand_block_idx)
                dp_processed_task_idx.append(t_idx)
        return
    def _cal_drs_rdp_a_all2(self):
        """Compute the dominant resource share (DRS) for newly arrived
        waiting RDP tasks.

        Walks the waiting queue from newest to oldest and stops at the first
        task whose DRS is already set (older tasks were computed earlier).
        DRS is the maximum, over demanded blocks and alpha orders with
        positive budget, of demand / budget.
        """
        for t_id in reversed(self.dp_waiting_tasks.items):
            this_task = self.task_state[t_id]
            # ending condition, drs already calculated
            if this_task['dominant_resource_share'] is not None:
                break
            this_request = this_task['resource_request']
            # block wise
            temp_max = -1
            for b in this_request['block_idx']:
                for j, e in enumerate(this_request['e_rdp']):
                    # iterate over all alpha demand; skip exhausted orders
                    if self.block_dp_storage.items[b]["rdp_budget_curve"][j] > 0:
                        normalized_e = (e / self.block_dp_storage.items[b]["rdp_budget_curve"][j]
                        )
                        temp_max = max(temp_max, normalized_e)
            # At least one (block, alpha) pair must have positive budget.
            assert temp_max != -1
            this_task['dominant_resource_share'] = temp_max
def _cal_drs_dp_L_Inf(self, new_arrival_tid):
for _, new_task_id in new_arrival_tid:
this_task = self.task_state[new_task_id]
this_task['dominant_resource_share'] = this_task["resource_request"][
'epsilon'
]
    def allocator_frontend_loop(self):
        """Main mailbox-driven dispatch loop (simpy process).

        Drains the mailbox on each wakeup and, per message, either registers
        a new task (NEW_TASK) or initializes the task's scheduling state and
        spawns its DP/resource handler processes (ALLOCATION_REQUEST).
        """
        while True:
            # loop only blocks here
            yield self.mail_box.when_any()
            for i in range(self.mail_box.size):
                get_evt = self.mail_box.get()
                msg = get_evt.value
                if msg["message_type"] == DpHandlerMessageType.NEW_TASK:
                    assert msg["task_id"] not in self.task_state
                    self.task_state[msg["task_id"]] = dict()
                    self.task_state[msg["task_id"]]["task_proc"] = msg["task_process"]
                if msg["message_type"] == DpHandlerMessageType.ALLOCATION_REQUEST:
                    assert msg["task_id"] in self.task_state
                    # NOTE(review): this rebinds the entry to a fresh dict,
                    # discarding the "task_proc" stored by NEW_TASK above --
                    # confirm that task_proc is not needed afterwards.
                    self.task_state[msg["task_id"]] = dict()
                    self.task_state[msg["task_id"]]["resource_request"] = msg
                    self.task_state[msg["task_id"]][
                        "resource_allocate_timestamp"
                    ] = None
                    self.task_state[msg["task_id"]]["dp_commit_timestamp"] = None
                    self.task_state[msg["task_id"]]["task_completion_timestamp"] = None
                    self.task_state[msg["task_id"]]["task_publish_timestamp"] = None
                    self.task_state[msg["task_id"]]["is_dp_granted"] = None
                    self.task_state[msg["task_id"]]["is_admission_control_ok"] = None
                    # Events supplied by the requesting task.
                    self.task_state[msg["task_id"]][
                        "resource_allocated_event"
                    ] = msg.pop("resource_allocated_event")
                    self.task_state[msg["task_id"]]["dp_committed_event"] = msg.pop(
                        "dp_committed_event"
                    )
                    # following two events are controlled by scheduling policy
                    self.task_state[msg["task_id"]][
                        "dp_permitted_event"
                    ] = self.env.event()
                    self.task_state[msg["task_id"]][
                        "resource_permitted_event"
                    ] = self.env.event()
                    self.task_state[msg["task_id"]][
                        "resource_released_event"
                    ] = self.env.event()
                    self.task_state[msg["task_id"]]["dominant_resource_share"] = None
                    self.task_state[msg["task_id"]]["execution_proc"] = msg.pop(
                        "execution_proc"
                    )
                    self.task_state[msg["task_id"]]["waiting_for_dp_proc"] = msg.pop(
                        "waiting_for_dp_proc"
                    )
                    ## trigger allocation
                    self.task_state[msg["task_id"]][
                        "handler_proc_dp"
                    ] = self.env.process(self.task_dp_handler(msg["task_id"]))
                    self.task_state[msg["task_id"]][
                        "handler_proc_resource"
                    ] = self.env.process(self.task_resources_handler(msg["task_id"]))
                    self.task_state[msg["task_id"]][
                        "blk2accum_getters"
                    ] = dict()  # blk_idx: getter
                    msg['task_init_event'].succeed()
    def _handle_accum_block_waiters(self, task_id):
        """Wait (as a simpy sub-process) for all of a task's per-block DP
        getters, with optional timeout.

        Returns 0 when every getter fulfilled; on timeout or a scheduler-side
        failure, fails the task's dp_committed_event, cancels outstanding
        getters, cleans up the task's accumulation containers and returns 1.
        """
        this_task = self.task_state[task_id]
        # NOTE(review): resource_demand is currently unused in this method.
        resource_demand = this_task["resource_request"]
        dp_committed_event = this_task["dp_committed_event"]
        wait_for_all_getter_proc = self.env.all_of(
            list(this_task["blk2accum_getters"].values())
        )
        try:
            if self.env.config['task.timeout.enabled']:
                timeout_evt = self.env.timeout(
                    self.env.config['task.timeout.interval'], TIMEOUT_VAL
                )
                permitted_or_timeout_val = yield wait_for_all_getter_proc | timeout_evt
            else:
                permitted_or_timeout_val = yield wait_for_all_getter_proc
            if wait_for_all_getter_proc.triggered:
                self.debug(task_id, "get all dp from blocks")
                return 0
            else:
                # The only other way the yield returns is the timeout firing.
                assert TIMEOUT_VAL in list(permitted_or_timeout_val.values())
                raise DprequestTimeoutError()
        except (
            StopReleaseDpError,
            InsufficientDpException,
            DprequestTimeoutError,) as err:
            self.debug(
                task_id,
                "policy=%s, fail to acquire dp due to" % self.dp_policy,
                err.__repr__(),
            )
            # interrupt dp_waiting_proc
            if this_task["handler_proc_resource"].is_alive:
                this_task["handler_proc_resource"].interrupt(
                    DpHandlerMessageType.DP_HANDLER_INTERRUPT_MSG
                )
            dp_committed_event.fail(err)
            # Bookkeeping of which per-block getters succeeded / were undone.
            removed_accum_cn = []
            missing_waiting_accum_cn = []
            fullfilled_blk = []
            unfullfilled_blk = []
            for blk_idx, get_event in this_task[
                "blk2accum_getters"
            ].items():  # get_evt_block_mapping.items():
                if get_event.triggered and get_event.ok:
                    fullfilled_blk.append(blk_idx)
                elif (not get_event.triggered) or (not get_event.ok):
                    unfullfilled_blk.append(blk_idx)
                    get_event.cancel()  # if not triggered pop from waiters
                    get_event.defused = True
                this_block = self.block_dp_storage.items[blk_idx]
                dp_container = this_block["dp_container"]
                # Remove this task's accumulation container unless the
                # scheduler already removed it.
                if task_id in this_block['waiting_tid2accum_containers']:
                    this_block['waiting_tid2accum_containers'].pop(task_id)
                    removed_accum_cn.append(task_id)
                else:
                    missing_waiting_accum_cn.append(blk_idx)
                    if get_event.triggered and get_event.ok:
                        # Sanity: returning the granted amount must not
                        # overflow the block's container (within tolerance).
                        assert (
                            dp_container.level + get_event.amount
                            < dp_container.capacity
                            + self.env.config['sim.numerical_delta']
                        )
            if len(removed_accum_cn) != 0:
                self.debug(
                    task_id,
                    "accum containers removed by task handler for blocks %s"
                    % removed_accum_cn,
                )
            if len(missing_waiting_accum_cn) != 0:
                self.debug(
                    task_id,
                    "accum containers removed by sched for blocks %s"
                    % removed_accum_cn,
                )
            self.debug(task_id, "fullfilled block demand getter: %s" % fullfilled_blk)
            self.debug(
                task_id, "unfullfilled block demand getter: %s" % unfullfilled_blk
            )
            return 1
def _check_task_admission_control(self, task_id):
this_task = self.task_state[task_id]
resource_demand = this_task["resource_request"]
dp_committed_event = this_task["dp_committed_event"]
if not self.is_rdp:
# only check uncommitted dp capacity
# peek remaining DP, reject if DP is already insufficient
for i in resource_demand["block_idx"]:
this_block = self.block_dp_storage.items[i]
capacity = this_block["dp_container"].capacity
if (
capacity + self.env.config['sim.numerical_delta']
< resource_demand["epsilon"]
):
self.debug(
task_id,
"DP is insufficient before asking dp scheduler, Block ID: %d, remain epsilon: %.3f"
% (i, capacity),
)
if this_task["handler_proc_resource"].is_alive:
this_task["handler_proc_resource"].interrupt(
DpHandlerMessageType.DP_HANDLER_INTERRUPT_MSG
)
# inform user's dp waiting task
dp_committed_event.fail(
InsufficientDpException(
"DP request is rejected by handler admission control, Block ID: %d, remain epsilon: %.3f"
% (i, capacity)
)
)
return False
elif self.is_accum_container_sched and (
not this_block['block_proc'].is_alive
):
dp_committed_event.fail(
InsufficientDpException(
"DP request is rejected by handler admission control, Block %d sched is inactive"
% i
)
)
return False
else:
for b in resource_demand["block_idx"]:
for j, e in enumerate(resource_demand["e_rdp"]):
if (
self.block_dp_storage.items[b]["rdp_budget_curve"][j]
- self.block_dp_storage.items[b]["rdp_consumption"][j]
>= e
):
break
else:
self.debug(
task_id,
"RDP is insufficient before asking rdp scheduler, Block ID: %d"
% (b),
)
if this_task["handler_proc_resource"].is_alive:
this_task["handler_proc_resource"].interrupt(
DpHandlerMessageType.DP_HANDLER_INTERRUPT_MSG
)
# inform user's dp waiting task
this_task['is_dp_granted'] = False
dp_committed_event.fail(
InsufficientDpException(
| |
MD5 Authentication TLV
h = data[offset+tlv_length+2-16:] # MD5 hash, last 16 bytes
salt = salt + data[offset:offset+tlv_length+2].replace(h, "\x00" * 16)
uses_authentication = True
offset = offset + tlv_length + 2
else:
break
except:
break
if uses_authentication:
sys.stdout.write("%d:$hsrp$%s$%s\n" % (index, salt.encode("hex"), h.encode("hex")))
f.close()
def pcap_parser_glbp(fname):
    """Extract GLBP MD5-chain authentication material from a .pcap file.

    For every UDP/3222 GLBP v1 packet carrying an MD5-chain Authentication
    TLV, prints a crackable line "index:$hsrp$<salt_hex>$<hash_hex>" to
    stdout (the $hsrp$ tag is reused here; presumably the hash construction
    is identical — verify against the cracker's format handler).

    Python 2 only: relies on str packet data, ord() on 1-char slices and
    str.encode("hex").
    """
    f = open(fname, "rb")
    pcap = dpkt.pcap.Reader(f)
    index = 0  # 1-based packet counter, used as the output label
    for _, buf in pcap:
        index = index + 1
        eth = dpkt.ethernet.Ethernet(buf)
        if eth.type == dpkt.ethernet.ETH_TYPE_IP or eth.type == dpkt.ethernet.ETH_TYPE_IP6:
            ip = eth.data
            if eth.type == dpkt.ethernet.ETH_TYPE_IP and ip.p != dpkt.ip.IP_PROTO_UDP:
                continue
            if eth.type == dpkt.ethernet.ETH_TYPE_IP6 and ip.nxt != dpkt.ip.IP_PROTO_UDP:
                continue
            # For IPv4 the last 8..4 bytes of the packed header are the source
            # address ("Source GeoIP" below).
            # NOTE(review): for IPv6 pack_hdr() this slice lands inside the
            # 16-byte source address — TODO confirm intended for IPv6.
            ip_headers = ip.pack_hdr()
            source_geoip = ip_headers[-8:-4]
            udp = ip.data
            data = udp.data
            if udp.dport != 3222: # is this GLBP traffic?
                continue
            if ord(data[0]) != 1: # GLBP version
                continue
            if len(data) < 40: # rough estimate ;)
                continue
            # Ideally, we should do Authentication TLV processing with generic TLV processing code below!
            tlv_type = ord(data[12])
            if tlv_type != 3:
                continue
            # Is this "MD5 chain" type authentication?
            algo_type = ord(data[14])
            if algo_type != 3:
                sys.stderr.write("[-] Ignoring non-MD5-chain auth type in packet %s!\n" % index)
                continue
            auth_length = ord(data[15])
            if auth_length != 20:
                continue
            # hash is at offset 20
            h = data[20:20 + 16].encode("hex")
            # salt extends from offset 0 to 19 (hash starts from 20)
            salt = data[0:20]
            # append "Source GeoIP" + 12 zero bytes (verify this part) to
            # the salt
            salt = salt + source_geoip + ("\x00" * 12)
            # process extra TLVs
            offset = 36
            while True:
                try:
                    tlv_type = ord(data[offset:offset+1])
                    tlv_length = ord(data[offset+1:offset+2])
                    # NOTE(review): unlike the HSRP parser above, the offset is
                    # advanced by tlv_length only, without the 2 type/length
                    # header bytes — presumably GLBP's length field covers the
                    # whole TLV; TODO confirm against a capture.
                    if tlv_type == 1: # Hello TLV, extract "Virtual IPv4"
                        hello_salt = data[offset:offset+tlv_length]
                        salt = salt + hello_salt
                        offset = offset + tlv_length
                    elif tlv_type == 4: # unknown TLV ;)
                        unknown_salt = data[offset:offset+tlv_length]
                        salt = salt + unknown_salt
                        offset = offset + tlv_length
                    elif tlv_type == 2: # Request/Response TLV?
                        rr_salt = data[offset:offset+tlv_length]
                        salt = salt + rr_salt
                        offset = offset + tlv_length
                    else:
                        break
                except:
                    # ord("") on a truncated packet ends the TLV walk
                    break
            sys.stdout.write("%s:$hsrp$%s$%s\n" % (index, salt.encode("hex"), h))
    f.close()
# Parts are borrowed from "module_tacacs_plus.py" from the loki project which is
# Copyright 2015 <NAME> <<EMAIL>>. See the licensing blurb before
# "pcap_parser_wlccp" function.
#
# 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8
#
# +----------------+----------------+----------------+----------------+
# |major | minor | | | |
# |version| version| type | seq_no | flags |
# +----------------+----------------+----------------+----------------+
# | |
# | session_id |
# +----------------+----------------+----------------+----------------+
# | |
# | length |
# +----------------+----------------+----------------+----------------+
def pcap_parser_tacacs_plus(fname):
    """Extract encrypted TACACS+ authentication packets from a .pcap file.

    For every server->client TCP/49 TYPE_AUTHEN packet with the
    "unencrypted" flag clear, prints
    "index:$tacacs-plus$0$<session_id>$<ciphertext>$<version_byte+seq_no>"
    to stdout for offline cracking.

    Python 2 only (str.encode("hex")).
    """
    TACACS_PLUS_PORT = 49
    TACACS_PLUS_VERSION_MAJOR = 0xc
    TYPE_AUTHEN = 0x01
    FLAGS_UNENCRYPTED = 0x01
    f = open(fname, "rb")
    pcap = dpkt.pcap.Reader(f)
    index = 0
    for _, buf in pcap:
        index = index + 1
        eth = dpkt.ethernet.Ethernet(buf)
        if eth.type == dpkt.ethernet.ETH_TYPE_IP or eth.type == dpkt.ethernet.ETH_TYPE_IP6:
            ip = eth.data
            if eth.type == dpkt.ethernet.ETH_TYPE_IP and ip.p != dpkt.ip.IP_PROTO_TCP:
                continue
            if eth.type == dpkt.ethernet.ETH_TYPE_IP6 and ip.nxt != dpkt.ip.IP_PROTO_TCP:
                continue
            tcp = ip.data
            data = tcp.data
            if tcp.dport != TACACS_PLUS_PORT and tcp.sport != TACACS_PLUS_PORT:
                continue
            if len(tcp.data) <= 12:  # need the full 12-byte TACACS+ header
                continue
            # packets sent FROM port 49 come from the server
            server = tcp.sport == TACACS_PLUS_PORT
            ver, kind, seq_no, flags, session_id, length = struct.unpack("!BBBBII", data[:12])
            if flags & FLAGS_UNENCRYPTED:
                continue
            version_minor = ver & 0x0F
            if not server or kind != TYPE_AUTHEN:
                continue
            ciphertext = data[12:]
            predata = struct.pack("!I", session_id)
            # BUG FIX: "+" binds tighter than "<<", so the original
            # "MAJOR << 4 + version_minor" computed MAJOR << (4 + minor);
            # for any non-zero minor version that overflows the "!B" field
            # and raises struct.error. The version byte is (major << 4) + minor.
            postdata = struct.pack("!BB", (TACACS_PLUS_VERSION_MAJOR << 4) + version_minor, seq_no)
            sys.stdout.write("%s:$tacacs-plus$0$%s$%s$%s\n" % (index,
                                                               predata.encode("hex"),
                                                               ciphertext.encode("hex"),
                                                               postdata.encode("hex")))
    f.close()
# This code is borrowed from "module_wlccp.py" from the loki project which is
# Copyright 2015 <NAME> <<EMAIL>>.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted.
def pcap_parser_wlccp(fname):
    """Extract LEAP challenge/response pairs from WLCCP EAP-AUTH traffic.

    Tracks, per requestor MAC, the 4-tuple
    (auth_challenge, auth_response, supp_challenge, supp_response) across
    UDP/2887 packets, then prints completed authenticator pairs in
    NETNTLM format: "user:$NETNTLM$<challenge_hex>$<response_hex>".

    Python 2 only (str packet data, str.encode("hex")).
    """
    f = open(fname, "rb")
    pcap = dpkt.pcap.Reader(f)
    index = 0
    comms = {} # "state machine", bugs introduced by me!
    for _, buf in pcap:
        index = index + 1
        eth = dpkt.ethernet.Ethernet(buf)
        if eth.type == dpkt.ethernet.ETH_TYPE_IP or eth.type == dpkt.ethernet.ETH_TYPE_IP6:
            ip = eth.data
            if eth.type == dpkt.ethernet.ETH_TYPE_IP and ip.p != dpkt.ip.IP_PROTO_UDP:
                continue
            if eth.type == dpkt.ethernet.ETH_TYPE_IP6 and ip.nxt != dpkt.ip.IP_PROTO_UDP:
                continue
            udp = ip.data
            data = udp.data
            if udp.dport != 2887 and udp.sport != 2887:
                continue
            if len(udp.data) <= 28 + 12 + 6 + 4 + 16: # rough check
                continue
            # WLCCP header parse
            (version, sap, dst_type, length, msg_type, hopcount, iden, flags, orig_node_type) = struct.unpack("!BBHHBBHHH", data[:14])
            orig_node_mac = data[14:20]
            dst_node_type = struct.unpack("!H", data[20:22])
            dst_node_mac = data[22:28]
            data = data[28:]
            if msg_type & 0x3f == 0x0b: # EAP AUTH
                # EAP header parse
                requestor_type = struct.unpack("!H", data[:2])
                requestor_mac = data[2:8]
                (aaa_msg_type, aaa_auth_type, aaa_key_mgmt_type, status_code) = struct.unpack("!BBBB", data[8:12])
                data = data[12:]
                host = requestor_mac.encode("hex")
                if host in comms:
                    leap = comms[host]
                elif not host == "000000000000":
                    # BUG FIX: the original stored the fresh state tuple but
                    # left "leap" unbound, crashing with NameError on the
                    # first packet from a new host (or silently reusing the
                    # previous host's state on later iterations).
                    leap = comms[host] = (None, None, None, None)
                else:
                    # all-zero MAC carries no per-host state; skip instead of
                    # reusing "leap" left over from an unrelated packet
                    continue
                (eapol_version, eapol_type, eapol_len) = struct.unpack("!BBH", data[2:6])
                data = data[6:]
                # check EAP-TYPE
                if eapol_type == 0x00:
                    (eap_code, eap_id, eap_len) = struct.unpack("!BBH", data[:4])
                    data = data[4:]
                    # check EAP-CODE
                    if eap_code == 0x01:
                        (leap_type, leap_version, leap_reserved, leap_count) = struct.unpack("!BBBB", data[:4])
                        data = data[4:]
                        # EAP-REQUEST, check the leap hdr
                        if leap_type == 0x11 and leap_version == 0x01 and leap_reserved == 0x00 and leap_count == 0x08:
                            (leap_auth_chall, leap_auth_resp, leap_supp_chall, leap_supp_resp) = leap
                            if not leap_auth_chall and not leap_auth_resp and not leap_supp_chall and not leap_supp_resp:
                                iden = eap_id
                                chall = data[:8]
                                user = data[8:16]
                                print("[DEBUG] WLCCP: EAP-AUTH challenge from authenticator seen for %s" % host)
                                comms[host] = ((iden, chall, user), leap_auth_resp, leap_supp_chall, leap_supp_resp)
                            elif leap_auth_chall and leap_auth_resp and not leap_supp_chall and not leap_supp_resp:
                                chall = data[:8]
                                print("[DEBUG] WLCCP: EAP-AUTH challenge from supplicant seen for %s" % host)
                                comms[host] = (leap_auth_chall, leap_auth_resp, chall, leap_supp_resp)
                    elif eap_code == 0x02:
                        (leap_type, leap_version, leap_reserved, leap_count) = struct.unpack("!BBBB", data[:4])
                        data = data[4:]
                        # EAP-RESPONSE, check the leap hdr
                        if leap_type == 0x11 and leap_version == 0x01 and leap_reserved == 0x00 and leap_count == 0x18:
                            (leap_auth_chall, leap_auth_resp, leap_supp_chall, leap_supp_resp) = leap
                            if leap_auth_chall and not leap_auth_resp and not leap_supp_chall and not leap_supp_resp:
                                resp = data[:24]
                                print("[DEBUG] WLCCP: EAP-AUTH response from authenticator seen for %s" % host)
                                comms[host] = (leap_auth_chall, resp, leap_supp_chall, leap_supp_resp)
                            elif leap_auth_chall and leap_auth_resp and leap_supp_chall and not leap_supp_resp:
                                resp = data[:24]
                                print("[DEBUG] WLCCP: EAP-AUTH response from supplicant seen for %s" % host)
                                comms[host] = (leap_auth_chall, leap_auth_resp, leap_supp_chall, resp)
    for entry in comms:
        (leap_auth_chall, leap_auth_resp, leap_supp_chall, leap_supp_resp) = comms[entry]
        # BUG FIX: require the response as well — a lone challenge made the
        # original crash on None.encode("hex")
        if leap_auth_chall and leap_auth_resp:
            _, challenge, user = leap_auth_chall
            print("%s:$NETNTLM$%s$%s" % (user, challenge.encode("hex"), leap_auth_resp.encode("hex")))
    f.close()
def endian(s):
    """Swap each adjacent pair of characters in *s* (byte-order swap of a
    hex string): "abcd" -> "badc".

    Assumes len(s) is even; an odd trailing character raises IndexError,
    as before.
    """
    # "".join over a generator is O(n); the original "+=" loop was quadratic
    return "".join(s[i + 1] + s[i] for i in range(0, len(s), 2))
def process_hash(uid, nonce, sha1):
    """Print a Gadu-Gadu SHA1 login hash in dynamic_24 format.

    Does nothing when no GG_WELCOME nonce has been captured yet.
    """
    if not nonce:
        return
    # uid arrives as little-endian hex: reverse the string, then nibble-swap
    # each byte pair (endian) to recover the integer value
    uid = int(endian(uid[::-1]), 16)
    # parenthesized form is behavior-identical in Python 2 and consistent
    # with the print(...) calls used elsewhere in this file
    print("%s:$dynamic_24$%s$HEX$%s" % (uid, sha1, nonce))
def handle_gg_login105(payload, nonce):
    """
    GG_LOGIN105 stores uid as hex encoded ASCII. 16th byte is the number of digits in uid.
    uid begins at 17th byte. sha1 hash is separated from last digit of uid by two bytes.
    """
    # NOTE: payload is a hex string, so every offset below is 2 * byte offset.
    # byte 15 (chars 30:32) = number of ASCII digits in the uid
    digits = int(payload[30:32], 16)
    # the uid digits are themselves hex-encoded ASCII (Python 2 hex codec)
    uid = payload[32:32 + 2*digits].decode("hex")
    # skip the 2 raw separator bytes (4 hex chars) between uid and sha1
    offset = 32 + 2*digits + 4
    sha1 = payload[offset:offset + 40]
    print "%s:$dynamic_24$%s$HEX$%s" % (uid, sha1, nonce)
def pcap_parser_gadu(pcapfile):
    """Extract Gadu-Gadu login hashes from a .pcap file (scapy-based).

    Remembers the server nonce from GG_WELCOME packets and pairs it with
    subsequent GG_LOGIN80 (SHA1 hashtype only) or GG_LOGIN105 logins on
    TCP port 8074. Python 2 only (str.encode('hex')).
    """
    try:
        packets = rdpcap(pcapfile)
    except:
        # scapy raises various exception types for unreadable inputs;
        # treat them all as "not a pcap"
        sys.stderr.write("%s is not a .pcap file\n" % pcapfile)
        return
    ports = [8074]
    nonce = ""
    for pkt in packets:
        if TCP in pkt and (pkt[TCP].dport in ports or pkt[TCP].sport in ports):
            # work entirely in hex-string space from here on
            payload = str(pkt[TCP].payload).encode('hex')
            # first 4 bytes are the little-endian GG packet type
            if payload[:8] == '01000000': # GG_WELCOME
                nonce = payload[16:]
            if payload[:8] == '31000000': # GG_LOGIN80
                hashtype = payload[28:30]
                # hashtype 0x02 appears to be the SHA1 variant; others ignored
                if hashtype == "02":
                    uid = payload[16:24]
                    sha1 = payload[30:70]
                    process_hash(uid, nonce, sha1)
            if payload[:8] == '83000000': # GG_LOGIN105
                handle_gg_login105(payload, nonce)
def pcap_parser_eigrp(fname):
f = open(fname, "rb")
pcap = dpkt.pcap.Reader(f)
index = 0
for _, buf in pcap:
index = index + 1
eth = dpkt.ethernet.Ethernet(buf)
if eth.type == dpkt.ethernet.ETH_TYPE_IP or eth.type == dpkt.ethernet.ETH_TYPE_IP6:
ip = eth.data
# IPv6 support is based on the following sample .pcap file
# http://wiki.wireshark.org/SampleCaptures?action=AttachFile&do=get&target=eigrp-for-ipv6-auth.pcap
if eth.type == dpkt.ethernet.ETH_TYPE_IP and ip.p != dpkt.ip.IP_PROTO_EIGRP:
continue
if eth.type == dpkt.ethernet.ETH_TYPE_IP6 and ip.nxt != dpkt.ip.IP_PROTO_EIGRP:
continue
data = ip.data
destination = socket.inet_ntoa(ip.src)
if ord(data[0]) != 2: # EIGRP version
| |
"sim2": "Comparison Run", "m1": r"$M_{\text{ej}}^a$", "m2": r"$M_{\text{ej}}^b$", "tmax":r"$t_{\text{max}}$", "err":r"$\Delta$"}
# precs = ["", "", ".2f", ".2f", ".1f", "d"]
#
# size = '{'
# head = ''
# for i, v_n in enumerate(cols):
# v_n = lbl_dic[v_n]
# size = size + 'c'
# head = head + '{}'.format(v_n)
# if v_n != cols[-1]: size = size + ' '
# if i != len(cols) - 1: head = head + ' & '
# size = size + '}'
#
# unit_bar = ''
# for v_n in cols:
# if v_n in units_dic.keys():
# unit = units_dic[v_n]
# else:
# unit = v_n
# unit_bar = unit_bar + '{}'.format(unit)
# if v_n != cols[-1]: unit_bar = unit_bar + ' & '
#
# head = head + ' \\\\' # = \\
# unit_bar = unit_bar + ' \\\\ '
#
# print(errs[sims[0]])
#
# print('\n')
#
# print('\\begin{table*}[t]')
# print('\\begin{center}')
# print('\\begin{tabular}' + '{}'.format(size))
# print('\\hline')
# print(head)
# print(unit_bar)
# print('\\hline\\hline')
#
# for sim1, mask1, sim2, mask2 in zip(sims, masks, sims2, masks2):
# row = ''
# for v_n, prec in zip(cols, precs):
#
# if prec != "":
# val = "%{}".format(prec) % errs[sim1][v_n]
# else:
# val = errs[sim1][v_n].replace("_", "\_")
# row = row + val
# if v_n != cols[-1]: row = row + ' & '
# row = row + ' \\\\' # = \\
# print(row)
#
# print(r'\hline')
# print(r'\end{tabular}')
# print(r'\end{center}')
# print(r'\caption{'+r'Viscosity effect on the ejected material total cumulative mass. Criterion {} '
# .format(mask.replace('_', '\_')) +
# r'$\Delta = |M_{\text{ej}}^a - M_{\text{ej}}^b| / M_{\text{ej}}^a |_{tmax} $ }')
# print(r'\label{tbl:1}')
# print(r'\end{table*}')
#
# exit(1)
"""=================================================================================================================="""
def eos_color(eos):
    """Return the plotting color assigned to an equation-of-state name.

    Unknown EOS names fall back to 'black'.
    """
    # a dict lookup with a default replaces the original if/elif chain
    colors = {
        'DD2': 'blue',
        'BHBlp': 'purple',
        'LS220': 'orange',
        'SFHo': 'red',
        'SLy4': 'green',
    }
    return colors.get(eos, 'black')
def get_ms(q, qmin=1, qmax = 1.4, msmin = 5., msmax = 10.):
    """Map a mass ratio q onto a marker size by inverting the linear map
    ms -> q fixed by the points (msmin, qmin) and (msmax, qmax)."""
    slope = (qmax - qmin) / (msmax - msmin)
    intercept = qmax - slope * msmax
    return (q - intercept) / slope
""" =================================================| DUMPSTER |===================================================="""
class ErrorEstimation_old:
    """Compare ejecta quantities of two simulations (superseded version).

    Kept for reference; superseded by ErrorEstimation below. Detector
    index 0 is hard-coded for all outflow queries.
    """
    def __init__(self, sim1, sim2):
        # fixed detector index used by every outflow query
        self.det = 0
        self.sim1 = sim1
        self.sim2 = sim2
        pass
    # --------------------| Preparation |--------------------------- #
    def get_tmax(self):
        """Return the smaller post-t98mass time window of the two runs.

        NOTE(review): the unconditional exit(1) below terminates the
        process, making the return unreachable — presumably leftover
        debugging; confirm before reusing this class.
        """
        o_par1 = ALL_PAR(self.sim1)
        o_par2 = ALL_PAR(self.sim2)
        tmerg1 = o_par1.get_par("tmerger")
        tmerg2 = o_par2.get_par("tmerger")
        t98geomass1 = o_par1.get_outflow_par(self.det, "geo", "t98mass")
        t98geomass2 = o_par2.get_outflow_par(self.det, "geo", "t98mass")
        tend1 = o_par1.get_outflow_par(self.det, "geo", "tend")
        tend2 = o_par2.get_outflow_par(self.det, "geo", "tend")
        # sanity: tmerger < t98mass < tend for both runs
        assert tend1 > t98geomass1
        assert tend2 > t98geomass2
        assert tmerg1 < t98geomass1
        assert tmerg2 < t98geomass2
        # shift all times to post-merger
        tend1 = tend1 - tmerg1
        tend2 = tend2 - tmerg2
        t98geomass1 = t98geomass1 - tmerg1
        t98geomass2 = t98geomass2 - tmerg2
        delta_t1 = tend1 - t98geomass1
        delta_t2 = tend2 - t98geomass2
        print("Time window for bernoulli ")
        print("\t{} {:.2f} [ms]".format(self.sim1, delta_t1*1e3))
        print("\t{} {:.2f} [ms]".format(self.sim2, delta_t2*1e3))
        exit(1)
        delta_t = np.min([delta_t1, delta_t2])
        return delta_t
    #
    #
    # assert tend1 > tmerg1
    # assert tend2 > tmerg2
    #
    # print("tend1:{} tmerg1:{} -> {}".format(tend1, tmerg1, tend1-tmerg1))
    # print("tend2:{} tmerg2:{} -> {}".format(tend2, tmerg2, tend2-tmerg2))
    # # print("tmax:{}".format)
    #
    # tend1 = tend1 - tmerg1
    # tend2 = tend2 - tmerg2
    #
    # tmax = np.min([tend1, tend2])
    # print("get_tmax = tmax:{}".format(tmax))
    #
    #
    # return tmax
    def compute_outflow_new_mask(self, sim, tasks, new_mask, rewrite):
        """Run the requested outflow post-processing tasks for sim under new_mask."""
        # get_tmax60 # ms
        print("\tAdding mask:{}".format(new_mask))
        o_outflow = EJECTA_PARS(sim, add_mask=new_mask)
        if not os.path.isdir(Paths.ppr_sims+sim+"/"+"outflow_{:d}/".format(self.det)+new_mask+'/'):
            os.mkdir(Paths.ppr_sims+sim+"/"+"outflow_{:d}/".format(self.det)+new_mask+'/')
        for task in tasks:
            if task == "hist":
                # "historgrams" spelling matches the name exported by `outflowed`
                from outflowed import outflowed_historgrams
                outflowed_historgrams(o_outflow, [self.det], [new_mask], o_outflow.list_hist_v_ns, rewrite=rewrite)
            elif task == "corr":
                from outflowed import outflowed_correlations
                outflowed_correlations(o_outflow, [self.det], [new_mask], o_outflow.list_corr_v_ns, rewrite=rewrite)
            elif task == "totflux":
                from outflowed import outflowed_totmass
                outflowed_totmass(o_outflow, [self.det], [new_mask], rewrite=rewrite)
            elif task == "timecorr":
                from outflowed import outflowed_timecorr
                outflowed_timecorr(o_outflow, [self.det], [new_mask], o_outflow.list_hist_v_ns, rewrite=rewrite)
            else:
                raise NameError("method for computing outflow with new mask is not setup for task:{}".format(task))
    def main_prepare_outflow_data(self, new_mask, rewrite=False):
        """Compute totflux + hist outflow data under new_mask for both sims."""
        # get new mask for a maximum time (postmerger)
        # compute outflow data for this new mask
        tasks = ["totflux", "hist"]
        self.compute_outflow_new_mask(self.sim1, tasks, new_mask, rewrite=rewrite)
        self.compute_outflow_new_mask(self.sim2, tasks, new_mask, rewrite=rewrite)
        return new_mask
    # --------------------| Data Comparison |--------------------------- #
    def get_outflow_par_err(self, new_mask, v_n):
        """Return (val1, val2, relative error) of v_n for both runs.

        NOTE(review): divides by val1 — raises/returns inf if val1 is 0.
        """
        o_par1 = ALL_PAR(self.sim1, add_mask=new_mask)
        o_par2 = ALL_PAR(self.sim2, add_mask=new_mask)
        val1 = o_par1.get_outflow_par(self.det, new_mask, v_n)
        val2 = o_par2.get_outflow_par(self.det, new_mask, v_n)
        err = np.abs(val1 - val2) / val1
        return val1, val2, err
    def main(self, v_ns, rewrite):
        """Prepare time-limited masks, collect v_ns for both runs and print
        per-mask values with their relative errors.

        Returns (ind_res_dic, comb_res_dic).
        NOTE(review): self.get_tmax() calls exit(1) (see above), so this
        method as written never proceeds past its first statement group.
        """
        base_masks = ["geo", "bern_geoend"]
        new_masks = []
        ind_res_dic = {}
        comb_res_dic = {}
        tmax = self.get_tmax()
        # preparing data
        for base_mask in base_masks:
            __new_mask = base_mask + "_length{:.0f}".format(tmax * 1e5) # 100ms
            Printcolor.print_colored_string(
                ["task:", "outflow", "det:", "{}".format(self.det), "mask:", __new_mask, ":", "starting"],
                ["blue", "green", "blue", "green", "blue", "green", "", "green"])
            # try:
            new_mask = self.main_prepare_outflow_data(__new_mask, rewrite=rewrite)
            new_masks.append(new_mask)
            # except AssertionError:
            #     Printcolor.print_colored_string(
            #         ["task:", "outflow", "det:", "{}".format(self.det), "mask:", __new_mask, ":", "Assertion Error"],
            #         ["blue", "green", "blue", "green", "blue", "green", "", "red"])
            #     break
        if len(new_masks) == 0:
            raise ValueError("non of the given base_masks:{} succeeded".format(base_masks))
        # writing results
        o_par1 = ALL_PAR(self.sim1)
        o_par2 = ALL_PAR(self.sim2)
        ind_res_dic["sims"] = [self.sim1, self.sim2]
        ind_res_dic["base_masks"] = base_masks
        ind_res_dic["new_masks"] = new_masks
        for mask in base_masks:
            if mask.__contains__("bern_"):
                t98mass1 = o_par1.get_outflow_par(self.det, "geo", "t98mass")
                t98mass2 = o_par2.get_outflow_par(self.det, "geo", "t98mass")
                tmerg1 = o_par1.get_par("tmerger")
                tmerg2 = o_par2.get_par("tmerger")
                ind_res_dic["t98mass"] = [t98mass1-tmerg1, t98mass2-tmerg2]
        # loading results
        for new_mask in new_masks:
            comb_res_dic[new_mask] = {}
            for v_n in v_ns:
                val1, val2, err = self.get_outflow_par_err(new_mask, v_n)
                comb_res_dic[new_mask][v_n] = [val1, val2, err]
        # printing results
        for key in ind_res_dic.keys():
            print ind_res_dic[key]
        print("sim1:{} sim2:{}".format(self.sim1, self.sim2))
        for new_mask in new_masks:
            print("\tmask:{}".format(new_mask))
            for v_n in v_ns:
                val1, val2, err = comb_res_dic[new_mask][v_n]
                print("\t\tval1:{} val2:{} err:{}".format(val1, val2, err))
        return ind_res_dic, comb_res_dic
class ErrorEstimation:
    def __init__(self, sim1, sim2):
        # fixed detector index used by every outflow query
        self.det = 0
        self.sim1 = sim1
        self.sim2 = sim2
        # parameter readers are created once here and reused by all methods
        self.o_par1 = ADD_METHODS_ALL_PAR(self.sim1)
        self.o_par2 = ADD_METHODS_ALL_PAR(self.sim2)
    def get_post_geo_delta_t(self):
        """Return the shorter of the two runs' post-t98mass time windows.

        For each run the window is tend - t98mass, both measured relative
        to the merger time. Asserts the expected time ordering
        tmerger < t98mass < tend for both runs.
        """
        # o_par1 = ALL_PAR(self.sim1)
        # o_par2 = ALL_PAR(self.sim2)
        tmerg1 = self.o_par1.get_par("tmerger")
        tmerg2 = self.o_par2.get_par("tmerger")
        t98geomass1 = self.o_par1.get_outflow_par(self.det, "geo", "t98mass")
        t98geomass2 = self.o_par2.get_outflow_par(self.det, "geo", "t98mass")
        tend1 = self.o_par1.get_outflow_par(self.det, "geo", "tend")
        tend2 = self.o_par2.get_outflow_par(self.det, "geo", "tend")
        # sanity: tmerger < t98mass < tend for both runs
        assert tend1 > t98geomass1
        assert tend2 > t98geomass2
        assert tmerg1 < t98geomass1
        assert tmerg2 < t98geomass2
        # shift all times to post-merger
        tend1 = tend1 - tmerg1
        tend2 = tend2 - tmerg2
        t98geomass1 = t98geomass1 - tmerg1
        t98geomass2 = t98geomass2 - tmerg2
        delta_t1 = tend1 - t98geomass1
        delta_t2 = tend2 - t98geomass2
        print("Time window for bernoulli ")
        print("\t{} {:.2f} [ms]".format(self.sim1, delta_t1*1e3))
        print("\t{} {:.2f} [ms]".format(self.sim2, delta_t2*1e3))
        # exit(1)
        delta_t = np.min([delta_t1, delta_t2])
        return delta_t
    def get_tmax_d3_data(self):
        """Return the latest time covered by D3 profile data in BOTH runs.

        Returns np.nan (after printing a red warning) when either run has
        no D3 data or their time ranges do not overlap.
        """
        isd3_1, itd3_1, td3_1 = self.o_par1.get_ittime("profiles", "prof")
        isd3_2, itd3_2, td3_2 = self.o_par2.get_ittime("profiles", "prof")
        if len(td3_1) == 0:
            Printcolor.red("D3 data not found for sim1:{}".format(self.sim1))
            return np.nan
        if len(td3_2) == 0:
            Printcolor.red("D3 data not found for sim2:{}".format(self.sim2))
            return np.nan
        # overlap check in both directions
        if td3_1.min() > td3_2.max():
            Printcolor.red("D3 data does not overlap. sim1 has min:{} that is > than sim2 max: {}"
                           .format(td3_1.min(), td3_2.max()))
            return np.nan
        if td3_1.max() < td3_2.min():
            Printcolor.red("D3 data does not overlap. sim1 has max:{} that is < than sim2 min: {}"
                           .format(td3_1.max(), td3_2.min()))
            return np.nan
        tmax = np.min([td3_1.max(), td3_2.max()])
        print("\ttmax for D3 data: {}".format(tmax))
        # cast away the numpy scalar type for downstream formatting
        return float(tmax)
    def compute_outflow_new_mask(self, sim, tasks, mask, rewrite):
        """Run the requested outflow post-processing tasks for `sim` under `mask`.

        Creates the mask's output directory if needed, then dispatches each
        task name to the matching routine in the `outflowed` module.
        Raises NameError for an unrecognized task name.
        """
        # get_tmax60 # ms
        print("\tAdding mask:{}".format(mask))
        o_outflow = EJECTA_PARS(sim, add_mask=mask)
        # NOTE(review): os.mkdir fails if the parent outflow_N dir is absent;
        # presumably it always exists at this point — confirm.
        if not os.path.isdir(Paths.ppr_sims + sim +"/" +"outflow_{:d}/".format(self.det) + mask + '/'):
            os.mkdir(Paths.ppr_sims + sim +"/" +"outflow_{:d}/".format(self.det) + mask + '/')
        for task in tasks:
            if task == "hist":
                # "historgrams" spelling matches the name exported by `outflowed`
                from outflowed import outflowed_historgrams
                outflowed_historgrams(o_outflow, [self.det], [mask], o_outflow.list_hist_v_ns, rewrite=rewrite)
            elif task == "corr":
                from outflowed import outflowed_correlations
                outflowed_correlations(o_outflow, [self.det], [mask], o_outflow.list_corr_v_ns, rewrite=rewrite)
            elif task == "totflux":
                from outflowed import outflowed_totmass
                outflowed_totmass(o_outflow, [self.det], [mask], rewrite=rewrite)
            elif task == "timecorr":
                from outflowed import outflowed_timecorr
                outflowed_timecorr(o_outflow, [self.det], [mask], o_outflow.list_hist_v_ns, rewrite=rewrite)
            else:
                raise NameError("method for computing outflow with new mask is not setup for task:{}".format(task))
def get_outflow_par_err(self, new_mask, v_n):
o_par1 = ALL_PAR(self.sim1, add_mask=new_mask)
o_par2 = ALL_PAR(self.sim2, add_mask=new_mask)
val1 = o_par1.get_outflow_par(self.det, new_mask, v_n)
val2 = o_par2.get_outflow_par(self.det, new_mask, v_n)
# err = np.abs(val1 - val2) / val1
return val1, val2
def main(self, rewrite=True):
geo_v_ns = ["Mej_tot", "Ye_ave", "s_ave", "theta_rms"]
tasks = ["totflux", "hist"]
self.get_tmax_d3_data()
# d3
v_ns = ["Mdisk3D"]
d3_res1 = {}
d3_res2 = {}
td3 = self.get_tmax_d3_data()
if not np.isnan(td3):
for v_n in v_ns:
d3_res1[v_n] = self.o_par1.get_int_par(v_n, td3)
d3_res2[v_n] = self.o_par2.get_int_par(v_n, td3)
else:
for v_n in v_ns:
d3_res1[v_n] = np.nan
d3_res2[v_n] = np.nan
print("--- {} ---".format("d3"))
print(self.sim1),
print([("{}: {}".format(key, val)) for key, val in d3_res1.items()])
print(self.sim2),
print([("{}: {}".format(key, val)) for key, val in d3_res2.items()])
# geo
mask = "geo"
self.compute_outflow_new_mask(self.sim1, tasks, mask, rewrite=rewrite)
self.compute_outflow_new_mask(self.sim2, tasks, mask, rewrite=rewrite)
geo_res1 = {}
geo_res2 | |
<gh_stars>0
import os
import pickle
import os
import pickle
import argparse
import time
import subprocess
import shutil
import torch
from torch.autograd import Variable
import numpy as np
from utils import DataLoader
from helper import getCoef, sample_gaussian_2d, get_mean_error, get_final_error
from helper import *
from grid import getSequenceGridMask, getGridMask
def main():
    """Run trajectory-prediction inference on the test set.

    Parses CLI options, loads the checkpoint for the chosen method/epoch,
    samples predicted trajectories for every test batch over the requested
    number of iterations, keeps the iteration with the smallest
    observed-part error, and writes submission and plot files.

    Returns the slice ret_x_seq[10:20] of the last predicted sequence.
    """
    parser = argparse.ArgumentParser()
    # Observed length of the trajectory parameter
    parser.add_argument('--obs_length', type=int, default=10,
                        help='Observed length of the trajectory')
    # Predicted length of the trajectory parameter
    parser.add_argument('--pred_length', type=int, default=10,
                        help='Predicted length of the trajectory')
    # Model to be loaded
    parser.add_argument('--epoch', type=int, default=14,
                        help='Epoch of model to be loaded')
    # cuda support
    parser.add_argument('--use_cuda', action="store_true", default=False,
                        help='Use GPU or not')
    # drive support
    parser.add_argument('--drive', action="store_true", default=False,
                        help='Use Google drive or not')
    # number of iteration -> we are trying many times to get lowest test error derived from observed part and prediction of observed
    # part.Currently it is useless because we are using direct copy of observed part and no use of prediction.Test error will be 0.
    parser.add_argument('--iteration', type=int, default=1,
                        help='Number of iteration to create test file (smallest test errror will be selected)')
    # gru model
    parser.add_argument('--gru', action="store_true", default=False,
                        help='True : GRU cell, False: LSTM cell')
    # method selection
    parser.add_argument('--method', type=int, default=1,
                        help='Method of lstm will be used (1 = social lstm, 2 = obstacle lstm, 3 = vanilla lstm)')
    # Parse the parameters
    sample_args = parser.parse_args()
    # for drive run (plain truthiness instead of "is True" identity check)
    prefix = ''
    f_prefix = '.'
    if sample_args.drive:
        prefix='drive/semester_project/social_lstm_final/'
        f_prefix = 'drive/semester_project/social_lstm_final'
    #run sh file for folder creation
    # if not os.path.isdir("log/"):
    #print("Directory creation script is running...")
    #   subprocess.call([f_prefix+'/make_directories.sh'])
    method_name = get_method_name(sample_args.method)
    model_name = "LSTM"
    save_tar_name = method_name+"_lstm_model_"
    if sample_args.gru:
        model_name = "GRU"
        save_tar_name = method_name+"_gru_model_"
    #print("Selected method name: ", method_name, " model name: ", model_name)
    # Save directory
    save_directory = os.path.join(f_prefix, 'model/', method_name, model_name)
    #plot directory for plotting in the future
    plot_directory = os.path.join(f_prefix, 'plot/', method_name, model_name)
    result_directory = os.path.join(f_prefix, 'result/', method_name)
    plot_test_file_directory = 'test'
    # Define the path for the config file for saved args
    with open(os.path.join(save_directory,'config.pkl'), 'rb') as f:
        saved_args = pickle.load(f)
    seq_lenght = sample_args.pred_length + sample_args.obs_length
    # Create the DataLoader object
    dataloader = DataLoader(f_prefix, 1, seq_lenght, forcePreProcess = True, infer=True)
    create_directories(os.path.join(result_directory, model_name), dataloader.get_all_directory_namelist())
    create_directories(plot_directory, [plot_test_file_directory])
    dataloader.reset_batch_pointer()
    dataset_pointer_ins = dataloader.dataset_pointer
    smallest_err = 100000
    smallest_err_iter_num = -1
    origin = (0,0)
    reference_point = (0,1)
    submission_store = [] # store submission data points (txt)
    result_store = [] # store points for plotting
    for iteration in range(sample_args.iteration):
        # Initialize net
        net = get_model(sample_args.method, saved_args, True)
        if sample_args.use_cuda:
            net = net.cuda()
        # Get the checkpoint path
        checkpoint_path = os.path.join(save_directory, save_tar_name+str(sample_args.epoch)+'.tar')
        # NOTE(review): when the checkpoint file is missing this silently
        # proceeds with untrained weights — confirm that is intended.
        if os.path.isfile(checkpoint_path):
            #print('Loading checkpoint')
            checkpoint = torch.load(checkpoint_path)
            model_epoch = checkpoint['epoch']
            net.load_state_dict(checkpoint['state_dict'])
            #print('Loaded checkpoint at epoch', model_epoch)
        # For each batch
        iteration_submission = []
        iteration_result = []
        results = []
        submission = []
        # Variable to maintain total error
        total_error = 0
        final_error = 0
        for batch in range(dataloader.num_batches):
            start = time.time()
            # Get data
            x, y, d , numPedsList, PedsList ,target_ids = dataloader.next_batch()
            # Get the sequence (batch size is 1, so take element 0;
            # the dead pre-initialization to zeros was removed)
            # print(target_ids[0])
            x_seq, d_seq ,numPedsList_seq, PedsList_seq, target_id = x[0], d[0], numPedsList[0], PedsList[0], target_ids
            dataloader.clean_test_data(x_seq, target_id, sample_args.obs_length, sample_args.pred_length)
            dataloader.clean_ped_list(x_seq, PedsList_seq, target_id, sample_args.obs_length, sample_args.pred_length)
            #get processing file name and then get dimensions of file
            folder_name = dataloader.get_directory_name_with_pointer(d_seq)
            dataset_data = dataloader.get_dataset_dimension(folder_name)
            #dense vector creation
            x_seq, lookup_seq = dataloader.convert_proper_array(x_seq, numPedsList_seq, PedsList_seq)
            #will be used for error calculation
            orig_x_seq = x_seq.clone()
            target_id_values = orig_x_seq[0][lookup_seq[target_id], 0:2]
            #grid mask calculation
            if sample_args.method == 2: #obstacle lstm
                grid_seq = getSequenceGridMask(x_seq, dataset_data, PedsList_seq, saved_args.neighborhood_size, saved_args.grid_size, saved_args.use_cuda, True)
            elif sample_args.method == 1: #social lstm
                grid_seq = getSequenceGridMask(x_seq, dataset_data, PedsList_seq, saved_args.neighborhood_size, saved_args.grid_size, saved_args.use_cuda)
            #vectorize datapoints
            x_seq, first_values_dict = vectorize_seq(x_seq, PedsList_seq, lookup_seq)
            # <------------- Experimental block ---------------->
            # x_seq = translate(x_seq, PedsList_seq, lookup_seq ,target_id_values)
            # angle = angle_between(reference_point, (x_seq[1][lookup_seq[target_id], 0].data.numpy(), x_seq[1][lookup_seq[target_id], 1].data.numpy()))
            # x_seq = rotate_traj_with_target_ped(x_seq, angle, PedsList_seq, lookup_seq)
            # grid_seq = getSequenceGridMask(x_seq[:sample_args.obs_length], dataset_data, PedsList_seq, saved_args.neighborhood_size, saved_args.grid_size, sample_args.use_cuda)
            # x_seq, first_values_dict = vectorize_seq(x_seq, PedsList_seq, lookup_seq)
            if sample_args.use_cuda:
                x_seq = x_seq.cuda()
            # The sample function
            if sample_args.method == 3: #vanilla lstm
                # Extract the observed part of the trajectories
                obs_traj, obs_PedsList_seq = x_seq[:sample_args.obs_length], PedsList_seq[:sample_args.obs_length]
                ret_x_seq = sample(obs_traj, obs_PedsList_seq, sample_args, net, x_seq, PedsList_seq, saved_args, dataset_data, dataloader, lookup_seq, numPedsList_seq, sample_args.gru)
            else:
                # Extract the observed part of the trajectories
                obs_traj, obs_PedsList_seq, obs_grid = x_seq[:sample_args.obs_length], PedsList_seq[:sample_args.obs_length], grid_seq[:sample_args.obs_length]
                ret_x_seq = sample(obs_traj, obs_PedsList_seq, sample_args, net, x_seq, PedsList_seq, saved_args, dataset_data, dataloader, lookup_seq, numPedsList_seq, sample_args.gru, obs_grid)
            #revert the points back to original space
            ret_x_seq = revert_seq(ret_x_seq, PedsList_seq, lookup_seq, first_values_dict)
            # <--------------------- Experimental inverse block ---------------------->
            # ret_x_seq = revert_seq(ret_x_seq, PedsList_seq, lookup_seq, target_id_values, first_values_dict)
            # ret_x_seq = rotate_traj_with_target_ped(ret_x_seq, -angle, PedsList_seq, lookup_seq)
            # ret_x_seq = translate(ret_x_seq, PedsList_seq, lookup_seq ,-target_id_values)
            # Record the mean and final displacement error
            total_error += get_mean_error(ret_x_seq[1:sample_args.obs_length].data, orig_x_seq[1:sample_args.obs_length].data, PedsList_seq[1:sample_args.obs_length], PedsList_seq[1:sample_args.obs_length], sample_args.use_cuda, lookup_seq)
            final_error += get_final_error(ret_x_seq[1:sample_args.obs_length].data, orig_x_seq[1:sample_args.obs_length].data, PedsList_seq[1:sample_args.obs_length], PedsList_seq[1:sample_args.obs_length], lookup_seq)
            end = time.time()
            #print('Current file : ', dataloader.get_file_name(0),' Processed trajectory number : ', batch+1, 'out of', dataloader.num_batches, 'trajectories in time', end - start)
            # BUG FIX: use value comparison (!=) instead of identity
            # ("is not") — identity on ints only works by accident of
            # CPython's small-int caching and warns on Python 3.8+.
            if dataset_pointer_ins != dataloader.dataset_pointer:
                if dataloader.dataset_pointer != 0:
                    iteration_submission.append(submission)
                    iteration_result.append(results)
                dataset_pointer_ins = dataloader.dataset_pointer
                submission = []
                results = []
            submission.append(submission_preprocess(dataloader, ret_x_seq.data[sample_args.obs_length:, lookup_seq[target_id], :].numpy(), sample_args.pred_length, sample_args.obs_length, target_id))
            results.append((x_seq.data.cpu().numpy(), ret_x_seq.data.cpu().numpy(), PedsList_seq, lookup_seq , dataloader.get_frame_sequence(seq_lenght), target_id, sample_args.obs_length))
        iteration_submission.append(submission)
        iteration_result.append(results)
        submission_store.append(iteration_submission)
        result_store.append(iteration_result)
        if total_error<smallest_err:
            #print("**********************************************************")
            #print('Best iteration has been changed. Previous best iteration: ', smallest_err_iter_num+1, 'Error: ', smallest_err / dataloader.num_batches)
            #print('New best iteration : ', iteration+1, 'Error: ',total_error / dataloader.num_batches)
            smallest_err_iter_num = iteration
            smallest_err = total_error
        #print('Iteration:' ,iteration+1,' Total training (observed part) mean error of the model is ', total_error / dataloader.num_batches)
        #print('Iteration:' ,iteration+1,'Total training (observed part) final error of the model is ', final_error / dataloader.num_batches)
        #print(submission)
    #print('Smallest error iteration:', smallest_err_iter_num+1)
    dataloader.write_to_file(submission_store[smallest_err_iter_num], result_directory, prefix, model_name)
    dataloader.write_to_plot_file(result_store[smallest_err_iter_num], os.path.join(plot_directory, plot_test_file_directory))
    # print(ret_x_seq[10:20])
    return ret_x_seq[10:20]
def sample(x_seq, Pedlist, args, net, true_x_seq, true_Pedlist, saved_args, dimensions, dataloader, look_up, num_pedlist, is_gru, grid = None):
'''
The sample function
params:
x_seq: Input positions
Pedlist: Peds present in each frame
args: arguments
net: The model
true_x_seq: True positions
true_Pedlist: The true peds present in each frame
saved_args: Training arguments
dimensions: The dimensions of the dataset
target_id: ped_id number that try to predict in this sequence
'''
# Number of peds in the sequence
numx_seq = len(look_up)
with torch.no_grad():
# Construct variables for hidden and cell states
hidden_states = Variable(torch.zeros(numx_seq, net.args.rnn_size))
if args.use_cuda:
hidden_states = hidden_states.cuda()
if not is_gru:
cell_states = Variable(torch.zeros(numx_seq, net.args.rnn_size))
if args.use_cuda:
cell_states = cell_states.cuda()
else:
cell_states = None
ret_x_seq = Variable(torch.zeros(args.obs_length+args.pred_length, numx_seq, 2))
# Initialize the return data structure
if args.use_cuda:
ret_x_seq = ret_x_seq.cuda()
# For the observed part of the trajectory
for tstep in range(args.obs_length-1):
if grid is None: #vanilla lstm
# Do a forward prop
out_obs, hidden_states, cell_states = net(x_seq[tstep].view(1, numx_seq, 2), hidden_states, cell_states, [Pedlist[tstep]], [num_pedlist[tstep]], dataloader, look_up)
else:
# Do a forward prop
out_obs, hidden_states, cell_states = net(x_seq[tstep].view(1, numx_seq, 2), [grid[tstep]], hidden_states, cell_states, [Pedlist[tstep]], [num_pedlist[tstep]], dataloader, look_up)
# loss_obs = Gaussian2DLikelihood(out_obs, x_seq[tstep+1].view(1, numx_seq, 2), [Pedlist[tstep+1]])
# Extract the mean, std and corr of the bivariate Gaussian
mux, muy, sx, sy, corr = getCoef(out_obs)
# Sample from the bivariate Gaussian
next_x, next_y = sample_gaussian_2d(mux.data, muy.data, sx.data, sy.data, corr.data, true_Pedlist[tstep], look_up)
ret_x_seq[tstep + 1, :, 0] = next_x
ret_x_seq[tstep + 1, :, 1] = next_y
ret_x_seq[:args.obs_length, :, :] = x_seq.clone()
# Last seen grid
if grid is not None: #no vanilla lstm
prev_grid = grid[-1].clone()
#assign last position of observed data to temp
#temp_last_observed = ret_x_seq[args.obs_length-1].clone()
#ret_x_seq[args.obs_length-1] = x_seq[args.obs_length-1]
# For the predicted part of the trajectory
for tstep in range(args.obs_length-1, args.pred_length + args.obs_length-1):
# Do a forward prop
if grid is None: #vanilla lstm
outputs, hidden_states, cell_states = net(ret_x_seq[tstep].view(1, numx_seq, 2), hidden_states, cell_states, [true_Pedlist[tstep]], [num_pedlist[tstep]], dataloader, look_up)
else:
outputs, hidden_states, cell_states = net(ret_x_seq[tstep].view(1, numx_seq, 2), [prev_grid], hidden_states, cell_states, [true_Pedlist[tstep]], [num_pedlist[tstep]], dataloader, look_up)
# Extract the mean, std and corr of the bivariate Gaussian
mux, muy, sx, sy, corr = getCoef(outputs)
# Sample from the bivariate Gaussian
next_x, next_y = sample_gaussian_2d(mux.data, muy.data, | |
8)
# Bits reserved_0
# Bits PA_LEVEL_7
# Output power level for 7th slot.
# Register PA_POWER6
# ---- PA power-ramping slot levels: one 8-bit register per ramp slot ----
def setPA_POWER6(self, val):
    """Set register PA_POWER6"""
    self.write(REG.PA_POWER6, val, 8)
def getPA_POWER6(self):
    """Get register PA_POWER6"""
    return self.read(REG.PA_POWER6, 8)
# Bits reserved_0
# Bits PA_LEVEL_6
# Output power level for 6th slot.
# Register PA_POWER5
def setPA_POWER5(self, val):
    """Set register PA_POWER5"""
    self.write(REG.PA_POWER5, val, 8)
def getPA_POWER5(self):
    """Get register PA_POWER5"""
    return self.read(REG.PA_POWER5, 8)
# Bits reserved_0
# Bits PA_LEVEL_5
# Output power level for 5th slot.
# Register PA_POWER4
def setPA_POWER4(self, val):
    """Set register PA_POWER4"""
    self.write(REG.PA_POWER4, val, 8)
def getPA_POWER4(self):
    """Get register PA_POWER4"""
    return self.read(REG.PA_POWER4, 8)
# Bits reserved_0
# Bits PA_LEVEL_4
# Output power level for 4th slot.
# Register PA_POWER3
def setPA_POWER3(self, val):
    """Set register PA_POWER3"""
    self.write(REG.PA_POWER3, val, 8)
def getPA_POWER3(self):
    """Get register PA_POWER3"""
    return self.read(REG.PA_POWER3, 8)
# Bits reserved_0
# Bits PA_LEVEL_3
# Output power level for 3rd slot.
# Register PA_POWER2
def setPA_POWER2(self, val):
    """Set register PA_POWER2"""
    self.write(REG.PA_POWER2, val, 8)
def getPA_POWER2(self):
    """Get register PA_POWER2"""
    return self.read(REG.PA_POWER2, 8)
# Bits reserved_0
# Bits PA_LEVEL_2
# Output power level for 2nd slot.
# Register PA_POWER1
def setPA_POWER1(self, val):
    """Set register PA_POWER1"""
    self.write(REG.PA_POWER1, val, 8)
def getPA_POWER1(self):
    """Get register PA_POWER1"""
    return self.read(REG.PA_POWER1, 8)
# Bits reserved_0
# Bits PA_LEVEL_1
# Output power level for 1st slot.
# Register PA_POWER0
def setPA_POWER0(self, val):
    """Set register PA_POWER0"""
    self.write(REG.PA_POWER0, val, 8)
def getPA_POWER0(self):
    """Get register PA_POWER0"""
    return self.read(REG.PA_POWER0, 8)
# NOTE: unlike PA_POWER7..1, PA_POWER0 mixes the power-ramping
# configuration bits in with the final/selected level index.
# Bits DIG_SMOOTH_EN
# Bits PA_MAXDBM
# Bits PA_RAMP_EN
# Bits PA_RAMP_STEP_LEN
# Set the step width (unit: 1/8 of bit period).
# Bits PA_LEVEL_MAX_IDX
# Final level for power ramping or selected output power index.
# ---- PA configuration registers ----
# Register PA_CONFIG1
def setPA_CONFIG1(self, val):
    """Set register PA_CONFIG1"""
    self.write(REG.PA_CONFIG1, val, 8)
def getPA_CONFIG1(self):
    """Get register PA_CONFIG1"""
    return self.read(REG.PA_CONFIG1, 8)
# Bits reserved_0
# Bits FIR_CFG
# FIR configuration:
# Bits FIR_EN
# Bits reserved_1
# Register PA_CONFIG0
def setPA_CONFIG0(self, val):
    """Set register PA_CONFIG0"""
    self.write(REG.PA_CONFIG0, val, 8)
def getPA_CONFIG0(self):
    """Get register PA_CONFIG0"""
    return self.read(REG.PA_CONFIG0, 8)
# Bits reserved_0
# Bits PA_FC
# PA Bessel filter bandwidth:
# ---- Synthesizer / VCO configuration and calibration-override inputs ----
# Register SYNTH_CONFIG2
def setSYNTH_CONFIG2(self, val):
    """Set register SYNTH_CONFIG2"""
    self.write(REG.SYNTH_CONFIG2, val, 8)
def getSYNTH_CONFIG2(self):
    """Get register SYNTH_CONFIG2"""
    return self.read(REG.SYNTH_CONFIG2, 8)
# Bits reserved_0
# Bits PLL_PFD_SPLIT_EN
# Enables increased DN current pulses to improve linearization of CP/PFD (see Table 34).
# Bits reserved_1
# Register VCO_CONFIG
def setVCO_CONFIG(self, val):
    """Set register VCO_CONFIG"""
    self.write(REG.VCO_CONFIG, val, 8)
def getVCO_CONFIG(self):
    """Get register VCO_CONFIG"""
    return self.read(REG.VCO_CONFIG, 8)
# Bits reserved_0
# Bits VCO_CALAMP_EXT_SEL
# Bits VCO_CALFREQ_EXT_SEL
# Bits reserved_1
# The VCO_CALIBR_IN* registers supply externally-provided calibration
# words (selected via the VCO_CAL*_EXT_SEL bits above).
# Register VCO_CALIBR_IN2
def setVCO_CALIBR_IN2(self, val):
    """Set register VCO_CALIBR_IN2"""
    self.write(REG.VCO_CALIBR_IN2, val, 8)
def getVCO_CALIBR_IN2(self):
    """Get register VCO_CALIBR_IN2"""
    return self.read(REG.VCO_CALIBR_IN2, 8)
# Bits VCO_CALAMP_TX
# VCO magnitude calibration word (binary coding to be internally converted in thermometric code) used in TX.
# Bits VCO_CALAMP_RX
# VCO magnitude calibration word (binary coding to be internally converted in thermometric code) used in RX.
# Register VCO_CALIBR_IN1
def setVCO_CALIBR_IN1(self, val):
    """Set register VCO_CALIBR_IN1"""
    self.write(REG.VCO_CALIBR_IN1, val, 8)
def getVCO_CALIBR_IN1(self):
    """Get register VCO_CALIBR_IN1"""
    return self.read(REG.VCO_CALIBR_IN1, 8)
# Bits reserved_0
# Bits VCO_CALFREQ_TX
# VCO Cbank frequency calibration word to be used in TX.
# Register VCO_CALIBR_IN0
def setVCO_CALIBR_IN0(self, val):
    """Set register VCO_CALIBR_IN0"""
    self.write(REG.VCO_CALIBR_IN0, val, 8)
def getVCO_CALIBR_IN0(self):
    """Get register VCO_CALIBR_IN0"""
    return self.read(REG.VCO_CALIBR_IN0, 8)
# Bits reserved_0
# Bits VCO_CALFREQ_RX
# VCO Cbank frequency calibration word to be used in RX.
# ---- Crystal (XO) / RC (RCO) oscillator configuration ----
# Register XO_RCO_CONF1
def setXO_RCO_CONF1(self, val):
    """Set register XO_RCO_CONF1"""
    self.write(REG.XO_RCO_CONF1, val, 8)
def getXO_RCO_CONF1(self):
    """Get register XO_RCO_CONF1"""
    return self.read(REG.XO_RCO_CONF1, 8)
# Bits reserved_0
# Bits PD_CLKDIV
# Bits reserved_1
# Register XO_RCO_CONF0
def setXO_RCO_CONF0(self, val):
    """Set register XO_RCO_CONF0"""
    self.write(REG.XO_RCO_CONF0, val, 8)
def getXO_RCO_CONF0(self):
    """Get register XO_RCO_CONF0"""
    return self.read(REG.XO_RCO_CONF0, 8)
# Bits EXT_REF
# Bits GM_CONF
# Set the driver gm of the XO at start up.
# Bits REFDIV
# Bits reserved_0
# Bits EXT_RCO_OSC
# 1: the 34.7 kHz signal must be supplied from any GPIO.
# Bits RCO_CALIBRATION
# The RCO_CALIBR_CONF* registers supply manual RCO calibration words
# (RWT plus the split MSB/LSB RFB word).
# Register RCO_CALIBR_CONF3
def setRCO_CALIBR_CONF3(self, val):
    """Set register RCO_CALIBR_CONF3"""
    self.write(REG.RCO_CALIBR_CONF3, val, 8)
def getRCO_CALIBR_CONF3(self):
    """Get register RCO_CALIBR_CONF3"""
    return self.read(REG.RCO_CALIBR_CONF3, 8)
# Bits RWT_IN
# RWT word value for the RCO.
# Bits RFB_IN
# MSB part of RFB word value for RCO.
# Register RCO_CALIBR_CONF2
def setRCO_CALIBR_CONF2(self, val):
    """Set register RCO_CALIBR_CONF2"""
    self.write(REG.RCO_CALIBR_CONF2, val, 8)
def getRCO_CALIBR_CONF2(self):
    """Get register RCO_CALIBR_CONF2"""
    return self.read(REG.RCO_CALIBR_CONF2, 8)
# Bits RFB_IN
# LSB part of RFB word value for RCO.
# Bits reserved_0
# ---- Power-management (SMPS / LDO / battery-level detector) config ----
# Register PM_CONF4
def setPM_CONF4(self, val):
    """Set register PM_CONF4"""
    self.write(REG.PM_CONF4, val, 8)
def getPM_CONF4(self):
    """Get register PM_CONF4"""
    return self.read(REG.PM_CONF4, 8)
# Bits reserved_0
# Bits EXT_SMPS
# Bits reserved_1
# Register PM_CONF3
def setPM_CONF3(self, val):
    """Set register PM_CONF3"""
    self.write(REG.PM_CONF3, val, 8)
def getPM_CONF3(self):
    """Get register PM_CONF3"""
    return self.read(REG.PM_CONF3, 8)
# Bits KRM_EN
# Bits KRM
# Sets the divider ratio (MSB) of the rate multiplier (default: Fsw=Fdig/4)
# Register PM_CONF2
def setPM_CONF2(self, val):
    """Set register PM_CONF2"""
    self.write(REG.PM_CONF2, val, 8)
def getPM_CONF2(self):
    """Get register PM_CONF2"""
    return self.read(REG.PM_CONF2, 8)
# Bits KRM
# Sets the divider ratio (LSB) of the rate multiplier (default: Fsw=Fdig/4)
# Register PM_CONF1
def setPM_CONF1(self, val):
    """Set register PM_CONF1"""
    self.write(REG.PM_CONF1, val, 8)
def getPM_CONF1(self):
    """Get register PM_CONF1"""
    return self.read(REG.PM_CONF1, 8)
# Bits reserved_0
# Bits BATTERY_LVL_EN
# Bits SET_BLD_TH
# Set the BLD threshold:
# Bits SMPS_LVL_MODE
# - 0: SMPS output level will depend upon the value written in the PM_CONFIG0 register (SET_SMPS_LEVEL field) both in RX and TX state.
# - 1: SMPS output level will depend upon the value in PM_CONFIG register just in TX state, while in RX state it will be fixed to 1.4 V
# Bits BYPASS_LDO
# Set to 0 (default value)
# Bits reserved_1
# Register PM_CONF0
def setPM_CONF0(self, val):
    """Set register PM_CONF0"""
    self.write(REG.PM_CONF0, val, 8)
def getPM_CONF0(self):
    """Get register PM_CONF0"""
    return self.read(REG.PM_CONF0, 8)
# Bits reserved_0
# Bits SET_SMPS_LVL
# SMPS output voltage:
# Bits reserved_1
# Bits SLEEP_MODE_SEL
# ---- Radio state-machine and FIFO status ----
# NOTE(review): MC_STATE* and the FIFO status registers look like
# hardware status (read-only) registers; the generated setters
# presumably have no effect on the device -- confirm against the
# datasheet before relying on them.
# Register MC_STATE1
def setMC_STATE1(self, val):
    """Set register MC_STATE1"""
    self.write(REG.MC_STATE1, val, 8)
def getMC_STATE1(self):
    """Get register MC_STATE1"""
    return self.read(REG.MC_STATE1, 8)
# Bits reserved_0
# Bits RCO_CAL_OK
# RCO calibration successfully terminated.
# Bits ANT_SEL
# Currently selected antenna.
# Bits TX_FIFO_FULL
# Bits RX_FIFO_EMPTY
# Bits ERROR_LOCK
# Register MC_STATE0
def setMC_STATE0(self, val):
    """Set register MC_STATE0"""
    self.write(REG.MC_STATE0, val, 8)
def getMC_STATE0(self):
    """Get register MC_STATE0"""
    return self.read(REG.MC_STATE0, 8)
# Bits STATE
# Current state.
# Bits XO_ON
# Register TX_FIFO_STATUS
def setTX_FIFO_STATUS(self, val):
    """Set register TX_FIFO_STATUS"""
    self.write(REG.TX_FIFO_STATUS, val, 8)
def getTX_FIFO_STATUS(self):
    """Get register TX_FIFO_STATUS"""
    return self.read(REG.TX_FIFO_STATUS, 8)
# Bits NELEM_TXFIFO
# Number of elements in TX FIFO.
# Register RX_FIFO_STATUS
def setRX_FIFO_STATUS(self, val):
    """Set register RX_FIFO_STATUS"""
    self.write(REG.RX_FIFO_STATUS, val, 8)
def getRX_FIFO_STATUS(self):
    """Get register RX_FIFO_STATUS"""
    return self.read(REG.RX_FIFO_STATUS, 8)
# Bits NELEM_RXFIFO
# Number of elements in RX FIFO.
# ---- Calibrator output words (results of the internal RCO/VCO
# calibration engines; counterparts of the *_IN registers above) ----
# Register RCO_CALIBR_OUT4
def setRCO_CALIBR_OUT4(self, val):
    """Set register RCO_CALIBR_OUT4"""
    self.write(REG.RCO_CALIBR_OUT4, val, 8)
def getRCO_CALIBR_OUT4(self):
    """Get register RCO_CALIBR_OUT4"""
    return self.read(REG.RCO_CALIBR_OUT4, 8)
# Bits RWT_OUT
# RWT word from internal RCO calibrator.
# Bits RFB_OUT
# RFB word (MSB) from internal RCO calibrator.
# Register RCO_CALIBR_OUT3
def setRCO_CALIBR_OUT3(self, val):
    """Set register RCO_CALIBR_OUT3"""
    self.write(REG.RCO_CALIBR_OUT3, val, 8)
def getRCO_CALIBR_OUT3(self):
    """Get register RCO_CALIBR_OUT3"""
    return self.read(REG.RCO_CALIBR_OUT3, 8)
# Bits RFB_OUT
# RFB word (LSB) from internal RCO calibrator.
# Bits reserved_0
# Register VCO_CALIBR_OUT1
def setVCO_CALIBR_OUT1(self, val):
    """Set register VCO_CALIBR_OUT1"""
    self.write(REG.VCO_CALIBR_OUT1, val, 8)
def getVCO_CALIBR_OUT1(self):
    """Get register VCO_CALIBR_OUT1"""
    return self.read(REG.VCO_CALIBR_OUT1, 8)
# Bits reserved_0
# Bits VCO_CAL_AMP_OUT
# VCO magnitude calibration output word (binary coding internally converted from thermometric coding).
# Register VCO_CALIBROUT0
def setVCO_CALIBROUT0(self, val):
    """Set register VCO_CALIBROUT0"""
    self.write(REG.VCO_CALIBROUT0, val, 8)
def getVCO_CALIBROUT0(self):
    """Get register VCO_CALIBROUT0"""
    return self.read(REG.VCO_CALIBROUT0, 8)
# Bits reserved_0
# Bits VCO_CAL_FREQ_OUT
# VCO Cbank frequency calibration output word (binary coding internally converted from thermometric coding).
# ---- Packet and link-quality status registers ----
# Register TX_PCKT_INFO
def setTX_PCKT_INFO(self, val):
    """Set register TX_PCKT_INFO"""
    self.write(REG.TX_PCKT_INFO, val, 8)
def getTX_PCKT_INFO(self):
    """Get register TX_PCKT_INFO"""
    return self.read(REG.TX_PCKT_INFO, 8)
# Bits reserved_0
# Bits TX_SEQ_NUM
# Current TX packet sequence number.
# Bits N_RETX
# Number of re-transmissions done for the last TX packet.
# Register RX_PCKT_INFO
def setRX_PCKT_INFO(self, val):
    """Set register RX_PCKT_INFO"""
    self.write(REG.RX_PCKT_INFO, val, 8)
def getRX_PCKT_INFO(self):
    """Get register RX_PCKT_INFO"""
    return self.read(REG.RX_PCKT_INFO, 8)
# Bits reserved_0
# Bits NACK_RX
# NACK field of the received packet.
# Bits RX_SEQ_NUM
# Sequence number of the received packet.
# Register AFC_CORR
def setAFC_CORR(self, val):
    """Set register AFC_CORR"""
    self.write(REG.AFC_CORR, val, 8)
def getAFC_CORR(self):
    """Get register AFC_CORR"""
    return self.read(REG.AFC_CORR, 8)
# Bits AFC_CORR
# AFC corrected value.
# Register LINK_QUALIF2
def setLINK_QUALIF2(self, val):
    """Set register LINK_QUALIF2"""
    self.write(REG.LINK_QUALIF2, val, 8)
def getLINK_QUALIF2(self):
    """Get register LINK_QUALIF2"""
    return self.read(REG.LINK_QUALIF2, 8)
# Bits PQI
# PQI value of the received packet.
# Register LINK_QUALIF1
def setLINK_QUALIF1(self, val):
    """Set register LINK_QUALIF1"""
    self.write(REG.LINK_QUALIF1, val, 8)
def getLINK_QUALIF1(self):
    """Get register LINK_QUALIF1"""
    return self.read(REG.LINK_QUALIF1, 8)
# Bits CS
# Carrier Sense indication.
# Bits SQI
# SQI value of the received packet.
# Register RSSI_LEVEL
def setRSSI_LEVEL(self, val):
    """Set register RSSI_LEVEL"""
    self.write(REG.RSSI_LEVEL, val, 8)
def getRSSI_LEVEL(self):
    """Get register RSSI_LEVEL"""
    return self.read(REG.RSSI_LEVEL, 8)
# Bits RSSI_LEVEL
# RSSI level captured at the end of the SYNC word detection of the received packet.
# ---- Received-packet length, CRC and address fields ----
# Register RX_PCKT_LEN1
# Length of the packet received.
# NOTE: accessed with a 16-bit width (unlike the 8-bit registers in
# this file), i.e. the length value spans two register bytes.
def setRX_PCKT_LEN1(self, val):
    """Set register RX_PCKT_LEN1"""
    self.write(REG.RX_PCKT_LEN1, val, 16)
def getRX_PCKT_LEN1(self):
    """Get register RX_PCKT_LEN1"""
    return self.read(REG.RX_PCKT_LEN1, 16)
# Bits RX_PCKT_LEN1
# Register CRC_FIELD3
def setCRC_FIELD3(self, val):
    """Set register CRC_FIELD3"""
    self.write(REG.CRC_FIELD3, val, 8)
def getCRC_FIELD3(self):
    """Get register CRC_FIELD3"""
    return self.read(REG.CRC_FIELD3, 8)
# Bits CRC_FIELD3
# CRC field 3 of the received packet.
# Register CRC_FIELD2
def setCRC_FIELD2(self, val):
    """Set register CRC_FIELD2"""
    self.write(REG.CRC_FIELD2, val, 8)
def getCRC_FIELD2(self):
    """Get register CRC_FIELD2"""
    return self.read(REG.CRC_FIELD2, 8)
# Bits CRC_FIELD2
# CRC field 2 of the received packet.
# Register CRC_FIELD1
def setCRC_FIELD1(self, val):
    """Set register CRC_FIELD1"""
    self.write(REG.CRC_FIELD1, val, 8)
def getCRC_FIELD1(self):
    """Get register CRC_FIELD1"""
    return self.read(REG.CRC_FIELD1, 8)
# Bits CRC_FIELD1
# CRC field 1 of the received packet.
# Register CRC_FIELD0
def setCRC_FIELD0(self, val):
    """Set register CRC_FIELD0"""
    self.write(REG.CRC_FIELD0, val, 8)
def getCRC_FIELD0(self):
    """Get register CRC_FIELD0"""
    return self.read(REG.CRC_FIELD0, 8)
# Bits CRC_FIELD0
# CRC field 0 of the received packet.
# Register RX_ADDRE_FIELD1
def setRX_ADDRE_FIELD1(self, val):
    """Set register RX_ADDRE_FIELD1"""
    self.write(REG.RX_ADDRE_FIELD1, val, 8)
def getRX_ADDRE_FIELD1(self):
    """Get register RX_ADDRE_FIELD1"""
    return self.read(REG.RX_ADDRE_FIELD1, 8)
# Bits RX_ADDRE_FIELD1
# Source address field of the received packet.
# Register RX_ADDRE_FIELD0
def setRX_ADDRE_FIELD0(self, val):
    """Set register RX_ADDRE_FIELD0"""
    self.write(REG.RX_ADDRE_FIELD0, val, 8)
def getRX_ADDRE_FIELD0(self):
    """Get register RX_ADDRE_FIELD0"""
    return self.read(REG.RX_ADDRE_FIELD0, 8)
# Bits RX_ADDRE_FIELD0
# Destination address field of the received packet.
# Register RSSI_LEVEL_RUN
# RSSI level of the received packet, which supports continuous fast SPI reading.
def setRSSI_LEVEL_RUN(self, | |
<filename>instmakelib/instmake_log.py
# Copyright (c) 2010 by Cisco Systems, Inc.
"""
Variables and functions to aid in reading instmake logs.
"""
from __future__ import nested_scopes
import cPickle as pickle
import sys
import os
import gzip
import socket
from instmakelib import instmake_toolnames
from instmakelib import shellsyntax
from instmakelib import instmake_build
# This is imported for backwards-compatibility for
# using clearaudit data from LogRecord 5 - 11.
# With LogRecord_12, the appropriate audit plugin is loaded
# via the PluginManager.
from instmakeplugins import audit_clearaudit
# Every instmake log starts with one of these pickled version strings;
# readers dispatch on it to pick the matching LogRecord_N class.
VERSION_ROOT = "INSTMAKE LOG VERSION "
INSTMAKE_VERSION_1 = VERSION_ROOT + "1"
INSTMAKE_VERSION_2 = VERSION_ROOT + "2"
INSTMAKE_VERSION_3 = VERSION_ROOT + "3"
INSTMAKE_VERSION_4 = VERSION_ROOT + "4"
INSTMAKE_VERSION_5 = VERSION_ROOT + "5"
INSTMAKE_VERSION_6 = VERSION_ROOT + "6"
INSTMAKE_VERSION_7 = VERSION_ROOT + "7"
INSTMAKE_VERSION_8 = VERSION_ROOT + "8"
INSTMAKE_VERSION_9 = VERSION_ROOT + "9"
INSTMAKE_VERSION_10 = VERSION_ROOT + "10"
INSTMAKE_VERSION_11 = VERSION_ROOT + "11"
INSTMAKE_VERSION_12 = VERSION_ROOT + "12"
INSTMAKE_VERSION_13 = VERSION_ROOT + "13"
INSTMAKE_VERSION_14 = VERSION_ROOT + "14"
INSTMAKE_VERSION_15 = VERSION_ROOT + "15"
# The version written for new logs (see WriteLatestHeader).
LATEST_VERSION = INSTMAKE_VERSION_15
ORIGIN_NOT_RECORDED = "not-recorded"
CLI_PLUGIN_PREFIX = "cli"
# These are the plugins needed during reporting
PLUGIN_PREFIXES = [ instmake_toolnames.TOOLNAME_PLUGIN_PREFIX,
        CLI_PLUGIN_PREFIX, instmake_build.AUDIT_PLUGIN_PREFIX ]
# This points to a single ToolNameManager object (installed by SetPlugins)
toolname_manager = None
# This points to a single PluginManager object (installed by SetPlugins)
global_plugins = None
# This points to a single Printer plugin (installed by SetPrinterPlugin)
global_printer = None
def SetPlugins(plugins):
    """Install the module-wide PluginManager and rebuild the
    ToolNameManager that is derived from it."""
    global global_plugins, toolname_manager
    global_plugins = plugins
    # The ToolNameManager depends on the plugin set, so it is rebuilt
    # every time the plugins are (re)installed.
    toolname_manager = instmake_toolnames.ToolNameManager(global_plugins)
def GetPlugins():
    """Returns the 'global_plugins' variable."""
    # None until SetPlugins() has been called.
    return global_plugins
def SetPrinterPlugin(printer):
    """Allow another module to set our 'global_printer' variable."""
    global global_printer
    # Replaces any previously-installed printer plugin.
    global_printer = printer
def WriteLatestHeader(fd, log_file_name,
    audit_plugin_name, audit_env_options, audit_cli_options):
    """Write a header to the log file. We put the version string
    first so that if someone uses "more" to view a log file,
    they easily know what it is. This function will call sys.exit()
    on failure.

    fd            -- an OS-level file descriptor, opened for writing.
    log_file_name -- name of the log, used only in error messages.
    """
    # Protocol 0 = dump as ASCII, so the version string is visible to
    # "more"/"head" at the top of the file.
    header_text = pickle.dumps(LATEST_VERSION, 0)
    header = LogHeader1(audit_plugin_name, audit_env_options, audit_cli_options)
    header_text += pickle.dumps(header)
    try:
        num_written = os.write(fd, header_text)
    except OSError as err:
        sys.exit("Failed to write to %s: %s" % (log_file_name, err))
    # BUGFIX: this branch previously formatted 'err' into the message,
    # but 'err' is only bound when os.write() raises; a short write
    # therefore died with a NameError instead of the intended message.
    if num_written != len(header_text):
        sys.exit("Failed to write to %s: short write (%d of %d bytes)" %
            (log_file_name, num_written, len(header_text)))
class LogRecord:
    """Base class for one instmake log record (one executed command).

    The version-specific subclasses (LogRecord_1, LogRecord_2, ...) fill
    these fields in from the pickled log data; a field stays None when
    the log version in question does not record it.
    """
    ppid = None # Parent Process ID
    pid = None # Process ID
    cwd = None # Current working directory
    retval = None # Return value of process
    times_start = None # Start TIMES
    times_end = None # End TIMES
    diff_times = None # (End - Start) TIMES
    cmdline = None # Command-line as a string
    cmdline_args = None # Command-line as individual arguments
    make_target = None # Make target, if using jmake --debug=e
    makefile_filename = None # Makefile of rule, if using jmake --debug=e
    makefile_lineno = None # Line number of rule, if using jmake --debug=e
    tool = None # The tool mentioned in the command-line,
                # calculated via ToolName plugins.
    input_files = None # Input files, if an appropriate audit
                # plugin was used.
    output_files = None # Output files, if an appropriate audit
                # plugin was used.
    execed_files = None # Executed files, if an appropriate audit
                # plugin was used.
    audit_ok = None # True/False: did the audit plugin succeed in
                # auditing this command?
    env_vars = None # Recorded environment-variables hash table.
    open_fds = None # List of open file descriptors before the
                # command started.
    make_vars = None # Recorded make-variables hash table.
    make_var_origins = None # Origins of make vars
    app_inst = None # Application-specific instrumentation fields
                # For instmake logs prior to 14, or for
                # corrupt or missing app-inst data, this is
                # None. Otherwise, it's a dictionary, which
                # could be empty, if the app wrote an
                # empty json dictionary.
    # Indices into the "times" tuples; each version subclass defines them.
    USER_TIME = None
    SYS_TIME = None
    REAL_TIME = None
    CPU_TIME = None
    # Does this version of the instmake log have a file header (log header)?
    HAS_LOG_HEADER = False
    # Does this version of the instmake log allow variable types of
    # audit plugins, not just clearaudit?
    HAS_VARIABLE_AUDIT_PLUGINS = False
    # Does this version of the instmake log have record classes that
    # use the LogHeader in their init() ?
    NEEDS_LOG_HEADER_IN_RECORD_INIT = False
    # Does the "real time" indicate clock time? Prior to version 15
    # of the log file, it did not, as the OS could use any arbitrary
    # point in time as the epoch. In version 15 of the log, we use
    # a system call that uses the traditional Jan 1, 1970 as the epoch.
    REAL_TIME_IS_CLOCK_TIME = False

    def TimeIndex(self, field):
        """Return the index used in the time array for a specified time,
        be it: "USER", "SYS", "REAL", or "CPU"."""
        if field == "USER":
            return self.USER_TIME
        elif field == "SYS":
            return self.SYS_TIME
        elif field == "REAL":
            return self.REAL_TIME
        elif field == "CPU":
            return self.CPU_TIME
        else:
            # An unrecognized field name is a programming error; abort.
            sys.exit("TimeIndex field '%s' not recognized." % (field,))

    def RealStartTime(self):
        """Return the real start time; this is useful for sorting records"""
        # NOTE(review): this reads times_end, not times_start, so it in
        # fact returns the record's *end* time. Callers may depend on the
        # current behavior -- confirm before changing.
        return self.times_end[self.REAL_TIME]

    def NormalizePath(self, path):
        """Given a path, if it's relative, join it with this record's CWD.
        Normalize the result, even if the path is absolute and is not joined
        with the CWD. Return the result. This method does not modify the
        record's data, even though you might think it does by its name."""
        # normalize_path() is presumably a module-level helper defined
        # elsewhere in this file (not visible in this chunk).
        return normalize_path(path, self.cwd)
class LogRecord_1(LogRecord):
    """Record layout for VERSION 1 instmake logs.

    A version-1 record is pickled as a flat array; the constants below
    give the index of each field within that array.
    """
    # Indices into the pickled record array.
    PARENT_PID = 0
    PID = 1
    CWD = 2
    RETVAL = 3
    START_TIMES = 4
    END_TIMES = 5
    DIFF_TIMES = 6
    ARGS = 7
    # The fields in the "times" arrays.
    SELF_USER_TIME = 0
    SELF_SYS_TIME = 1
    CHILD_USER_TIME = 2 # Normally, you use this instead of SELF_USER_TIME
    CHILD_SYS_TIME = 3 # Normally, you use this instead of SELF_SYS_TIME
    ELAPSED_REAL_TIME = 4
    CHILD_CPU_TIME = 5
    # Short-hand time-tuple indices
    USER_TIME = CHILD_USER_TIME
    SYS_TIME = CHILD_SYS_TIME
    REAL_TIME = ELAPSED_REAL_TIME
    CPU_TIME = CHILD_CPU_TIME

    def __init__(self, array):
        """Populate the record fields from a version-1 pickled array."""
        self.ppid = array[self.PARENT_PID]
        self.pid = array[self.PID]
        self.cwd = array[self.CWD]
        self.retval = array[self.RETVAL]
        self.times_start = array[self.START_TIMES]
        self.times_end = array[self.END_TIMES]
        self.ConvertArgsToCmdline(array[self.ARGS])
        # Calculate CPU_TIME for diff_times. (Comparison uses
        # "is not None" per PEP 8; DIFF_TIMES is left as a hook a
        # subclass may set to None to skip this step.)
        if self.DIFF_TIMES is not None:
            self.diff_times = array[self.DIFF_TIMES]
            # Append CPU time (user + sys) as the final element.
            self.diff_times += (self.diff_times[self.USER_TIME] + self.diff_times[self.SYS_TIME],)

    def ConvertArgsToCmdline(self, args):
        """Converts an array of command-line arguments to a single
        command-line string. Instmake is called 4 ways:
        1. Top-level; args are stored in an array
        2. -c, from make; the command-line is a single string.
        3. Directly from make if make knows that the argument to $(SHELL) is
        a shell script (i.e., no -c). Args are stored in an array.
        4. -r, from jmake, to store special records. args can be stored
        as an array.
        """
        # Convert array of arguments to a single string.
        self.cmdline = ' '.join(args)
        # Then split it on whitespace, but honor quotes.
        self.cmdline_args = shellsyntax.split_shell_cmdline(self.cmdline, 1)

    def CalculateDiffTimes(self):
        """In case the caller modifies start/end times and needs to re-calculate
        diff times."""
        # Compute the time difference.
        self.diff_times = [self.times_end[0] - self.times_start[0], # SELF USER
            self.times_end[1] - self.times_start[1], # SELF SYS
            self.times_end[2] - self.times_start[2], # CHILD USER
            self.times_end[3] - self.times_start[3], # CHILD SYS
            self.times_end[4] - self.times_start[4], # REAL
            -1] # CPU (filled in below)
        # Compute CPU time.
        self.diff_times[self.CPU_TIME] = self.diff_times[self.USER_TIME] + \
            self.diff_times[self.SYS_TIME]

    def SetTimesEnd(self, user, sys, real):
        """Set the 'times_end' attribute using the 3 new values.
        The SELF_* slots are zeroed; only child and real values are kept.
        NOTE(review): the tuple has no CHILD_CPU_TIME slot (index 5), so
        indexing times_end[CPU_TIME] after this call would raise IndexError.
        ('sys' here is the parameter, shadowing the sys module locally.)"""
        self.times_end = (0, 0, user, sys, real)
class LogRecord_2(LogRecord_1):
    """Fields that can record jmake-exported environment variables were
    were added."""
    MAKE_TARGET = 8
    MAKEFILE_FILENAME = 9
    MAKEFILE_LINENO = 10

    def __init__(self, array):
        LogRecord_1.__init__(self, array)
        # Unpack the three jmake-provided fields appended after the
        # version-1 fields in the record array.
        (self.make_target,
         self.makefile_filename,
         self.makefile_lineno) = (array[self.MAKE_TARGET],
                                  array[self.MAKEFILE_FILENAME],
                                  array[self.MAKEFILE_LINENO])
class LogRecord_3(LogRecord_2):
    """The TOOL field was added."""
    # TOOL = 11 # No longer needed, as TOOL is dynamically generated,
    # even when reading a VERSION 3 log file; the new dynamically-generated
    # TOOL will override the TOOL field that was stored in the log.
    def __init__(self, array):
        LogRecord_2.__init__(self, array)
        # Requires SetPlugins() to have been called first: toolname_manager
        # is a module global that stays None until then.
        self.tool = toolname_manager.GetTool(self.cmdline_args, self.cwd)
class LogRecord_4(LogRecord_2):
"""TOOL is no longer computed during the running of the build. Rather,
it is computed during the reading of the log file. This allows
for interesting ways of computing TOOL, esp. in the cases of
interpreted languages in which you really want to know the name
of the script and not the name of the interpretor."""
def __init__(self, array):
# Yes, we're a sub-class of version 2, not version | |
with data types which are not safe to be stored in text form
(i.e. BLOB) are converted to Base64, hence the size of such columns
cannot exceed approximately 0.74 * max_allowed_packet bytes, as
configured through that system variable at the target server.
Details
This operation writes table data dump to the specified by the user files.
Requires an open, global Shell session, and uses its connection options,
such as compression, ssl-mode, etc., to establish additional connections.
Options
The dialect option predefines the set of options fieldsTerminatedBy (FT),
fieldsEnclosedBy (FE), fieldsOptionallyEnclosed (FOE), fieldsEscapedBy
(FESC) and linesTerminatedBy (LT) in the following manner:
- default: no quoting, tab-separated, LF line endings. (LT=<LF>,
FESC='\', FT=<TAB>, FE=<empty>, FOE=false)
- csv: optionally quoted, comma-separated, CRLF line endings.
(LT=<CR><LF>, FESC='\', FT=",", FE='"', FOE=true)
- tsv: optionally quoted, tab-separated, CRLF line endings. (LT=<CR><LF>,
FESC='\', FT=<TAB>, FE='"', FOE=true)
- csv-unix: fully quoted, comma-separated, LF line endings. (LT=<LF>,
FESC='\', FT=",", FE='"', FOE=false)
The maxRate option supports unit suffixes:
- k - for kilobytes,
- M - for Megabytes,
- G - for Gigabytes,
i.e. maxRate="2k" - limit throughput to 2000 bytes per second.
Dumping to a Bucket in the OCI Object Storage
If the osBucketName option is used, the dump is stored in the specified
OCI bucket, connection is established using the local OCI profile. The
directory structure is simulated within the object name.
The osNamespace, ociConfigFile and ociProfile options cannot be used if
the osBucketName option is set to an empty string.
The osNamespace option overrides the OCI namespace obtained based on the
tenancy ID from the local OCI profile.
EXCEPTIONS
ArgumentError in the following scenarios:
- If any of the input arguments contains an invalid value.
RuntimeError in the following scenarios:
- If there is no open global session.
- If creating or writing to the output file fails.
#@<OUT> util import_json help
NAME
import_json - Import JSON documents from file to collection or table in
MySQL Server using X Protocol session.
SYNTAX
util.import_json(file[, options])
WHERE
file: Path to JSON documents file
options: Dictionary with import options
DESCRIPTION
This function reads standard JSON documents from a file, however, it also
supports converting BSON Data Types represented using the MongoDB
Extended Json (strict mode) into MySQL values.
The options dictionary supports the following options:
- schema: string - name of target schema.
- collection: string - name of collection where the data will be
imported.
- table: string - name of table where the data will be imported.
- tableColumn: string (default: "doc") - name of column in target table
where the imported JSON documents will be stored.
- convertBsonTypes: bool (default: false) - enables the BSON data type
conversion.
- convertBsonOid: bool (default: the value of convertBsonTypes) - enables
conversion of the BSON ObjectId values.
- extractOidTime: string (default: empty) - creates a new field based on
the ObjectID timestamp. Only valid if convertBsonOid is enabled.
The following options are valid only when convertBsonTypes is enabled.
They are all boolean flags. ignoreRegexOptions is enabled by default,
rest are disabled by default.
- ignoreDate: disables conversion of BSON Date values
- ignoreTimestamp: disables conversion of BSON Timestamp values
- ignoreRegex: disables conversion of BSON Regex values.
- ignoreBinary: disables conversion of BSON BinData values.
- decimalAsDouble: causes BSON Decimal values to be imported as double
values.
- ignoreRegexOptions: causes regex options to be ignored when processing
a Regex BSON value. This option is only valid if ignoreRegex is
disabled.
If the schema is not provided, an active schema on the global session, if
set, will be used.
The collection and the table options cannot be combined. If they are not
provided, the basename of the file without extension will be used as
target collection name.
If the target collection or table does not exist, they are created,
otherwise the data is inserted into the existing collection or table.
The tableColumn implies the use of the table option and cannot be
combined with the collection option.
BSON Data Type Processing.
If only convertBsonOid is enabled, no conversion will be done on the rest
of the BSON Data Types.
To use extractOidTime, it should be set to a name which will be used to
insert an additional field into the main document. The value of the new
field will be the timestamp obtained from the ObjectID value. Note that
this will be done only for an ObjectID value associated to the '_id'
field of the main document.
NumberLong and NumberInt values will be converted to integer values.
NumberDecimal values are imported as strings, unless decimalAsDouble is
enabled.
Regex values will be converted to strings containing the regular
expression. The regular expression options are ignored unless
ignoreRegexOptions is disabled. When ignoreRegexOptions is disabled the
regular expression will be converted to the form: /<regex>/<options>.
EXCEPTIONS
Throws ArgumentError when:
- Option name is invalid
- Required options are not set and cannot be deduced
- Shell is not connected to MySQL Server using X Protocol
- Schema is not provided and there is no active schema on the global
session
- Both collection and table are specified
Throws LogicError when:
- Path to JSON document does not exists or is not a file
Throws RuntimeError when:
- The schema does not exists
- MySQL Server returns an error
Throws InvalidJson when:
- JSON document is ill-formed
#@<OUT> util import_table help
NAME
import_table - Import table dump stored in files to target table using
LOAD DATA LOCAL INFILE calls in parallel connections.
SYNTAX
util.import_table(files[, options])
WHERE
files: Path or list of paths to files with user data. Path name can
contain a glob pattern with wildcard '*' and/or '?'. All selected
files must be chunks of the same target table.
options: Dictionary with import options
DESCRIPTION
The scheme part of a filename contains information about the transport
backend. Supported transport backends are: file://, http://, https://. If
the scheme part of a filename is omitted, then file:// transport backend
will be chosen.
Supported filename formats:
- /path/to/file - Path to a locally or remotely (e.g. in OCI Object
Storage) accessible file or directory
- file:///path/to/file - Path to a locally accessible file or directory
- http[s]://host.domain[:port]/path/to/file - Location of a remote file
accessible through HTTP(s) (import_table() only)
If the osBucketName option is given, the path argument must specify a
plain path in that OCI (Oracle Cloud Infrastructure) Object Storage
bucket.
The OCI configuration profile is located through the oci.profile and
oci.configFile global shell options and can be overridden with ociProfile
and ociConfigFile, respectively.
Options dictionary:
- schema: string (default: current shell active schema) - Name of target
schema
- table: string (default: filename without extension) - Name of target
table
- columns: array of strings and/or integers (default: empty array) - This
option takes an array of column names as its value. The order of the
column names indicates how to match data file columns with table
columns. Use non-negative integer `i` to capture column value into user
variable @i. With user variables, the decodeColumns option enables you
to perform preprocessing transformations on their values before
assigning the result to columns.
- fieldsTerminatedBy: string (default: "\t") - This option has the same
meaning as the corresponding clause for LOAD DATA INFILE.
- fieldsEnclosedBy: char (default: '') - This option has the same meaning
as the corresponding clause for LOAD DATA INFILE.
- fieldsEscapedBy: char (default: '\') - This option has the same meaning
as the corresponding clause for LOAD DATA INFILE.
- fieldsOptionallyEnclosed: bool (default: false) - Set to true if the
input values are not necessarily enclosed within quotation marks
specified by fieldsEnclosedBy option. Set to false if all fields are
quoted by character specified by fieldsEnclosedBy option.
- linesTerminatedBy: string (default: "\n") - This option has the same
meaning as the corresponding clause for LOAD DATA | |
# loop until the entire file is read
while not end_of_file:
# read up to the start of a record by finding the sync marker
end_of_file = self.find_record_start()
if end_of_file:
# make sure we break out of this loop if there are no more bytes in the file
continue
# now that the sync marker has been found, get the record type which follows
record_type = self._file_handle.read(1)
if record_type in RECORD_SIZE_DICT.keys():
# this record type does not contain the record size, get it from the dictionary
record_size_bytes = RECORD_SIZE_DICT.get(record_type)
full_record = vel3d_velpt_common.SYNC_MARKER + record_type
else:
# this record type does contain the record size, read it from the file
record_size_words = self._file_handle.read(2)
# unpack and convert from words to bytes
record_size_bytes = struct.unpack('<H', record_size_words)[0] * 2
full_record = vel3d_velpt_common.SYNC_MARKER + record_type + record_size_words
# based on the obtained record size, read the rest of the record
remain_bytes = record_size_bytes - len(full_record)
remain_record = self._file_handle.read(remain_bytes)
# store the full record
full_record += remain_record
if len(remain_record) < remain_bytes:
# if we did not read as many bytes as were requested, we ran into the end of the file
msg = 'Incomplete record 0x%s' % binascii.hexlify(full_record)
log.warning(msg)
self._exception_callback(SampleException(msg))
end_of_file = True
continue
# compare checksums
if not vel3d_velpt_common.match_checksum(full_record):
# checksums did not match, do not process this record further
msg = 'Checksums do not match for record type 0x%s' % binascii.hexlify(record_type)
log.warn(msg)
self._exception_callback(SampleException(msg))
continue
# process record based on the type
self.process_records(record_type, full_record)
if self.stored_velocity_records:
# If stored velocity records are present here, we only got a partial set at the end of the file
# without a terminating system record. Use the previous number of samples.
if self.stored_n_velocity_records != 0:
time_offset = 1.0/float(self.stored_n_velocity_records)
self.extract_velocities(time_offset)
else:
msg = 'Unable to calculating timestamp for last set of velocity records'
log.warn(msg)
self._exception_callback(SampleException(msg))
def find_record_start(self):
    """
    Advance the file handle to the start of the next record by scanning
    for the sync marker byte, one byte at a time.

    :return: True if the end of the file was reached, False if it was not
    """
    end_of_file = False
    read_buffer = ''
    # read one byte at a time until the sync marker is found
    # (Python 2 file semantics: read() returns a str, '' at EOF)
    one_byte = self._file_handle.read(1)
    while one_byte != vel3d_velpt_common.SYNC_MARKER:
        # store anything we find before the sync marker in the read buffer
        read_buffer += one_byte
        one_byte = self._file_handle.read(1)
        if one_byte == '':
            # no more bytes to read, break out of this loop
            end_of_file = True
            break
    # Bytes skipped before the marker are expected to be an ascii date/time
    # string that some file variants emit before each record.
    # NOTE(review): a single stray byte (len == 1) is silently ignored by
    # this check -- confirm that is intentional.
    if len(read_buffer) > 1 and not DATE_TIME_MATCHER.match(read_buffer):
        # we expect a version of the file to have ascii date time strings prior to each record, if this
        # is something other than that call the exception
        msg = 'Found unexpected data 0x%s' % binascii.hexlify(read_buffer)
        log.warning(msg)
        self._exception_callback(UnexpectedDataException(msg))
    return end_of_file
def process_records(self, record_type, full_record):
    """
    Dispatch a complete record to the handler matching its type id.
    Record types with no handler listed here are silently ignored.
    :param record_type: the record type id associated with this record
    :param full_record: the full data string associated with this record
    """
    if record_type == VELOCITY_ID:
        # velocity records are buffered until their timestamps can be
        # derived from the surrounding system records
        self.stored_velocity_records.append(full_record)
        return
    dispatch = {
        vel3d_velpt_common.USER_CONFIGURATION_ID: self.process_user_config,
        vel3d_velpt_common.HARDWARE_CONFIGURATION_ID: self.process_hardware_config,
        vel3d_velpt_common.HEAD_CONFIGURATION_ID: self.process_head_config,
        SYSTEM_ID: self.process_system,
        HEADER_DATA_ID: self.process_header_data,
    }
    handler = dispatch.get(record_type)
    if handler is not None:
        handler(full_record)
def process_user_config(self, full_record):
    """
    Emit the user configuration particle; initialise the file's first
    timestamp from it when no earlier timestamp exists.
    :param full_record: raw user configuration record string
    """
    # the user config record carries its clock field at byte offset 48
    stamp = self.adjust_timestamp(
        vel3d_velpt_common.get_timestamp(full_record, start_byte=48))
    if self.first_timestamp is None:
        self.first_timestamp = stamp
        # flush any config records that were waiting on the first timestamp
        self.extract_h_config()
    self.simple_extract(self.user_config_class, full_record, stamp)
def process_hardware_config(self, full_record):
    """
    Emit the hardware configuration particle, stamped with the file's
    first timestamp; if that timestamp is not known yet, hold on to the
    raw record until it is.
    :param full_record: raw hardware configuration record string
    """
    if not self.first_timestamp:
        # no timestamp available yet; buffer the record for later
        self.stored_hardware_config = full_record
    else:
        self.simple_extract(self.hardware_config_class, full_record, self.first_timestamp)
def process_head_config(self, full_record):
    """
    Emit the head configuration particle, stamped with the file's first
    timestamp; if that timestamp is not known yet, hold on to the raw
    record until it is.
    :param full_record: raw head configuration record string
    """
    if not self.first_timestamp:
        # no timestamp available yet; buffer the record for later
        self.stored_head_config = full_record
    else:
        self.simple_extract(self.head_config_class, full_record, self.first_timestamp)
def process_system(self, full_record):
    """
    Extract a system record.  If a pair of system records brackets a set
    of buffered velocity records, derive the per-record time offset and
    emit the buffered velocities.  Also initialises the file's first
    timestamp when it has not been set yet.
    :param full_record: The raw data string to pass into the system particle
    """
    if self.previous_system_timestamp is not None and self.stored_velocity_records != []:
        # there has been a pair of system records with velocity records in
        # between; spread the velocity timestamps evenly between them.
        # NOTE(review): offset assumes one second between system records --
        # confirm against the instrument's output rate.
        n_vel_records = len(self.stored_velocity_records)
        time_offset = 1.0/float(n_vel_records)
        # calculate the timestamps and extract velocity records; this must
        # happen BEFORE previous_system_timestamp is overwritten below
        self.extract_velocities(time_offset)
        self.stored_n_velocity_records = n_vel_records
    # get the timestamp associated with this system record
    timestamp = vel3d_velpt_common.get_timestamp(full_record)
    timestamp = self.adjust_timestamp(timestamp)
    # extract the system record
    self.simple_extract(self.system_class, full_record, timestamp)
    self.previous_system_timestamp = float(timestamp)
    if self.first_timestamp is None:
        self.first_timestamp = timestamp
        # check if head or hardware messages have been received and not sent yet
        self.extract_h_config()
def extract_velocities(self, time_offset):
    """
    Stamp and emit every buffered velocity record, then clear the buffer.
    :param time_offset: seconds between consecutive velocity records,
        accumulated on top of the previous system record's timestamp
    """
    for index, record in enumerate(self.stored_velocity_records):
        stamp = self.previous_system_timestamp + (index * time_offset)
        self.simple_extract(self.velocity_class, record, stamp)
    # all buffered records have been emitted; reset the buffer
    self.stored_velocity_records = []
def extract_h_config(self):
    """
    Emit any hardware/head configuration records that were buffered while
    waiting for the first timestamp, then drop the buffered copies.
    """
    record = self.stored_hardware_config
    if record:
        self.simple_extract(self.hardware_config_class, record, self.first_timestamp)
        self.stored_hardware_config = None
    record = self.stored_head_config
    if record:
        self.simple_extract(self.head_config_class, record, self.first_timestamp)
        self.stored_head_config = None
def process_header_data(self, full_record):
    """
    Emit a header data particle and, if needed, initialise the file's
    first timestamp from it.
    :param full_record: raw header data record string
    """
    stamp = self.adjust_timestamp(
        vel3d_velpt_common.get_timestamp(full_record))
    if self.first_timestamp is None:
        # this is the earliest timestamped record seen so far
        self.first_timestamp = stamp
        # flush any config records that were waiting on the first timestamp
        self.extract_h_config()
    self.simple_extract(self.data_header_class, full_record, stamp)
def simple_extract(self, class_type, data, timestamp):
    """
    Build a particle of the given class from raw data and append it to
    the record buffer.
    :param class_type: particle class to instantiate
    :param data: raw record string for the particle
    :param timestamp: internal timestamp to assign to the particle
    """
    self._record_buffer.append(
        self._extract_sample(class_type, None, data,
                             internal_timestamp=timestamp))
def adjust_timestamp(self, timestamp):
"""
The instrument runs every half hour. It is powered down between runs so its internal clock is reset
to 19700101 before every run. All timestamps in the file are therefor incorrect. To correct the sample
times, extract the DCL logging start time from filename that it generates and round to nearest 30 minutes
to approximate when the instrument started up. Subtract the first timestamp reported in the file from
this value to get the adjustment offset to be used to correct all the other timestamps in the file.
:param timestamp: The raw (incorrect) NTP timestamp from a record in file
:return: The corrected NTP timestamp
"""
if self.instrument_timestamp_adjustment is None:
# This is the first timestamp in the file which was generated shortly after the | |
to a the right space before
using the :class:`Averager` object, and also automates the
boundary recovery process. If no boundary method is specified,
this simply performs the action of the :class: `Averager`.
:arg v_in: the :class:`ufl.Expr` or
:class:`.Function` to project. (e.g. a VDG0 function)
:arg v_out: :class:`.Function` to put the result in. (e.g. a CG1 function)
:arg VDG: optional :class:`.FunctionSpace`. If not None, v_in is interpolated
to this space first before recovery happens.
:arg boundary_method: an Enum object, .
"""
def __init__(self, v_in, v_out, VDG=None, boundary_method=None):
    # check if v_in is valid: must be a UFL expression or a Function, and
    # explicitly not a deprecated firedrake Expression
    if isinstance(v_in, expression.Expression) or not isinstance(v_in, (ufl.core.expr.Expr, function.Function)):
        raise ValueError("Can only recover UFL expression or Functions not '%s'" % type(v_in))
    self.v_in = v_in
    self.v_out = v_out
    # target function space of the recovery
    self.V = v_out.function_space()
    if VDG is not None:
        # interpolate v_in into the intermediate VDG space before averaging
        self.v = Function(VDG)
        self.interpolator = Interpolator(v_in, self.v)
    else:
        # no intermediate space: average directly from v_in
        self.v = v_in
        self.interpolator = None
    self.VDG = VDG
    self.boundary_method = boundary_method
    self.averager = Averager(self.v, self.v_out)
    # check boundary method options are valid
    if boundary_method is not None:
        if boundary_method != Boundary_Method.dynamics and boundary_method != Boundary_Method.physics:
            raise ValueError("Boundary method must be a Boundary_Method Enum object.")
        if VDG is None:
            raise ValueError("If boundary_method is specified, VDG also needs specifying.")
        # now specify things that we'll need if we are doing boundary recovery
        if boundary_method == Boundary_Method.physics:
            # check dimensions: the physics method only handles scalars
            if self.V.value_size != 1:
                raise ValueError('This method only works for scalar functions.')
            self.boundary_recoverer = Boundary_Recoverer(self.v_out, self.v, method=Boundary_Method.physics)
        else:
            # dynamics boundary method
            mesh = self.V.mesh()
            # this ensures we get the pure function space, not an indexed function space
            V0 = FunctionSpace(mesh, self.v_in.function_space().ufl_element())
            VCG1 = FunctionSpace(mesh, "CG", 1)
            # build an equispaced DG1 element; extruded meshes need a
            # tensor-product of horizontal and vertical DG1 elements
            if V0.extruded:
                cell = mesh._base_mesh.ufl_cell().cellname()
                DG1_hori_elt = FiniteElement("DG", cell, 1, variant="equispaced")
                DG1_vert_elt = FiniteElement("DG", interval, 1, variant="equispaced")
                DG1_element = TensorProductElement(DG1_hori_elt, DG1_vert_elt)
            else:
                cell = mesh.ufl_cell().cellname()
                DG1_element = FiniteElement("DG", cell, 1, variant="equispaced")
            VDG1 = FunctionSpace(mesh, DG1_element)
            if self.V.value_size == 1:
                # scalar case: a single boundary recoverer suffices
                coords_to_adjust = find_coords_to_adjust(V0, VDG1)
                self.boundary_recoverer = Boundary_Recoverer(self.v_out, self.v,
                                                             coords_to_adjust=coords_to_adjust,
                                                             method=Boundary_Method.dynamics)
            else:
                # vector case: recover each component separately
                VuDG1 = VectorFunctionSpace(mesh, DG1_element)
                coords_to_adjust = find_coords_to_adjust(V0, VuDG1)
                # now, break the problem down into components
                v_scalars = []
                v_out_scalars = []
                self.boundary_recoverers = []
                self.project_to_scalars_CG = []
                self.extra_averagers = []
                coords_to_adjust_list = []
                for i in range(self.V.value_size):
                    v_scalars.append(Function(VDG1))
                    v_out_scalars.append(Function(VCG1))
                    coords_to_adjust_list.append(Function(VDG1).project(coords_to_adjust[i]))
                    self.project_to_scalars_CG.append(Projector(self.v_out[i], v_out_scalars[i]))
                    self.boundary_recoverers.append(Boundary_Recoverer(v_out_scalars[i], v_scalars[i],
                                                                       method=Boundary_Method.dynamics,
                                                                       coords_to_adjust=coords_to_adjust_list[i]))
                    # need an extra averager that works on the scalar fields rather than the vector one
                    self.extra_averagers.append(Averager(v_scalars[i], v_out_scalars[i]))
                # the boundary recoverer needs to be done on a scalar fields
                # so need to extract component and restore it after the boundary recovery is done
                self.interpolate_to_vector = Interpolator(as_vector(v_out_scalars), self.v_out)
def project(self):
    """
    Perform the fully specified recovery.

    Interpolates into the intermediate space if one was given, averages
    into the output space, then (when a boundary method is set) repairs
    the boundary values and re-averages.  Returns the output Function.
    """
    if self.interpolator is not None:
        self.interpolator.interpolate()
    self.averager.project()
    if self.boundary_method is not None:
        if self.V.value_size > 1:
            # vector case: recover each component separately, then
            # reassemble the components into the vector-valued output
            for i in range(self.V.value_size):
                self.project_to_scalars_CG[i].project()
                self.boundary_recoverers[i].apply()
                self.extra_averagers[i].project()
            self.interpolate_to_vector.interpolate()
        else:
            # scalar case: fix the boundary values, then average again so
            # interior DOFs are consistent with the adjusted boundary
            self.boundary_recoverer.apply()
            self.averager.project()
    return self.v_out
def find_coords_to_adjust(V0, DG1):
"""
This function finds the coordinates that need to be adjusted
for the recovery at the boundary. These are assigned by a 1,
while all coordinates to be left unchanged are assigned a 0.
This field is returned as a DG1 field.
Fields can be scalar or vector.
:arg V0: the space of the original field (before recovery).
:arg DG1: a DG1 space, in which the boundary recovery will happen.
"""
# check that spaces are correct
mesh = DG1.mesh()
if DG1.extruded:
cell = mesh._base_mesh.ufl_cell().cellname()
DG1_hori_elt = FiniteElement("DG", cell, 1, variant="equispaced")
DG1_vert_elt = FiniteElement("DG", interval, 1, variant="equispaced")
DG1_element = TensorProductElement(DG1_hori_elt, DG1_vert_elt)
else:
cell = mesh.ufl_cell().cellname()
DG1_element = FiniteElement("DG", cell, 1, variant="equispaced")
scalar_DG1 = FunctionSpace(mesh, DG1_element)
vector_DG1 = VectorFunctionSpace(mesh, DG1_element)
# check DG1 field is correct
if type(DG1.ufl_element()) == VectorElement:
if DG1 != vector_DG1:
raise ValueError('The function space entered as vector DG1 is not vector DG1.')
elif DG1 != scalar_DG1:
raise ValueError('The function space entered as DG1 is not DG1.')
# STRATEGY
# We need to pass the boundary recoverer a field denoting the location
# of nodes on the boundary, which denotes the coordinates to adjust to be new effective
# coords. This field will be 1 for these coords and 0 otherwise.
# How do we do this?
# 1. Obtain a DG1 field which is 1 at all exterior DOFs by applying Dirichlet
# boundary conditions. i.e. for cells in the bottom right corner of a domain:
# ------- 0 ------- 0 ------- 1
# | | ||
# | | ||
# | | ||
# ======= 1 ======= 1 ======= 1
# 2. Obtain a field in DG1 that is 1 at exterior DOFs adjacent to the exterior
# DOFs of V0 (i.e. the original space). For V0=DG0 there will be no exterior
# DOFs, but could be if velocity is in RT or if there is a temperature space.
# This is done by applying topological boundary conditions to a field in V0,
# before interpolating these into DG1.
# For instance, marking V0 DOFs with x, for rho and theta spaces this would give
# ------- 0 ------- 0 ------- 0 ---x--- 0 ---x--- 0 ---x--- 0
# | | || | | ||
# x | x | x || | | ||
# | | || | | ||
# ======= 0 ======= 0 ======= 0 ===x=== 1 ===x=== 1 ===x=== 1
# 3. Obtain a field that is 1 at corners in 2D or along edges in 3D.
# We do this by using that corners in 2D and edges in 3D are intersections
# of edges/faces respectively. In 2D, this means that if a field which is 1 on a
# horizontal edge is summed with a field that is 1 on a vertical edge, the
# corner value will be 2. Subtracting the exterior DG1 field from step 1 leaves
# a field that is 1 in the corner. This is generalised to 3D.
# ------- 0 ------- 0 ------- 0 ------- 1 ------- 0 ------- 1 ------- 0 ------- 0
# | || | || | || | ||
# | || + | || - | || = | ||
# | || | || | || | ||
# ======= 1 ======= 1 ======= 0 ======= 1 ======= 1 ======= 1 ======= 0 ======= 1
# 4. The field of coords to be adjusted is then found by the following formula:
# f1 + f3 - f2
# where f1, f2 and f3 are the DG1 fields obtained from steps 1, 2 and 3.
# make DG1 field with 1 at all exterior coords
all_ext_in_DG1 = Function(DG1)
bcs = [DirichletBC(DG1, Constant(1.0), "on_boundary", method="geometric")]
if DG1.extruded:
bcs.append(DirichletBC(DG1, Constant(1.0), "top", method="geometric"))
bcs.append(DirichletBC(DG1, Constant(1.0), "bottom", method="geometric"))
for bc in bcs:
bc.apply(all_ext_in_DG1)
# make DG1 field with 1 at coords surrounding exterior coords of V0
# first do topological BCs to get V0 function which is 1 at DOFs on edges
all_ext_in_V0 = Function(V0)
bcs = [DirichletBC(V0, Constant(1.0), "on_boundary", method="topological")]
if V0.extruded:
bcs.append(DirichletBC(V0, Constant(1.0), "top", method="topological"))
bcs.append(DirichletBC(V0, Constant(1.0), "bottom", method="topological"))
for bc in bcs:
bc.apply(all_ext_in_V0)
if DG1.value_size > 1:
# for vector valued functions, DOFs aren't pointwise evaluation. We break into components and use a conditional interpolation to get values of 1
V0_ext_in_DG1_components = []
for i in range(DG1.value_size):
V0_ext_in_DG1_components.append(Function(scalar_DG1).interpolate(conditional(abs(all_ext_in_V0[i]) > 0.0, 1.0, 0.0)))
V0_ext_in_DG1 = Function(DG1).project(as_vector(V0_ext_in_DG1_components))
else:
# for scalar functions (where DOFs are pointwise evaluation) we can simply interpolate to get these values
V0_ext_in_DG1 = Function(DG1).interpolate(all_ext_in_V0)
corners_in_DG1 = Function(DG1)
if DG1.mesh().topological_dimension() == 2:
if DG1.extruded:
DG1_ext_hori = Function(DG1)
DG1_ext_vert = Function(DG1)
hori_bcs = [DirichletBC(DG1, Constant(1.0), "top", method="geometric"),
DirichletBC(DG1, Constant(1.0), "bottom", method="geometric")]
vert_bc = DirichletBC(DG1, Constant(1.0), "on_boundary", method="geometric")
for bc in hori_bcs:
bc.apply(DG1_ext_hori)
vert_bc.apply(DG1_ext_vert)
corners_in_DG1.assign(DG1_ext_hori + DG1_ext_vert - all_ext_in_DG1)
else:
# we don't know whether its periodic or in how many directions
DG1_ext_x = Function(DG1)
DG1_ext_y = Function(DG1)
x_bcs = [DirichletBC(DG1, Constant(1.0), 1, method="geometric"),
DirichletBC(DG1, Constant(1.0), 2, method="geometric")]
y_bcs = [DirichletBC(DG1, Constant(1.0), 3, method="geometric"),
DirichletBC(DG1, Constant(1.0), 4, method="geometric")]
# there is no easy way to know if | |
# Copyright (c) 2013 Rackspace, Inc.
# Copyright (c) 2015 Catalyst IT Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import re
import uuid
from oslo_utils import timeutils
from urllib import parse as urllib_parse
from zaqar.common import consts
from zaqar.conf import transport
from zaqar.i18n import _
# Lower bounds (in seconds) accepted by the API for the various TTL and
# grace values; message delay alone may be zero.
MIN_MESSAGE_TTL = 60
MIN_CLAIM_TTL = 60
MIN_CLAIM_GRACE = 60
MIN_DELAY_TTL = 0
MIN_SUBSCRIPTION_TTL = 60
# Resource types whose contents may be purged in bulk.
# NOTE(review): the name is misspelled ("PURGBLE"); left unchanged because
# code outside this view may reference it.
_PURGBLE_RESOURCE_TYPES = {'messages', 'subscriptions'}
# NOTE(kgriffs): Don't use \w because it isn't guaranteed to match
# only ASCII characters.
QUEUE_NAME_REGEX = re.compile(r'^[a-zA-Z0-9_\-.]+$')
QUEUE_NAME_MAX_LEN = 64
PROJECT_ID_MAX_LEN = 256
class ValidationFailed(ValueError):
    """Raised when user input violates an API restriction.

    The message may contain ``str.format`` placeholders, which are filled
    in from the positional and keyword arguments.
    """

    def __init__(self, msg, *args, **kwargs):
        formatted = msg.format(*args, **kwargs)
        super(ValidationFailed, self).__init__(formatted)
class Validator(object):
def __init__(self, conf):
    # Keep the config object and register the transport limit options so
    # they can be read from the transport group.
    self._conf = conf
    self._conf.register_opts(transport.ALL_OPTS,
                             group=transport.GROUP_NAME)
    # Shortcut to the transport-group limits used by the validators.
    self._limits_conf = self._conf[transport.GROUP_NAME]
    # JSON-patch operations accepted by the queue metadata API.
    self._supported_operations = ('add', 'remove', 'replace')
def queue_identification(self, queue, project):
    """Restrictions on a project id & queue name pair.

    :param queue: Name of the queue
    :param project: Project id
    :raises ValidationFailed: if the `name` is longer than 64
        characters or contains anything other than ASCII digits and
        letters, underscores, and dashes.  Also raises if `project`
        is not None but longer than 256 characters.
    """
    # a missing project id is allowed; an over-long one is not
    if project is not None:
        if len(project) > PROJECT_ID_MAX_LEN:
            raise ValidationFailed(
                _(u'Project ids may not be more than {0} characters long.'),
                PROJECT_ID_MAX_LEN)
    if len(queue) > QUEUE_NAME_MAX_LEN:
        raise ValidationFailed(
            _(u'Queue names may not be more than {0} characters long.'),
            QUEUE_NAME_MAX_LEN)
    if not QUEUE_NAME_REGEX.match(queue):
        raise ValidationFailed(
            _(u'Queue names may only contain ASCII letters, digits, '
              'underscores, and dashes.'))
def _get_change_operation_d10(self, raw_change):
op = raw_change.get('op')
if op is None:
msg = (_('Unable to find `op` in JSON Schema change. '
'It must be one of the following: %(available)s.') %
{'available': ', '.join(self._supported_operations)})
raise ValidationFailed(msg)
if op not in self._supported_operations:
msg = (_('Invalid operation: `%(op)s`. '
'It must be one of the following: %(available)s.') %
{'op': op,
'available': ', '.join(self._supported_operations)})
raise ValidationFailed(msg)
return op
def _get_change_path_d10(self, raw_change):
try:
return raw_change['path']
except KeyError:
msg = _("Unable to find '%s' in JSON Schema change") % 'path'
raise ValidationFailed(msg)
def _decode_json_pointer(self, pointer):
"""Parse a json pointer.
Json Pointers are defined in
http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer .
The pointers use '/' for separation between object attributes, such
that '/A/B' would evaluate to C in {"A": {"B": "C"}}. A '/' character
in an attribute name is encoded as "~1" and a '~' character is encoded
as "~0".
"""
self._validate_json_pointer(pointer)
ret = []
for part in pointer.lstrip('/').split('/'):
ret.append(part.replace('~1', '/').replace('~0', '~').strip())
return ret
def _validate_json_pointer(self, pointer):
"""Validate a json pointer.
We only accept a limited form of json pointers.
"""
if not pointer.startswith('/'):
msg = _('Pointer `%s` does not start with "/".') % pointer
raise ValidationFailed(msg)
if re.search(r'/\s*?/', pointer[1:]):
msg = _('Pointer `%s` contains adjacent "/".') % pointer
raise ValidationFailed(msg)
if len(pointer) > 1 and pointer.endswith('/'):
msg = _('Pointer `%s` end with "/".') % pointer
raise ValidationFailed(msg)
if pointer[1:].strip() == '/':
msg = _('Pointer `%s` does not contains valid token.') % pointer
raise ValidationFailed(msg)
if re.search(r'~[^01]', pointer) or pointer.endswith('~'):
msg = _('Pointer `%s` contains "~" not part of'
' a recognized escape sequence.') % pointer
raise ValidationFailed(msg)
def _get_change_value(self, raw_change, op):
if 'value' not in raw_change:
msg = _('Operation "{0}" requires a member named "value".')
raise ValidationFailed(msg, op)
return raw_change['value']
def _validate_change(self, change):
if change['op'] == 'remove':
return
path_root = change['path'][0]
if len(change['path']) >= 1 and path_root.lower() != 'metadata':
msg = _("The root of path must be metadata, e.g /metadata/key.")
raise ValidationFailed(msg)
def _validate_path(self, op, path):
limits = {'add': 2, 'remove': 2, 'replace': 2}
if len(path) != limits.get(op, 2):
msg = _("Invalid JSON pointer for this resource: "
"'/%s, e.g /metadata/key'") % '/'.join(path)
raise ValidationFailed(msg)
def _parse_json_schema_change(self, raw_change, draft_version):
if draft_version == 10:
op = self._get_change_operation_d10(raw_change)
path = self._get_change_path_d10(raw_change)
else:
msg = _('Unrecognized JSON Schema draft version')
raise ValidationFailed(msg)
path_list = self._decode_json_pointer(path)
return op, path_list
    def _validate_retry_policy(self, metadata):
        """Validate the optional `_retry_policy` dict inside *metadata*.

        Checks that the policy is a dict, that each known key has the
        expected type, and that the min/max delay pair is coherent.

        :param metadata: queue/subscription metadata dict (may be None)
        :raises ValidationFailed: on any type or range violation
        """
        retry_policy = metadata.get('_retry_policy') if metadata else None
        if retry_policy and not isinstance(retry_policy, dict):
            msg = _('retry_policy must be a dict.')
            raise ValidationFailed(msg)
        if retry_policy:
            valid_keys = ['retries_with_no_delay', 'minimum_delay_retries',
                          'minimum_delay', 'maximum_delay',
                          'maximum_delay_retries', 'retry_backoff_function',
                          'ignore_subscription_override']
            for key in valid_keys:
                # NOTE(review): every check below is guarded by
                # `if retry_value and ...`, so falsy values (0, False, '')
                # silently skip type validation — confirm this is intended.
                retry_value = retry_policy.get(key)
                if key == 'retry_backoff_function':
                    if retry_value and not isinstance(retry_value, str):
                        msg = _('retry_backoff_function must be a string.')
                        raise ValidationFailed(msg)
                    # Now we support linear, arithmetic, exponential
                    # and geometric retry backoff function.
                    fun = {'linear', 'arithmetic', 'exponential', 'geometric'}
                    if retry_value and retry_value not in fun:
                        msg = _('invalid retry_backoff_function.')
                        raise ValidationFailed(msg)
                elif key == 'ignore_subscription_override':
                    if retry_value and not isinstance(retry_value, bool):
                        msg = _('ignore_subscription_override must be a '
                                'boolean.')
                        raise ValidationFailed(msg)
                else:
                    # All remaining keys are retry counts/delays: integers.
                    if retry_value and not isinstance(retry_value, int):
                        msg = _('Retry policy: %s must be a integer.') % key
                        raise ValidationFailed(msg)
            # Fall back to the project-wide defaults when unset.
            min_delay = retry_policy.get('minimum_delay',
                                         consts.MINIMUM_DELAY)
            max_delay = retry_policy.get('maximum_delay',
                                         consts.MAXIMUM_DELAY)
            if max_delay < min_delay:
                msg = _('minimum_delay must less than maximum_delay.')
                raise ValidationFailed(msg)
            # The delay window must be wide enough for at least two
            # linear backoff intervals.
            if ((max_delay - min_delay) < 2*consts.LINEAR_INTERVAL):
                msg = _('invalid minimum_delay and maximum_delay.')
                raise ValidationFailed(msg)
def queue_patching(self, request, changes):
washed_changes = []
content_types = {
'application/openstack-messaging-v2.0-json-patch': 10,
}
json_schema_version = content_types[request.content_type]
if not isinstance(changes, list):
msg = _('Request body must be a JSON array of operation objects.')
raise ValidationFailed(msg)
for raw_change in changes:
if not isinstance(raw_change, dict):
msg = _('Operations must be JSON objects.')
raise ValidationFailed(msg)
(op, path) = self._parse_json_schema_change(raw_change,
json_schema_version)
# NOTE(flwang): Now the 'path' is a list.
self._validate_path(op, path)
change = {'op': op, 'path': path,
'json_schema_version': json_schema_version}
if not op == 'remove':
change['value'] = self._get_change_value(raw_change, op)
self._validate_change(change)
washed_changes.append(change)
return washed_changes
def queue_listing(self, limit=None, **kwargs):
"""Restrictions involving a list of queues.
:param limit: The expected number of queues in the list
:param kwargs: Ignored arguments passed to storage API
:raises ValidationFailed: if the limit is exceeded
"""
uplimit = self._limits_conf.max_queues_per_page
if limit is not None and not (0 < limit <= uplimit):
msg = _(u'Limit must be at least 1 and no greater than {0}.')
raise ValidationFailed(msg, self._limits_conf.max_queues_per_page)
def queue_metadata_length(self, content_length):
"""Restrictions on queue's length.
:param content_length: Queue request's length.
:raises ValidationFailed: if the metadata is oversize.
"""
if content_length is None:
return
if content_length > self._limits_conf.max_queue_metadata:
msg = _(u'Queue metadata is too large. Max size: {0}')
raise ValidationFailed(msg, self._limits_conf.max_queue_metadata)
def queue_metadata_putting(self, queue_metadata):
"""Checking if the reserved attributes of the queue are valid.
:param queue_metadata: Queue's metadata.
:raises ValidationFailed: if any reserved attribute is invalid.
"""
if not queue_metadata:
return
queue_default_ttl = queue_metadata.get('_default_message_ttl')
if queue_default_ttl and not isinstance(queue_default_ttl, int):
msg = _(u'_default_message_ttl must be integer.')
raise ValidationFailed(msg)
if queue_default_ttl is not None:
if not (MIN_MESSAGE_TTL <= queue_default_ttl <=
self._limits_conf.max_message_ttl):
msg = _(u'_default_message_ttl can not exceed {0} '
'seconds, and must be at least {1} seconds long.')
raise ValidationFailed(
msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL)
queue_max_msg_size = queue_metadata.get('_max_messages_post_size',
None)
if queue_max_msg_size and not isinstance(queue_max_msg_size, int):
msg = _(u'_max_messages_post_size must be integer.')
raise ValidationFailed(msg)
if queue_max_msg_size is not None:
if not (0 < queue_max_msg_size <=
self._limits_conf.max_messages_post_size):
raise ValidationFailed(
_(u'_max_messages_post_size can not exceed {0}, '
' and must be at least greater than 0.'),
self._limits_conf.max_messages_post_size)
max_claim_count = queue_metadata.get('_max_claim_count', None)
if max_claim_count and not isinstance(max_claim_count, int):
msg = _(u'_max_claim_count must be integer.')
raise ValidationFailed(msg)
dlq_ttl = queue_metadata.get('_dead_letter_queue_messages_ttl', None)
if dlq_ttl and not isinstance(dlq_ttl, int):
msg = _(u'_dead_letter_queue_messages_ttl must be integer.')
raise ValidationFailed(msg)
if dlq_ttl is not None and not (MIN_MESSAGE_TTL <= dlq_ttl <=
self._limits_conf.max_message_ttl):
msg = _(u'The TTL for a message may not exceed {0} seconds, '
'and must be at least {1} seconds long.')
raise ValidationFailed(msg, self._limits_conf.max_message_ttl,
MIN_MESSAGE_TTL)
queue_delay = queue_metadata.get('_default_message_delay',
None)
if queue_delay and not isinstance(queue_delay, int):
msg = _(u'_default_message_delay must be integer.')
raise ValidationFailed(msg)
if queue_delay is not None:
if not (MIN_DELAY_TTL <= queue_delay <=
self._limits_conf.max_message_delay):
msg = _(u'The TTL can not exceed {0} seconds, and must '
| |
# Source: HDRUK/schemata — docs/dataset/2.0.0/impact-assessment/generated_mapping.py
#### START DEFINE MAPPING v1 --> v2 ####
def map_identifier(dm_v1, dm_v2):
    """Build the v2 'identifier' URL from the v1 dataset id, if present."""
    dataset_id = dm_v1.get('id')
    if dataset_id:
        dm_v2['identifier'] = (
            f"https://web.www.healthdatagateway.org/dataset/{dataset_id}")
def map_version(dm_v1, dm_v2):
    """Copy v1 'documentationVersion' (or None) into v2 'version'."""
    dm_v2['version'] = dm_v1.get('documentationVersion')
def mapl_revisions(dm_v1, dm_v2):
    """Map the v1 'revisions' dict (version -> dataset id) into v2.

    Each entry becomes {'version': ..., 'url': ...}; the special 'latest'
    key is skipped.  Fix: the previous default of [] did not match the
    dict the code immediately iterates with .items(); use {} so the
    default has the consumed type.
    """
    dm_revisions = dm_v1.get('revisions', {})
    if dm_revisions:
        dm_v2['revisions'] = [
            {'version': version,
             'url': f"https://web.www.healthdatagateway.org/dataset/{ds_id}"}
            for version, ds_id in dm_revisions.items()
            if version != 'latest'
        ]
def map_issued(dm_v1, dm_v2):
    """Copy v1 'issued' (or None) into v2."""
    dm_v2['issued'] = dm_v1.get('issued')
def map_modified(dm_v1, dm_v2):
    """Copy v1 'modified' into v2, but only when a truthy value exists."""
    modified = dm_v1.get('modified')
    if modified:
        dm_v2['modified'] = modified
def map_summary_title(dm_v1, dm_v2):
    """Set summary.title from v1 'title', falling back to 'label'.

    When neither key yields a truthy value the literal string 'title'
    (the get() default for 'label') is used, as in the original mapping.
    """
    dm_v2["summary"]['title'] = (dm_v1.get('title')
                                 or dm_v1.get('label', 'title'))
def map_summary_abstract(dm_v1, dm_v2):
    """Copy v1 'abstract' (or None) into summary.abstract."""
    dm_v2["summary"]['abstract'] = dm_v1.get('abstract')
def map_summary_publisher_memberOf(dm_v1, dm_v2):
    """Derive the membership tier (HUB/ALLIANCE/OTHER) from v1 'publisher'.

    Known publishers are first rewritten to their canonical
    'TIER > Name' form; the tier is the text before '>'.  Unknown tiers
    are still written out, after logging an ERROR line.
    """
    known_publishers = {
        'Barts Health NHS Trust': 'ALLIANCE > BARTS',
        'NHS Digital': 'ALLIANCE > NHS Digital',
        'NIHR Health Informatics Collaborative Cardiovascular Theme': 'OTHER > NIHR HIC',
        'NIHR Health Informatics Collaborative Critical Care Theme': 'OTHER > NIHR HIC',
        'NIHR Health Informatics Collaborative Renal Transplantation Theme': 'OTHER > NIHR HIC',
        'NIHR Health Informatics Collaborative Viral Hepatitis Theme': 'OTHER > NIHR HIC',
        'Oxford University Hospitals NHS Foundation Trust': 'ALLIANCE > Oxford',
        'SLaM': 'ALLIANCE > South London and Maudsley',
    }
    raw = dm_v1.get('publisher')
    if not raw:
        return
    mapped = known_publishers.get(raw, raw)
    membership = mapped.split('>')[0].strip()
    if membership == 'HUBS':
        membership = 'HUB'
    if membership not in ('HUB', 'ALLIANCE', 'OTHER'):
        print(f"ERROR: Member Of: '{mapped}'")
    dm_v2["summary"]["publisher"]['memberOf'] = membership
def map_summary_publisher_accessRights(dm_v1, dm_v2):
    """Copy v1 'accessRights' (default 'In Progress') into the publisher."""
    dm_v2["summary"]["publisher"]['accessRights'] = dm_v1.get(
        'accessRights', 'In Progress')
def map_summary_publisher_deliveryLeadTime(dm_v1, dm_v2):
    """Normalize v1 'accessRequestDuration' into a v2 lead-time bucket.

    Branch order matters: exact matches and prefix/suffix tests are
    evaluated top-down, and unmatched inputs pass through upper-cased.

    Fix: the '48 DAYS' and '16 WEEKS' prefix tests sliced one character
    too many ([:8] and [:9] for 7- and 8-character prefixes), so they
    could only ever match the bare string; the slice lengths now match
    the prefixes, consistent with map_accessibility_access_deliveryLeadTime.
    """
    lead = dm_v1.get('accessRequestDuration', 'OTHER').upper()
    if (lead == 'LESS 1 WEEK'
            or lead[:14] == 'INSTANT ACCESS'):
        lead = 'LESS 1 WEEK'
    elif lead == '1-2 WEEKS':
        lead = '1-2 WEEKS'
    elif (lead in ('2-4 WEEKS', '>1 WEEK')
            or lead[:7] == '2 WEEKS'):
        lead = '2-4 WEEKS'
    elif (lead in ('1-2 MONTHS', '4 - 6 WEEKS', '4-6 WEEKS')
            or lead[:7] == '48 DAYS'):
        lead = '1-2 MONTHS'
    elif (lead in ('2-6 MONTHS', '2-3 MONTHS', '3-6 MONTHS')
            or lead[-11:] == '~ 3 MONTHS.'
            or lead[:8] == '16 WEEKS'):
        lead = '2-6 MONTHS'
    elif lead in ('MORE 6 MONTHS', '~6-12 MONTHS'):
        lead = 'MORE 6 MONTHS'
    elif lead in ('VARIABLE', 'SUBJECT TO NEGOTIATION'):
        lead = 'VARIABLE'
    elif lead == 'NOT APPLICABLE':
        lead = 'NOT APPLICABLE'
    elif lead == 'OTHER':
        lead = 'OTHER'
    dm_v2["summary"]["publisher"]['deliveryLeadTime'] = lead
def map_summary_publisher_dataUseLimitation(dm_v1, dm_v2):
    """No-op placeholder kept from the generator: no v1 field is mapped
    to summary.publisher.dataUseLimitation, so the target is left unset."""
def map_summary_publisher_dataUseRequirements(dm_v1, dm_v2):
    """No-op placeholder kept from the generator: no v1 field is mapped
    to summary.publisher.dataUseRequirements, so the target is left unset."""
def map_summary_publisher_accessService(dm_v1, dm_v2):
    """No-op placeholder kept from the generator: no v1 field is mapped
    to summary.publisher.accessService, so the target is left unset."""
def map_summary_publisher_name(dm_v1, dm_v2):
    """Copy v1 'publisher' (or None) into summary.publisher.name."""
    dm_v2["summary"]["publisher"]['name'] = dm_v1.get('publisher')
def map_summary_publisher_logo(dm_v1, dm_v2):
    """No-op placeholder kept from the generator: publisher logo URLs are
    not derivable from v1, so summary.publisher.logo is left unset."""
def map_summary_publisher_description(dm_v1, dm_v2):
    """Currently a no-op: the creator -> publisher.description mapping
    is disabled.

    The previous implementation wrapped the mapping in an unreachable
    `if False:` branch; the dead code is removed but recorded here so it
    can be re-enabled:
        dm_v2["summary"]["publisher"]['description'] = dm_v1.get('creator', None)
    """
def map_summary_publisher_contactPoint(dm_v1, dm_v2):
    """Copy v1 'contactPoint' (or None) into summary.publisher.contactPoint."""
    dm_v2["summary"]["publisher"]['contactPoint'] = dm_v1.get('contactPoint')
def map_summary_contactPoint(dm_v1, dm_v2):
    """Copy v1 'contactPoint' (or None) into summary.contactPoint."""
    dm_v2["summary"]['contactPoint'] = dm_v1.get('contactPoint')
def mapl_summary_keywords(dm_v1, dm_v2):
    """Split the comma-separated v1 'keywords' into a de-duplicated list.

    Fix: de-duplication now uses dict.fromkeys (insertion-ordered) instead
    of a set, so the output order is deterministic (first occurrence wins)
    across runs regardless of hash randomization.
    """
    dm_keywords = dm_v1.get('keywords', '')
    dm_v2["summary"]["keywords"] = []
    if dm_keywords:
        cleaned = (kw.lstrip() for kw in dm_keywords.split(','))
        dm_v2["summary"]["keywords"] = list(dict.fromkeys(cleaned))
def map_summary_alternateIdentifiers(dm_v1, dm_v2):
    """No-op placeholder kept from the generator: no v1 field is mapped
    to summary.alternateIdentifiers, so the target is left unset."""
def map_summary_doiName(dm_v1, dm_v2):
    """Normalize the v1 DOI and store it in summary.doiName.

    Lower-cases the value, strips a leading 'https://doi.org/' or 'doi:'
    prefix and any remaining leading whitespace.  A missing/empty DOI is
    stored unchanged (typically None).
    """
    doi = dm_v1.get('doi')
    if doi:
        doi = doi.lower()
        if doi.startswith('https://doi.org/'):
            doi = doi[len('https://doi.org/'):]
        elif doi.startswith('doi:'):
            doi = doi[len('doi:'):]
        doi = doi.lstrip()
    dm_v2["summary"]['doiName'] = doi
def map_documentation_description(dm_v1, dm_v2):
    """Copy v1 'description' (or None) into documentation.description."""
    dm_v2["documentation"]['description'] = dm_v1.get('description')
def mapl_documentation_associatedMedia(dm_v1, dm_v2):
    """No-op placeholder kept from the generator: no v1 field is mapped
    to documentation.associatedMedia, so the target is left unset."""
def mapl_documentation_isPartOf(dm_v1, dm_v2):
    """Wrap v1 'group' (default 'NOT APPLICABLE') in a one-element list."""
    dm_v2["documentation"]["isPartOf"] = [dm_v1.get('group', 'NOT APPLICABLE')]
def map_coverage_spatial(dm_v1, dm_v2):
    """Copy v1 'geographicCoverage' (or None) into coverage.spatial."""
    dm_v2["coverage"]['spatial'] = dm_v1.get('geographicCoverage')
def map_coverage_typicalAgeRange(dm_v1, dm_v2):
    """Copy v1 'ageBand' (or None) into coverage.typicalAgeRange."""
    dm_v2["coverage"]['typicalAgeRange'] = dm_v1.get('ageBand')
def mapl_coverage_physicalSampleAvailability(dm_v1, dm_v2):
    """Split v1 'physicalSampleAvailability' into an upper-cased list.

    Semicolons are treated as commas; each part keeps trailing spaces but
    loses leading ones, matching the original lstrip-only behaviour.
    """
    raw = dm_v1.get('physicalSampleAvailability')
    dm_v2["coverage"]["physicalSampleAvailability"] = []
    if raw:
        dm_v2["coverage"]["physicalSampleAvailability"] = [
            part.lstrip()
            for part in raw.replace(';', ',').upper().split(',')
        ]
def map_coverage_followup(dm_v1, dm_v2):
    """Record coverage.followup as 'UNKNOWN' — v1 carries no followup data."""
    dm_v2["coverage"]['followup'] = 'UNKNOWN'
def mapl_coverage_pathway(dm_v1, dm_v2):
    """Set coverage.pathway to None — v1 has no pathway information."""
    dm_v2["coverage"]["pathway"] = None
def mapl_provenance_origin_purpose(dm_v1, dm_v2):
    """Initialise provenance.origin.purpose to an empty list (no v1 source)."""
    dm_v2["provenance"]["origin"]["purpose"] = []
def mapl_provenance_origin_source(dm_v1, dm_v2):
    """Initialise provenance.origin.source to an empty list (no v1 source)."""
    dm_v2["provenance"]["origin"]["source"] = []
def mapl_provenance_origin_collectionSituation(dm_v1, dm_v2):
    """Initialise provenance.origin.collectionSituation to [] (no v1 source)."""
    dm_v2["provenance"]["origin"]["collectionSituation"] = []
def map_provenance_temporal_accrualPeriodicity(dm_v1, dm_v2):
    """Normalize the free-text v1 'periodicity' into a v2 accrual bucket.

    NOTE: branch order matters — some tests are prefix matches
    (e.g. 'ANNUAL'[:6]) and must not be reordered.  Inputs matching no
    branch pass through upper-cased but otherwise unchanged.
    """
    dm_periodicity = dm_v1.get('periodicity', 'OTHER')
    dm_periodicity = dm_periodicity.upper()
    if ('ANNUAL' == dm_periodicity[:6]
        or 'UPDATED ANNUALLY' == dm_periodicity
        or 'A NEW DATASET IS ADDED APPROXIMATELY ANNUALLY' == dm_periodicity):
        dm_periodicity = 'ANNUAL'
    elif 'BIANNUAL' == dm_periodicity[:8]:
        dm_periodicity = 'BIANNUAL'
    elif 'BIENNIAL' == dm_periodicity:
        dm_periodicity = 'BIENNIAL'
    elif 'BIMONTHLY' == dm_periodicity:
        dm_periodicity = 'BIMONTHLY'
    elif 'BIWEEKLY' == dm_periodicity:
        dm_periodicity = 'BIWEEKLY'
    elif ('CONTINUOUS' == dm_periodicity
          or 'DATA IS UPDATED HOURLY' == dm_periodicity):
        dm_periodicity = 'CONTINUOUS'
    elif 'DAILY' == dm_periodicity:
        dm_periodicity = 'DAILY'
    elif 'IRREGULAR' == dm_periodicity:
        dm_periodicity = 'IRREGULAR'
    elif ('MONTHLY' == dm_periodicity
          or '1 MONTH CYCLE' == dm_periodicity
          or 'PROVISIONAL DATA: MONTHLY' == dm_periodicity):
        dm_periodicity = 'MONTHLY'
    elif 'OTHER' == dm_periodicity:
        dm_periodicity = 'OTHER'
    elif ('QUARTERLY' == dm_periodicity[:9]
          or 'PROVISIONAL DATA: QUARTERLY' == dm_periodicity
          or 'GENOMICS ENGLAND DATASET ARE UPDATED ON A QUARTERLY BASIS.' == dm_periodicity
          or 'GENOMICS ENGLAND DATASET ARE UPDATED ON A QUARTERLY BASIS' == dm_periodicity):
        dm_periodicity = 'QUARTERLY'
    elif ('SEMIWEEKLY' == dm_periodicity
          or 'TWICE WEEKLY' == dm_periodicity
          or 'TWICE A WEEK (APPROXIMATELY)' == dm_periodicity):
        dm_periodicity = 'SEMIWEEKLY'
    elif ('STATIC' == dm_periodicity
          or 'NA (SINGLE RELEASE)' == dm_periodicity
          or 'SINGLE EXTRACT' == dm_periodicity
          or 'NOT APPLICABLE (SINGLE RELEASE)' == dm_periodicity):
        dm_periodicity = 'STATIC'
    elif ('WEEKLY' == dm_periodicity
          or 'UPDATED AT LEAST WEEKLY' == dm_periodicity):
        dm_periodicity = 'WEEKLY'
    dm_v2["provenance"]["temporal"]['accrualPeriodicity'] = dm_periodicity
def map_provenance_temporal_distributionReleaseDate(dm_v1, dm_v2):
    """Copy v1 'releaseDate' (or None) into the v2 temporal block."""
    dm_v2["provenance"]["temporal"]['distributionReleaseDate'] = dm_v1.get(
        'releaseDate')
def map_provenance_temporal_endDate(dm_v1, dm_v2):
    """Copy v1 'datasetEndDate' (or None) into the v2 temporal block."""
    dm_v2["provenance"]["temporal"]['endDate'] = dm_v1.get('datasetEndDate')
def map_provenance_temporal_timeLag(dm_v1, dm_v2):
    """No-op placeholder kept from the generator: no v1 field is mapped
    to provenance.temporal.timeLag, so the target is left unset."""
def map_provenance_temporal_startDate(dm_v1, dm_v2):
    """Copy v1 'datasetStartDate' (or None) into the v2 temporal block."""
    dm_v2["provenance"]["temporal"]['startDate'] = dm_v1.get('datasetStartDate')
def map_accessibility_access_accessRights(dm_v1, dm_v2):
    """Copy v1 'accessRights' (or None) into accessibility.access."""
    dm_v2["accessibility"]["access"]['accessRights'] = dm_v1.get('accessRights')
def map_accessibility_access_accessService(dm_v1, dm_v2):
    """Set accessibility.access.accessService to None — no v1 source field."""
    dm_v2["accessibility"]["access"]['accessService'] = None
def map_accessibility_access_deliveryLeadTime(dm_v1, dm_v2):
    """Normalize v1 'accessRequestDuration' into a v2 delivery-lead-time
    bucket under accessibility.access.

    Unlike the summary.publisher variant this one returns early (writes
    nothing) when the v1 field is missing/empty.  Branch order matters;
    unmatched inputs pass through upper-cased but otherwise unchanged.
    """
    dm_accessRequestDuration = dm_v1.get('accessRequestDuration', None)
    if not dm_accessRequestDuration:
        return
    dm_accessRequestDuration = dm_accessRequestDuration.upper()
    if ('1-2 MONTHS' == dm_accessRequestDuration
        or '4-6 WEEKS' == dm_accessRequestDuration
        or '4 - 6 WEEKS' == dm_accessRequestDuration
        or '48 DAYS' == dm_accessRequestDuration[:7]):
        dm_accessRequestDuration = '1-2 MONTHS'
    elif '1-2 WEEKS' == dm_accessRequestDuration:
        dm_accessRequestDuration = '1-2 WEEKS'
    elif ('2-4 WEEKS' == dm_accessRequestDuration
          or '2 WEEKS' == dm_accessRequestDuration[:7]):
        dm_accessRequestDuration = '2-4 WEEKS'
    elif ('2-6 MONTHS' == dm_accessRequestDuration
          or '3-6 MONTHS' == dm_accessRequestDuration
          or '2-3 MONTHS' == dm_accessRequestDuration
          or '16 WEEKS' == dm_accessRequestDuration[:8]):
        dm_accessRequestDuration = '2-6 MONTHS'
    elif ('LESS 1 WEEK' == dm_accessRequestDuration
          or 'INSTANT ACCESS' == dm_accessRequestDuration[:14]
          or 'ACCESS TO FULL GWAS' == dm_accessRequestDuration[:19]):
        dm_accessRequestDuration = 'LESS 1 WEEK'
    elif ('MORE 6 MONTHS' == dm_accessRequestDuration
          or '~6-12 MONTHS' == dm_accessRequestDuration):
        dm_accessRequestDuration = 'MORE 6 MONTHS'
    elif 'NOT APPLICABLE' == dm_accessRequestDuration:
        dm_accessRequestDuration = 'NOT APPLICABLE'
    elif 'OTHER' == dm_accessRequestDuration:
        dm_accessRequestDuration = 'OTHER'
    elif 'VARIABLE' == dm_accessRequestDuration:
        dm_accessRequestDuration = 'VARIABLE'
    dm_v2["accessibility"]["access"]['deliveryLeadTime'] = dm_accessRequestDuration
def mapl_accessibility_access_jurisdiction(dm_v1, dm_v2):
    """Wrap the v1 'jurisdiction' value in a single-element list.

    NOTE(review): a missing v1 value yields [None], not [] — confirm
    downstream consumers accept that before changing it.
    """
    dm_v2["accessibility"]["access"]["jurisdiction"] = [
        dm_v1.get('jurisdiction')
    ]
def map_accessibility_access_dataController(dm_v1, dm_v2):
    """Copy v1 'dataController' (or None) into accessibility.access."""
    dm_v2["accessibility"]["access"]['dataController'] = dm_v1.get(
        'dataController')
def map_accessibility_access_dataProcessor(dm_v1, dm_v2):
    """Copy v1 'dataProcessor' (or None) into accessibility.access."""
    dm_v2["accessibility"]["access"]['dataProcessor'] = dm_v1.get(
        'dataProcessor')
def mapl_accessibility_usage_dataUseLimitation(dm_v1, dm_v2):
    """Initialise accessibility.usage.dataUseLimitation to [] (no v1 source)."""
    dm_v2["accessibility"]["usage"]["dataUseLimitation"] = []
def mapl_accessibility_usage_dataUseRequirements(dm_v1, dm_v2):
    """Initialise accessibility.usage.dataUseRequirements to [] (no v1 source)."""
    dm_v2["accessibility"]["usage"]["dataUseRequirements"] = []
def map_accessibility_usage_resourceCreator(dm_v1, dm_v2):
    """Copy v1 'creator' (or None) into accessibility.usage.resourceCreator."""
    dm_v2["accessibility"]["usage"]['resourceCreator'] = dm_v1.get('creator')
def mapl_accessibility_usage_investigations(dm_v1, dm_v2):
    """Initialise accessibility.usage.investigations to an empty list.

    Fix: the previous code emitted the literal placeholder string
    'accessibility_usage_investigations' into the output document; every
    sibling stub (e.g. mapl_provenance_origin_purpose) initialises its
    target to [] instead, so do the same here.
    """
    dm_v2["accessibility"]["usage"]["investigations"] = []
def mapl_accessibility_usage_isReferencedBy(dm_v1, dm_v2):
    """Split v1 'citations' into a list, stripping DOI URL/'doi:' prefixes.

    Semicolons are treated as commas.  Prefix matching is case-sensitive,
    as in the original code (citations are not lower-cased first).
    """
    references = []
    citations = dm_v1.get('citations')
    if citations:
        for raw in citations.replace(';', ',').split(','):
            citation = raw.lstrip()
            if citation.startswith('https://doi.org/'):
                citation = citation[len('https://doi.org/'):]
            elif citation.startswith('doi:'):
                citation = citation[len('doi:'):]
            references.append(citation)
    dm_v2["accessibility"]["usage"]["isReferencedBy"] = references
def mapl_accessibility_formatAndStandards_vocabularyEncodingScheme(dm_v1, dm_v2):
dm_controlledVocabulary = dm_v1.get('controlledVocabulary', None)
dm_v2["accessibility"]["formatAndStandards"]["vocabularyEncodingScheme"] = []
if dm_controlledVocabulary:
dm_controlledVocabulary = dm_controlledVocabulary.replace(';', ',')
dm_controlledVocabulary = dm_controlledVocabulary.upper()
encodingSchemes = dm_controlledVocabulary.split(',')
for e in encodingSchemes:
e = e.lstrip()
if 'AMT' == e:
e = 'AMT'
elif 'APC' == e:
e = 'APC'
elif 'ATC' == e:
e = 'ATC'
elif 'CIEL' == e:
e = 'CIEL'
elif 'CPT4' == e:
e = 'CPT4'
elif ('DM PLUS D' == e
or 'DM+D' == e):
e = 'DM PLUS D'
elif 'DPD' == e:
e = 'DPD'
elif 'DRG' == e:
e = 'DRG'
elif 'HEMONC' == e:
e = 'HEMONC'
elif 'HPO' == e:
e = 'HPO'
elif ('ICD10' in e
or 'ICD-10' in e
or 'ICD 10' in e
or 'INTERNATIONAL CLASSIFICATION OF DISEASES VERSION 10' in e):
e = | |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 5 14:59:30 2016
@author: alex
"""
from AlexRobotics.dynamic import Hybrid_Manipulator as HM
from AlexRobotics.control import ComputedTorque as CTC
from AlexRobotics.estimation import ManipulatorDisturbanceObserver as OBS
import numpy as np
'''
###############################################################################
######### Controllers for hybrid-manipulator robots
###############################################################################
'''
##############################################################################
###
##############################################################################
class RminComputedTorqueController( CTC.ComputedTorqueController ):
    """Computed-torque feedback law that also picks the best discrete gear.

    At each control step the candidate torque is computed for every gear
    ratio option; the valid option with the smallest torque norm wins.
    Optional hysteresis suppresses gear shifts whose benefit is small or
    which would occur too soon after the previous shift.
    """
    ############################
    def __init__( self , R = HM.HybridTwoLinkManipulator() ):
        """Wrap robot model *R* and initialise gear-selection state.

        NOTE(review): the default argument instantiates one shared
        HybridTwoLinkManipulator at import time — confirm callers always
        pass their own model when isolation matters.
        """
        CTC.ComputedTorqueController.__init__( self , R )
        self.n_gears = 4
        self.hysteresis = False
        self.hys_level = 1
        self.last_gear_i = 0 # Default gear
        self.min_delay = -10000 # default is not constraint
        self.last_shift_t = -1
        # Integral action with dist observer (beta)
        self.dist_obs_active = False
        self.obs = OBS.DistObserver( R )
    ############################
    def reset_hysteresis( self ):
        """ Reset all memorized info in controlled, ex: before restarting a simulation """
        self.last_gear_i = 0 # Default gear
        self.last_shift_t = -1
    ############################
    def traj_following_ctl( self , x , t ):
        """
        Given desired loaded trajectory and actual state, compute torques and optimal gear ratio
        """
        ddq_d , dq_d , q_d = self.get_traj( t )
        ddq_r = self.compute_ddq_r( ddq_d , dq_d , q_d , x )
        u = self.u_star( ddq_r , x , t )
        return u
    ############################
    def fixed_goal_ctl( self , x , t = 0 ):
        """
        Given desired fixed goal state and actual state, compute torques and optimal gear ratio
        """
        ddq_d = np.zeros( self.R.dof )
        [ q_d , dq_d ] = self.R.x2q( self.goal ) # from state vector (x) to angle and speeds (q,dq)
        ddq_r = self.compute_ddq_r( ddq_d , dq_d , q_d , x )
        u = self.u_star( ddq_r , x , t )
        # Disturbance Observer Test
        # (beta) feed the observer's external-force estimate back into
        # the model as a steady disturbance.
        if self.dist_obs_active:
            self.obs.update_estimate( x , u , t )
            self.R.f_dist_steady = self.obs.f_ext_hat
        return u
    ############################
    def manual_acc_ctl( self , x , t = 0 ):
        """
        Given desired acc (self.ddq_manual_setpoint), compute torques and optimal gear ratio
        """
        ddq_r = self.ddq_manual_setpoint
        u = self.u_star( ddq_r , x , t )
        return u
    ############################
    def u_star( self , ddq_r , x , t ):
        """
        Compute optimal u given desired accel and actual states:
        evaluate the torque for every gear option, reject invalid inputs,
        pick the cheapest (torque-norm) option, then apply hysteresis.
        """
        # Cost is Q
        Q = np.zeros( self.n_gears )
        T = np.zeros( ( self.n_gears , self.R.dof ) )
        #for all gear ratio options
        for i in range( self.n_gears ):
            T[i] = self.computed_torque( ddq_r , x , self.uD(i) )
            # Cost is norm of torque
            #Q[i] = np.dot( T[i] , T[i] )
            # Verify validity
            u_test = np.append( T[i] , self.uD(i) )
            if self.R.isavalidinput( x , u_test ):
                # valid option
                # Cost is norm of torque
                Q[i] = np.dot( T[i] , T[i] )
            else:
                # Bad option: effectively infinite cost
                Q[i] = 9999999999 # INF
                #print 'bad option'
        # Optimal discrete mode
        i_star = Q.argmin()
        #print Q , i_star , t , x
        # Hysteresis
        if self.hysteresis:
            # if optimal gear is new
            if not(i_star == self.last_gear_i ):
                gear_shift_gain = np.linalg.norm( T[ i_star ] - T[ self.last_gear_i ] )
                # if gain of changing is small
                if gear_shift_gain < self.hys_level :
                    # Keep old gear ratio
                    i_star = self.last_gear_i
                    #print 'shifting not Allowed !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
                    #print 'too small gain !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
                # if changed recently
                elif ( t < self.min_delay + self.last_shift_t ):
                    # Keep old gear ratio
                    i_star = self.last_gear_i
                    #print 'shifting not Allowed !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
                    #print 'too sshort delay !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
                    #print ' t:', t , ' timing:' , self.min_delay + self.last_shift_t
                # ok to gear-shift
                else:
                    self.last_gear_i = i_star
                    self.last_shift_t = t
        u = np.append( T[ i_star ] , self.uD( i_star ) )
        return u
    ############################
    def computed_torque( self , ddq_r , x , R ):
        """
        Given actual state and gear ratio, compute the torque necessary
        for a given acceleration vector.
        """
        [ q , dq ] = self.R.x2q( x ) # from state vector (x) to angle and speeds (q,dq)
        F = self.R.T( q , dq , ddq_r , R ) # Generalized force necessarly
        return F
    ############################
    def uD( self , i ):
        """
        Return the discrete input for gear option *i*:
        1-DoF robots take the gear ratio value directly; multi-DoF robots
        take the option index.
        """
        if self.R.dof == 1 :
            return self.R.R[ i ]
        else:
            return i
###############################################################################
### Computed Torque for fixed
###############################################################################
class RfixComputedTorqueController( RminComputedTorqueController ):
    """Computed-torque feedback law with one fixed gear ratio (no search).

    NOTE(review): __init__ calls CTC.ComputedTorqueController.__init__
    directly, bypassing RminComputedTorqueController.__init__, so the
    hysteresis/gear-search attributes (n_gears, hysteresis, ...) are never
    set on instances of this class — confirm nothing inherited reads them.
    """
    ############################
    def __init__( self , R = HM.HybridTwoLinkManipulator() , R_index = 0 ):
        """Wrap robot model *R* and pin the gear to option *R_index*."""
        CTC.ComputedTorqueController.__init__( self , R )
        self.R_index = R_index # Fixed gear ratio index
        # Integral action with dist observer (beta)
        self.dist_obs_active = False
        self.obs = OBS.DistObserver( R )
        self.obs.ishybrid = True
    ############################
    def reset_hysteresis( self ):
        """ Reset all memorized info in controlled, ex: before restarting a simulation """
        # Nothing to reset: the gear is fixed, so no shift history exists.
        pass
    ############################
    def u_star( self , ddq_r , x , t ):
        """
        Only one gear option: compute the torque for the fixed ratio and
        append the discrete gear input.
        """
        Ratio = self.uD( self.R_index )
        Torque = self.computed_torque( ddq_r , x , Ratio )
        u = np.append( Torque , Ratio )
        return u
###############################################################################
### Sliding Mode with Gear optimization
###############################################################################
class RminSlidingModeController( RminComputedTorqueController , CTC.SlidingModeController ):
""" Feedback law """
############################
    def __init__( self , R = HM.HybridTwoLinkManipulator() ):
        """Initialise the gear-optimizing base and the sliding-mode gains."""
        RminComputedTorqueController.__init__( self , R )
        self.lam = 1 # Sliding surface slope
        self.D = 1 # Discontinuous gain
        self.nab = 0.1 # min convergence rate
############################
    def K( self , q , k , t ):
        """Discontinuous gain matrix for discrete mode *k* at configuration *q*.

        Built from the disturbance bound self.D and the minimum convergence
        rate self.nab; the 1-DoF case uses scalar arithmetic, the general
        case matrix inverses.
        """
        dist_max = np.diag( np.ones( self.R.dof ) ) * self.D
        conv_min = np.diag( np.ones( self.R.dof ) ) * self.nab
        if (self.R.dof == 1) :
            # Scalar path: H and R are numbers, not matrices.
            H = self.R.H_all( q , self.uD( k ) )
            H_inv = 1./ H
            R_inv = 1./ self.uD( k )
        else:
            H = self.R.H_all( q , k )
            H_inv = np.linalg.inv( H )
            R_inv = np.linalg.inv( self.R.R[k] )
        Diag_Gain = np.diag( np.diag( np.dot( H_inv , dist_max ) + conv_min )) # take only the diagonal value
        K = np.dot( R_inv , np.dot( H , Diag_Gain ))
        #print H , R_inv , K
        return K
############################
    def sliding_torque( self , ddq_r , s , x , k , t ):
        """
        Given the actual state, compute the torque necessary to guarantee
        convergence: the computed-torque term minus a discontinuous term
        K * sign(s) driving the sliding variable *s* to zero.
        k = discrete mode
        """
        [ q , dq ] = self.R.x2q( x ) # from state vector (x) to angle and speeds (q,dq)
        F_computed = self.R.T( q , dq , ddq_r , self.uD(k) ) # Generalized force necessarly
        F_discontinuous = np.dot( self.K( q , k , t ) , np.sign( s ) )
        F_tot = F_computed - F_discontinuous # np.dot( np.linalg.inv( self.R.R[k] ) , F_discontinuous )
        return F_tot
############################
    def traj_following_ctl( self , x , t ):
        """
        Given desired loaded trajectory and actual state, compute torques and optimal gear ratio.

        NOTE(review): u_star is called here with (ddq_r, x, s, t) — one more
        argument than RminComputedTorqueController.u_star accepts — so this
        class presumably overrides u_star elsewhere; confirm.
        """
        ddq_d , dq_d , q_d = self.get_traj( t )
        [ s , dq_r , ddq_r ] = self.compute_sliding_variables( ddq_d , dq_d , q_d , x )
        u = self.u_star( ddq_r , x , s , t )
        return u
############################
def manual_acc_ctl( self , x , t = 0 ):
"""
experimental
"""
s = self.ddq_manual_setpoint # control directly discontinuous term
ddq_r = np.zeros( self.R.dof )
u = self.u_star( ddq_r , x , s | |
0.00032777508],
[1447.06506, 0.00107791089, 0.000314022414],
[1467.23621, 0.00104861567, 0.000300961867],
[1487.68884, 0.00102122454, 0.00028841052],
[1508.42639, 0.00099408708, 0.000274052145],
[1529.453, 0.000966451073, 0.000259600638],
[1550.77271, 0.0009381891, 0.000246109441],
[1572.38977, 0.000909807102, 0.000233984785],
[1594.30786, 0.000882222608, 0.000223469149],
[1616.53174, 0.000855906866, 0.000213524661],
[1639.06531, 0.000830591307, 0.000203938136],
[1661.91296, 0.00080544065, 0.000194046908],
[1685.0791, 0.000780332251, 0.000184760502],
[1708.56824, 0.000755786663, 0.00017641345],
[1732.38477, 0.000731986947, 0.000168592887],
[1756.53333, 0.000708520762, 0.000160838827],
[1781.01831, 0.000685224426, 0.000154182751],
[1805.84485, 0.000663393934, 0.000148304898],
[1831.01721, 0.000641870429, 0.000141600889],
[1856.54065, 0.000620299485, 0.000135309107],
[1882.41992, 0.00059929362, 0.00012975678],
[1908.65979, 0.00057907094, 0.000124406724],
[1935.2655, 0.000559124397, 0.000118640208],
[1962.24194, 0.000538711087, 0.000112786089],
[1989.59473, 0.000518058601, 0.000107714601],
[2017.32861, 0.000497654604, 0.000103007813],
[2045.44897, 0.000477144145, 9.83261634E-05],
[2073.96143, 0.000456168404, 9.37005825E-05],
[2102.87134, 0.000434328482, 8.91867312E-05],
[2132.18433, 0.000411080488, 8.4838568E-05],
[2161.90576, 0.000385541556, 8.07193428E-05],
[2192.0415, 0.00035512005, 7.66349185E-05],
[2222.59741, 0.000304328278, 8.77657294E-05],
[2253.57935, 0.000284653419, 0.000132101253],
[2284.99292, 0.00027678872, 0.000162094628],
[2316.84448, 0.000274261314, 0.000180490621],
[2349.13989, 0.000282311725, 0.0002117923],
[2381.8855, 0.000302988075, 0.000215160588],
[2415.08789, 0.000315953628, 0.000211770879],
[2448.75293, 0.000319738203, 0.000204800686],
[2482.88721, 0.000321180938, 0.000198571273],
[2517.49707, 0.000321295549, 0.000192112595],
[2552.58984, 0.000319914892, 0.000184955046],
[2588.17163, 0.000316436286, 0.000177570822],
[2624.24927, 0.000311077951, 0.000171155654],
[2660.82983, 0.000306326197, 0.000166974642],
[2697.92041, 0.00029622906, 0.00016041621],
[2735.52783, 0.000288173731, 0.000164850047],
[2773.65967, 0.000287770585, 0.000166646147],
[2812.323, 0.000289621705, 0.000162986296],
[2851.52539, 0.000289405085, 0.000156758688],
[2891.27393, 0.000286930182, 0.000150997337],
[2931.57666, 0.00028419515, 0.000145106285],
[2972.44141, 0.000280389766, 0.000138730917],
[3013.87549, 0.000275848113, 0.000132811518],
[3055.88745, 0.000270997814, 0.000127259569],
[3098.48486, 0.000265349547, 0.000121834579],
[3141.67603, 0.00025944112, 0.000117789423],
[3185.46924, 0.000254448067, 0.000115431911],
[3229.8728, 0.000251068472, 0.000112598871],
[3274.89575, 0.000247620803, 0.000107656],
[3320.54614, 0.000243323477, 0.000103038183],
[3366.83252, 0.000238371664, 9.85808074E-05],
[3413.7644, 0.000233227038, 9.49850146E-05],
[3461.35059, 0.000228896577, 9.19403246E-05],
[3509.59961, 0.000225408527, 8.84907204E-05],
[3558.52148, 0.00022161896, 8.40877983E-05],
[3608.12549, 0.000216917717, 8.01675487E-05],
[3658.4209, 0.000212276878, 7.65144359E-05],
[3709.41724, 0.000207665405, 7.30517495E-05],
[3761.12451, 0.000203113887, 6.97747091E-05],
[3813.55249, 0.000198618596, 6.65971093E-05],
[3866.71143, 0.000194151275, 6.35092438E-05],
[3920.61133, 0.00018969395, 6.05629721E-05],
[3975.26221, 0.00018527887, 5.77637365E-05],
[4030.67554, 0.000180941977, 5.50962977E-05],
[4086.86084, 0.000176640795, 5.24910574E-05],
[4143.82959, 0.000172471337, 5.00457463E-05],
[4201.59229, 0.000168194441, 4.75209708E-05],
[4260.16016, 0.000163842604, 4.5243145E-05],
[4319.54443, 0.000159654592, 4.33770292E-05],
[4379.75635, 0.000155779257, 4.13362613E-05],
[4440.80811, 0.000151880056, 3.94581184E-05],
[4502.71045, 0.000148073246, 3.76098833E-05],
[4565.47559, 0.000144323698, 3.58553734E-05],
[4629.11572, 0.000140643446, 3.41770647E-05],
[4693.64355, 0.000137033261, 3.25731889E-05],
[4759.07031, 0.000133489099, 3.10472351E-05],
[4825.40918, 0.000130013024, 2.9598139E-05],
[4892.67285, 0.000126616898, 2.82368637E-05],
[4960.87402, 0.000123285892, 2.69292977E-05],
[5030.02588, 0.000120038007, 2.5723155E-05],
[5100.14209, 0.00011690383, 2.45648134E-05],
[5171.23486, 0.000113827526, 2.34215622E-05],
[5243.31885, 0.000110821653, 2.23736897E-05],
[5316.4082, 0.000107960426, 2.14585252E-05],
[5390.51611, 0.00010521974, 2.03656655E-05],
[5465.65723, 0.000102400511, 1.93965661E-05],
[5541.84521, 9.96691961E-05, 1.84744094E-05],
[5619.09521, 9.70041947E-05, 1.76009289E-05],
[5697.42285, 9.43996492E-05, 1.67736725E-05],
[5776.8418, 9.18723599E-05, 1.59986685E-05],
[5857.36768, 8.94320838E-05, 1.52547082E-05],
[5939.01611, 8.70488147E-05, 1.44794676E-05],
[6021.80273, 8.46967087E-05, 1.3836674E-05],
[6105.74316, 8.2425875E-05, 1.31666584E-05],
[6190.85449, 8.01967471E-05, 1.25245906E-05],
[6277.15186, 7.80176342E-05, 1.19159604E-05],
[6364.65137, 7.58937094E-05, 1.13378983E-05],
[6453.37158, 7.38237723E-05, 1.07874039E-05],
[6543.32812, 7.18057781E-05, 1.02630947E-05],
[6634.53809, 6.98385556E-05, 9.76390402E-06],
[6727.02051, 6.79209988E-05, 9.2884984E-06],
[6820.7915, 6.60525329E-05, 8.83600478E-06],
[6915.86914, 6.42322047E-05, 8.40526263E-06],
[7012.27295, 6.24589011E-05, 7.99521149E-06],
[7110.02002, 6.0731556E-05, 7.60486455E-06],
[7209.12988, 5.90491145E-05, 7.23327912E-06],
[7309.62109, 5.74103797E-05, 6.87977217E-06],
[7411.51318, 5.58141219E-05, 6.54349424E-06],
[7514.82617, 5.42599118E-05, 6.22345487E-06],
[7619.57861, 5.27465709E-05, 5.91882872E-06],
[7725.79102, 5.12729966E-05, 5.62901778E-06],
[7833.48438, 4.98382287E-05, 5.35337404E-06],
[7942.67969, 4.84414268E-05, 5.09120491E-06],
[8053.396, 4.70816522E-05, 4.84185102E-06],
[8165.65625, 4.57575989E-05, 4.60465753E-06],
[8279.48047, 4.44685356E-05, 4.37901645E-06],
[8394.89258, 4.32135785E-05, 4.16436887E-06],
[8511.91211, 4.19918106E-05, 3.96022233E-06],
[8630.56348, 4.08023225E-05, 3.76610456E-06],
[8750.86914, 3.96442374E-05, 3.58154625E-06],
[8872.85156, 3.8516675E-05, 3.40608403E-06],
[8996.53418, 3.7418642E-05, 3.23929657E-06],
[9121.94141, 3.63491017E-05, 3.08068752E-06],
[9249.0957, 3.53075047E-05, 2.92987102E-06],
[9378.02344, 3.42929015E-05, 2.78646075E-06],
[9508.74805, 3.33043754E-05, 2.65008657E-06],
[9641.29492, 3.23410495E-05, 2.52041991E-06],
[9775.68848, 3.14020544E-05, 2.39715382E-06],
[9911.95703, 3.04863024E-05, 2.27996406E-06],
[10050.125, 2.95921345E-05, 2.1686044E-06],
[10190.2178, 2.87191324E-05, 2.06276309E-06],
[10332.2637, 2.78660864E-05, 1.96214182E-06],
[10476.29, 2.70316359E-05, 1.86649299E-06],
[10622.3232, 2.62142294E-05, 1.77557763E-06],
[10770.3926, 2.54120114E-05, 1.68915096E-06],
[10920.5264, 2.462269E-05, 1.6070004E-06],
[11072.7529, 2.38396115E-05, 1.52892721E-06],
[11227.1006, 2.30578098E-05, 1.4547054E-06],
[11383.6006, 2.22663057E-05, 1.38415396E-06],
[11542.2822, 2.14397242E-05, 1.31709339E-06],
[11703.1748, 2.04854259E-05, 1.25335453E-06],
[11866.3105, 1.7929231E-05, 1.19275307E-06],
[12031.7207, 1.91125182E-05, 2.82612041E-06],
[12199.4365, 1.90790906E-05, 2.68429517E-06],
[12369.4902, 1.87949627E-05, 2.54955216E-06],
[12541.9141, 1.84239525E-05, 2.42158762E-06],
[12716.7412, 1.80098777E-05, 2.30005003E-06],
[12894.0059, 1.75698424E-05, 2.18462287E-06],
[13073.7412, 1.71102602E-05, 2.0749892E-06],
[13255.9824, 1.6630358E-05, 1.97084046E-06],
[13440.7637, 1.61163425E-05, 1.87194019E-06],
[13628.1211, 1.5447029E-05, 1.77798802E-06],
[13818.0889, 1.44683399E-05, 2.35606217E-06],
[14010.7061, 1.48751942E-05, 2.23948064E-06],
[14206.0068, 1.44834885E-05, 2.1286653E-06],
[14404.0312, 1.37876641E-05, 2.32722232E-06],
[14604.8164, 1.40040975E-05, 2.21812297E-06],
[14808.3994, 1.37464795E-05, 2.11415386E-06],
[15014.8203, 1.34591846E-05, 2.01477314E-06],
[15224.1182, 1.31634242E-05, 1.91983827E-06],
[15436.335, 1.28640968E-05, 1.82917256E-06],
[15651.5098, 1.25641982E-05, 1.74259492E-06],
[15869.6816, 1.22656129E-05, 1.65993981E-06],
[16090.8965, 1.19695715E-05, 1.58105388E-06],
[16315.1953, 1.16769697E-05, 1.50576795E-06],
[16542.6191, 1.13880951E-05, 1.43390128E-06],
[16773.2148, 1.11037198E-05, 1.36533504E-06],
[17007.0254, 1.08242912E-05, 1.29992418E-06],
[17244.0938, 1.0550003E-05, 1.23753762E-06],
[17484.4668, 1.0281E-05, 1.17805041E-06],
[17728.1914, 1.00173947E-05, 1.12133057E-06],
[17975.3125, 9.75925104E-06, 1.06724758E-06],
[18225.8789, 9.50651884E-06, 1.01569401E-06],
[18479.9375, 9.25921177E-06, 9.66546622E-07],
[18737.5371, 9.01743533E-06, 9.19706963E-07],
[18998.7285, 8.78115588E-06, 8.75077035E-07],
[19263.5605, 8.55033068E-06, 8.32554178E-07],
[19532.084, 8.3249015E-06, 7.92044773E-07],
[19804.3516, 8.10480287E-06, 7.53460426E-07],
[20080.4141, 7.88996203E-06, 7.1670388E-07],
[20360.3223, 7.68024165E-06, 6.81694075E-07],
[20644.1348, 7.47564627E-06, 6.48351659E-07],
[20931.9023, 7.27609267E-06, 6.16605632E-07],
[21223.6816, 7.08149082E-06, 5.86383294E-07],
[21519.5293, 6.89175022E-06, 5.57611429E-07],
[21819.5, 6.70677809E-06, 5.30223588E-07],
[22123.6523, 6.52648259E-06, 5.04154116E-07],
[22432.0449, 6.35073047E-06, 4.79338439E-07],
[22744.7324, 6.17946671E-06, 4.55721789E-07],
[23061.7832, 6.01259899E-06, 4.33249824E-07],
[23383.252, 5.85004E-06, 4.11867376E-07],
[23709.2012, 5.69169288E-06, 3.91522946E-07],
[24039.6953, 5.53745667E-06, 3.72168103E-07],
[24374.7949, 5.38724771E-06, 3.5375524E-07],
[24714.5664, 5.2409564E-06, 3.36239481E-07],
[25059.0742, 5.09849087E-06, 3.195793E-07],
[25408.3848, 4.95978065E-06, 3.03732264E-07],
[25762.5645, 4.82473661E-06, 2.88660743E-07],
[26121.6797, 4.69327006E-06, 2.74327931E-07],
[26485.8008, 4.56529369E-06, 2.60697846E-07],
[26855., 4.44071657E-06, 2.47738768E-07],
[27229.3438, 4.31945546E-06, 2.35416152E-07],
[27608.9062, 4.20143078E-06, 2.23699431E-07],
[27993.7598, 4.08656342E-06, 2.12559627E-07],
[28383.9766, 3.97478198E-06, 2.0196974E-07],
[28779.6348, 3.86601778E-06, 1.91902828E-07],
[29180.8066, 3.76032222E-06, 1.8233338E-07],
[29587.5723, 3.65740061E-06, 1.73237069E-07],
[30000.0059, 3.55550719E-06, 1.64591498E-07]]),
'Ir' : numpy.array([[30., 0.382560968, 0.510462046],
[30.4181843, 0.382750332, 0.490292817],
[30.8421955, 0.381290048, 0.470920324],
[31.2721214, 0.379120499, 0.452312291],
[31.7080383, 0.376925349, 0.434441537],
[32.150032, 0.375724018, 0.415502936],
[32.5981827, 0.372667342, 0.393861353],
[33.0525818, 0.3665663, 0.372298717],
[33.5133171, 0.357777506, 0.351916134],
[33.9804764, 0.346969247, 0.333494574],
[34.4541473, 0.334692717, 0.318296671],
[34.9344177, 0.323916674, 0.304725826],
[35.4213829, 0.313165724, 0.291733176],
[35.9151382, 0.301930398, 0.279294193],
[36.4157791, 0.289861679, 0.268692076],
[36.9233932, 0.278274477, 0.259898156],
[37.4380875, 0.267920673, 0.25211969],
[37.9599533, 0.258289635, 0.24457407],
[38.4890938, 0.248987883, 0.237253889],
[39.0256119, 0.239589691, 0.230153188],
[39.5696068, 0.229955137, 0.223434657],
[40.1211853, 0.22053729, 0.218160704],
[40.6804543, 0.211778849, 0.213563561],
[41.2475166, 0.203661174, 0.209063441],
[41.8224831, 0.195871159, 0.204658031],
[42.405468, 0.188262433, 0.200345516],
[42.9965744, 0.180541009, 0.196123883],
[43.5959282, 0.172595605, 0.192440212],
[44.2036324, 0.16497086, 0.189108029],
[44.8198051, 0.157440081, 0.185916394],
[45.4445686, 0.149644941, 0.182778478],
[46.0780411, 0.141252339, 0.17985028],
[46.7203445, 0.131770685, 0.177460566],
[47.3715973, 0.119064771, 0.17542316],
[48.0319328, 0.101259857, 0.18233107],
[48.7014732, 0.0937416628, 0.196830064],
[49.3803444, 0.0967554376, 0.215103939],
[50.0686798, 0.106974699, 0.223752931],
[50.7666092, 0.117786579, 0.219126746],
[51.4742699, 0.122661784, 0.214596227],
[52.1917915, 0.125372499, 0.210160419],
[52.9193192, 0.126870394, 0.205815613],
[53.6569862, 0.127688512, 0.20156014],
[54.4049339, 0.127996936, 0.197393194],
[55.163311, 0.127900422, 0.193312213],
[55.9322586, 0.127458349, 0.189314991],
[56.7119217, 0.126692161, 0.185400382],
[57.5024567, 0.12558192, 0.181567967],
[58.3040123, 0.123890027, 0.177814037],
[59.1167336, 0.12111818, 0.174802601],
[59.9407921, 0.118741453, 0.173566192],
[60.7763329, 0.117135741, 0.173008338],
[61.6235237, 0.116037883, 0.172622234],
[62.4825172, 0.115406178, 0.172873974],
[63.3534927, 0.116084509, 0.174368411],
[64.2366028, 0.118819058, 0.175876856],
[65.1320343, 0.124322928, 0.175379485],
[66.0399323, 0.129360929, 0.173348844],
[66.960495, 0.135074094, 0.170606419],
[67.8938904, 0.140986279, 0.165474132],
[68.8402939, 0.145844921, 0.157734275],
[69.7998886, 0.149296522, 0.150164276],
[70.7728577, 0.152850837, 0.141594574],
[71.7593994, 0.153600723, 0.131851643],
[72.7596817, 0.152858958, 0.122712426],
[73.7739182, 0.151138082, 0.114206612],
[74.8022842, 0.148742929, 0.106392302],
[75.844986, 0.145890653, 0.0992223099],
[76.9022217, 0.14273943, 0.092549935],
[77.9741974, 0.139319792, 0.0863260403],
[79.0611115, 0.135596335, 0.0805203095],
[80.1631851, 0.13161467, 0.0751055628],
[81.2806168, 0.127234384, 0.0701379031],
[82.4136276, 0.122814655, 0.0661047772],
[83.5624237, 0.118677497, 0.0627902821],
[84.7272415, 0.114894234, 0.0596419685],
[85.9082947, 0.111310229, 0.0566515662],
[87.1058121, 0.107837372, 0.053811077],
[88.320015, 0.104443975, 0.0511129759],
[89.5511475, 0.101099603, 0.0485501103],
[90.7994385, 0.0977524668, 0.0461158417],
[92.0651398, 0.0944425687, 0.0439386815],
[93.3484802, 0.0912626758, 0.042052608],
[94.649704, 0.0882748365, 0.0402622335],
[95.9690704, 0.0854120702, 0.0385480598],
[97.3068314, 0.0826460049, 0.036906831],
[98.6632385, 0.079962723, 0.0353354961],
[100.038544, 0.077354975, 0.0338310711],
[101.433029, 0.0748210773, 0.0323877521],
[102.846954, 0.0723493174, 0.0309909545],
[104.280579, 0.0699169561, 0.0296282824],
[105.734192, 0.06751322, 0.0283253342],
[107.208076, 0.0651270449, 0.0270798225],
[108.702492, 0.0627477542, 0.0258890297],
[110.217751, 0.06032186, 0.024817083],
[111.754128, 0.0579578504, 0.0238846596],
[113.311913, 0.0557114147, 0.0230465867],
[114.891418, 0.0535378605, 0.0222379137],
[116.492943, 0.0513820127, 0.0214610212],
[118.116798, 0.0490898527, 0.0208237302],
[119.763275, 0.0469982438, 0.0204020403],
[121.432716, 0.04506227, 0.0200180486],
[123.125427, 0.0432211272, 0.0196412876],
[124.841721, 0.0414179638, 0.0192716122],
[126.581955, 0.0396303236, 0.0189522915],
[128.346436, 0.037893828, 0.0187846497],
[130.135513, 0.0363188237, 0.0186778046],
[131.949539, 0.0348493159, 0.0185714737],
[133.788834, 0.0334570557, 0.0184659138],
[135.653778, 0.0321289711, 0.0183609519],
[137.544724, 0.0308521856, 0.0182565842],
[139.462021, 0.0296051782, 0.0181628596],
[141.406052, 0.0283748601, 0.018105628],
[143.377167, 0.0272418112, 0.0181231815],
[145.375778, 0.0261932202, 0.0181409195],
[147.402222, 0.0252082031, 0.0181586165],
[149.45694, 0.0242803507, 0.0181763433],
[151.540268, 0.0234073363, 0.018193299],
[153.652664, 0.0225845147, 0.018206818],
[155.794495, 0.0218049269, 0.0182149597],
[157.966202, 0.0210659653, 0.0182232857],
[160.168167, 0.0203722883, 0.0182317197],
[162.400818, 0.0197206829, 0.0182400551],
[164.664597, 0.0191128068, 0.0182482675],
[166.95993, 0.0185729023, 0.0182500724],
[169.287262, 0.0180800203, 0.0182124097],
[171.647034, 0.0175886527, 0.0181176178],
[174.039703, 0.0170894675, 0.0180232991],
[176.465729, 0.0165994577, 0.0179296322],
[178.925552, 0.0161197502, 0.0178362355],
[181.419678, 0.0156447645, 0.0177433696],
[183.948563, 0.0151728299, 0.0176745877],
[186.512726, 0.0147241652, 0.0176351927],
[189.112595, 0.0143197253, 0.0176124796],
[191.748734, 0.0139609398, 0.0175884422],
[194.4216, 0.0136473412, 0.0175493862],
[197.131729, 0.0133422995, 0.0174819957],
[199.879639, 0.0130483387, 0.01741294],
[202.665863, 0.0127689736, 0.0173441563],
[205.490906, 0.0125064775, 0.017275624],
[208.355347, 0.0122621348, 0.0172074009],
[211.259705, 0.0120368833, 0.0171395633],
[214.204544, 0.0118322773, 0.0170717873],
[217.190445, 0.011665835, 0.0170044713],
[220.217957, 0.0115283858, 0.0169102699],
[223.287674, 0.01139249, 0.0167907067],
[226.400192, 0.01125581, 0.0166620128],
[229.556091, 0.0111241564, 0.0165343452],
[232.755981, 0.0110025592, 0.0164076537],
[236.000458, 0.0108945193, 0.0162818972],
[239.290176, 0.0108075701, 0.0161572099],
[242.625763, 0.0107466131, 0.0160333812],
[246.007828, 0.0107569434, 0.0158781987],
[249.437057, 0.0107690739, 0.0156499315],
[252.914062, 0.0107366331, 0.0153759867],
[256.439545, 0.0106628677, 0.0151068484],
[260.01416, 0.0105769811, 0.0148424571],
[263.638641, 0.0104823206, 0.01458262],
[267.313599, 0.0103826942, 0.0143273249],
[271.039825, 0.0102831274, 0.0140751852],
[274.817963, 0.010181047, 0.0138233416],
[278.648773, 0.0100753913, 0.013573519],
[282.53299, 0.00996799953, 0.0133281723],
[286.471375, 0.00986275263, 0.0130860694],
[290.4646, 0.00976337958, 0.0128399935],
[294.513519, 0.00965594407, 0.0125856586],
[298.618896, 0.00953903608, 0.0123358592],
[302.781494, 0.00941686705, 0.0120910387],
[307.002106, 0.00929041766, 0.0118510742],
[311.281525, 0.00915712025, 0.0116158593],
[315.620605, 0.00901611615, 0.011389968],
[320.020203, 0.00887788087, 0.0111911781],
[324.48111, 0.00875185989, 0.0109988088],
[329.004181, 0.00863394421, 0.0108097745],
[333.590363, 0.00852184184, 0.0106239785],
[338.240417, 0.00841491576, 0.0104413806],
[342.955322, 0.0083131846, 0.0102619063],
[347.735901, 0.00821710285, 0.010085511],
[352.58316, 0.00812787842, 0.00991214812],
[357.497986, 0.00805322547, 0.00974179711],
[362.481323, 0.00798339397, 0.00955115352],
[367.534119, 0.00790747162, 0.00935510825],
[372.657349, 0.00782479066, 0.00916082039],
[377.85199, 0.0077383928, 0.00897055399],
[383.119019, 0.0076512401, 0.00878423639],
[388.459473, 0.00756413257, 0.00860180706],
[393.87439, 0.00747790281, 0.00842315797],
[399.364807, 0.00739345793, 0.0082481876],
[404.931732, 0.00731134461, 0.00807688665],
[410.576233, 0.00723261293, 0.00790914707],
[416.299469, 0.00715993857, 0.00774487481],
[422.102448, 0.00709540863, 0.00757851033],
[427.986359, 0.00703656161, 0.0074007581],
[433.95224, 0.00696994038, 0.00721171731],
[440.001282, 0.00688958354, 0.00702750031],
[446.134674, 0.00680606347, 0.00684798229],
[452.353577, 0.00672027422, 0.00667304732],
[458.659119, 0.00663290778, 0.00650257524],
[465.052582, 0.00654456392, 0.00633646129],
[471.535156, 0.0064557218, 0.00617460208],
[478.108093, 0.00636671996, 0.00601687143],
[484.772644, 0.00627796166, 0.00586316735],
[491.530151, 0.00618969975, 0.00571338367],
[498.381775, 0.00610219734, 0.00556743843],
[505.328979, 0.00601569004, 0.00542521803],
[512.372986, 0.00593041163, 0.00528663537],
| |
# Source repository: virbickt/tlc-data-pipeline
import os
import subprocess
import time

import pyodbc
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.sql import SqlManagementClient
from dotenv import find_dotenv, load_dotenv
load_dotenv(find_dotenv())  # populate os.environ from the nearest .env file at import time
class Database:
def __init__(
self, subscription_id: str, client_id: str, client_secret: str, tenant_id: str
) -> None:
self.subscription_id = subscription_id
self.client_id = client_id
self.client_secret = client_secret
self.tenant_id = tenant_id
def get_clients(self) -> None:
"""
Authenticates the service principal to get the clients required to create server and database.
:returns: None
:rtype: NoneType
"""
credentials = ServicePrincipalCredentials(
client_id=self.client_id, secret=self.client_secret, tenant=self.tenant_id
)
resource_client = ResourceManagementClient(credentials, self.subscription_id)
sql_client = SqlManagementClient(credentials, self.subscription_id)
return resource_client, sql_client
def create_resource_group(
self, group_name: str = "sample-rg", region: str = "northeurope"
) -> None:
"""
Creates a new resource group.
:param group_name str: the name for the new resource group that is to be created.
:param region str: region which the new resource group is going to be assigned to. To list all the available regions in the accessible format, use 'az account list-locations -o table' on Azure CLI.
:returns: None
:rtype: NoneType
"""
resource_client, _ = self.get_clients()
print(f"Creating a new resource group '{group_name}' ({region})...\n")
resource_client.resource_groups.create_or_update(
group_name, {"location": region}
)
print(f"Resource group '{group_name}' created successfully.\n")
def create_server(
self,
server_name: str = "sample-server",
administrator_login: str = "admin",
administrator_login_password: str = "please_God_just_make_this_work",
group_name: str = "tlc-data-rg",
region: str = "northeurope",
) -> None:
"""
Creates a new server.
:param server_name str: the name for the server
:param group_name str: the name for the new resource group that is to be created.
:param region str: region which the new resource group is going to be assigned to. To list all the available regions in the accessible format, use 'az account list-locations -o table' on Azure CLI.
:returns: None
:rtype: NoneType
"""
_, sql_client = self.get_clients()
print(f"Creating a new server '{server_name}' ({region})...\n")
server = sql_client.servers.create_or_update(
group_name,
server_name,
{
"location": region,
"version": "12.0", # Required for create
"administrator_login": os.getenv(
"ADMINISTRATOR_LOGIN"
), # Required for create
"administrator_login_password": os.getenv(
"ADMINISTRATOR_LOGIN_PASSWORD"
), # Required for create
},
)
print(f"Server '{server_name}' created successfully.\n")
def create_database(
self,
group_name: str = "tlc-data-rg",
server_name: str = "tlc-data-server",
database_name: str = "tlc-data-db",
region: str = "northeurope",
collation: str = "SQL_Latin1_General_CP1_CI_AS",
pricing_tier: str = "S0",
) -> None:
"""
Creates a new SQL database.
:param group_name str: the name for the new resource group that the database to be created will belong to.
:param server_name str: the name of the server that will host the database to be created.
:param database_name str: the naem for the database to be created.
:param region str: region which the new resource group is going to be assigned to. To list all the available regions in the accessible format, use 'az account list-locations -o table' on Azure CLI.
:param collation str: type of collation to be used. Collations determine sorting rules as well as case/accent sensitivity for the data which means that the results of the exactly same query might differ when it is on databases with different collations. For the types of collations available, please refer to https://docs.microsoft.com/en-us/sql/relational-databases/collations/collation-and-unicode-support?view=sql-server-ver15.
:param pricing_tier str: the pricing tier for the database to be created. Pricing tier determines fixed amount of compute resource that is to be allocated to the database for a fixed price billed hourly.
:returns: None
:rtype: NoneType
"""
_, sql_client = self.get_clients()
print(f"Creating a new database '{database_name}' ({region})...\n")
database = sql_client.databases.create_or_update(
group_name,
server_name,
database_name,
{
"location": region,
"collation": collation,
"create_mode": "default",
"requested_service_objective_name": pricing_tier,
},
)
print(f"Database '{database_name}' created successfully.\n")
def whitelist_ip(
self,
rule_name: str = "test-rule",
resource_group: str = "tlc-data-rg",
server_name: str = "tlc-data-server",
ip_address: str = "192.168.127.12",
) -> None:
"""
Add the given IP adress to the list of IP adressess that have access to the database. While the indended use case is adding a single IP address, it is originally intended to whitelist a range of IP adresses. This is useful for cases when IP adress change as it would still fall inside the range of the whitelisted IP addresses.
:param group_name str: the name for the new resource group that the database belongs to.
:param server_name str: the name of the database that the access is to be granted to.
:param ip_address str: the IP address to grant the access to the database.
:returns: None
:rtype: NoneType
"""
print(f"Creating a new firewall rule '{rule_name}'...")
os.system(
f"az sql server firewall-rule create --name {rule_name} --resource-group {resource_group} --server {server_name} --start-ip-address {ip_address} --end-ip-address {ip_address}"
)
print("\nWaiting for whitelisting of IP to take effect...")
time.sleep(60)
def encrypt_database(
self,
server_name: str = "sample-server",
database_name: str = "sample-database",
login_username: str = "secretagent",
login_password: str = "<PASSWORD>",
encryption_password: str = "<PASSWORD>",
driver: int = 17,
) -> None:
"""
Encrypt the database.
:param server_name str: the name of the server that hosts the database to be encrypted.
:param database_name str: the name of the database that is to be encrypted.
:param login_username str: the login username for the database which was set when creating the database.
:param login_password str: the password for the database which was set when creating the database.
:param encryption_password str: the password used for the encryption of the database.
:param driver int: ODBC driver version. The default version is ODBC Driver 17 for SQL Server.
:returns: None
:rtype: NoneType
"""
query = f"CREATE MASTER KEY ENCRYPTION BY PASSWORD = '{encryption_password}'"
with pyodbc.connect(
"DRIVER="
+ f"{{ODBC Driver {driver} for SQL Server}}"
+ ";SERVER=tcp:"
+ f"{server_name}.database.windows.net"
+ ";PORT=1433;DATABASE="
+ database_name
+ ";UID="
+ os.getenv("ADMINISTRATOR_LOGIN")
+ ";PWD="
+ os.getenv("ADMINISTRATOR_LOGIN_PASSWORD")
) as conn:
with conn.cursor() as cursor:
cursor.execute(query)
def create_credentials(
self,
server_name: str = "sample-server",
database_name: str = "sample-database",
login_username: str = "secretagent",
login_password: str = "<PASSWORD>",
driver: int = 17,
sas_token: str = "'sv=2020-08-04&ss=bfqt&srt=sco&sp=rwdlacupitfx&se=2022-01-20T04:01:24Z&st=2022-01-08T20:01:24Z&spr=https&sig=ymHDIq%2FKSemvQFeGQwR%2FFifUX3yyQNdH8N7l9QNNq7U%3D'",
) -> None:
"""
Encrypt the database.
:param server_name str: the name of the server that hosts the database to be encrypted.
:param database_name str: the name of the database that is to be encrypted.
:param login_username str: the login username for the database which was set when creating the database.
:param login_password str: the password for the database which was set when creating the database.
:param sas_token str: shared access signature (SAS) which is required to be generated using Azure Platform prior to executing the function.
:param driver int: ODBC driver version. The default version is ODBC Driver 17 for SQL Server.
:returns: None
:rtype: NoneType
"""
query = f"""
CREATE DATABASE SCOPED CREDENTIAL BlobCredential
WITH IDENTITY = 'SHARED ACCESS SIGNATURE',
SECRET = {sas_token};
"""
with pyodbc.connect(
"DRIVER="
+ f"{{ODBC Driver {driver} for SQL Server}}"
+ ";SERVER=tcp:"
+ f"{server_name}.database.windows.net"
+ ";PORT=1433;DATABASE="
+ database_name
+ ";UID="
+ os.getenv("ADMINISTRATOR_LOGIN")
+ ";PWD="
+ os.getenv("ADMINISTRATOR_LOGIN_PASSWORD")
) as conn:
with conn.cursor() as cursor:
cursor.execute(query)
def create_external_data_source(
self,
server_name: str = "sample-server",
database_name: str = "sample-database",
login_username: str = "secretagent",
login_password: str = "<PASSWORD>",
driver: int = 17,
datasource_name: str = "sample-datasource",
container_name: str = "tlc-data",
location: str = "storageassessmentmacaw",
) -> None:
"""
Creates an external data source.
:param server_name str: the name of the server that hosts the database to be encrypted.
:param database_name str: the name of the database that is to be encrypted.
:param login_username str: the login username for the database which was set when creating the database.
:param login_password str: the password for the database which was set when creating the database.
:param driver int: ODBC driver version. The default version is ODBC Driver 17 for SQL Server.
:param datasource_name: custom name for the external datasource which is to be used to upload data to the database.
:param location str: the name of the storage which is required to be created prior to executing the function.
:param container_name str: the name of the container which is to be established as an external data source.
:returns: None
:rtype: NoneType
"""
datasource_name = f"'{datasource_name}'"
location = f"'https://{location}.blob.core.windows.net/{container_name}'"
query = f"""
CREATE EXTERNAL DATA SOURCE AzureBlob
WITH (
TYPE = BLOB_STORAGE,
LOCATION | |
# Source repository: ecdavis/pants — file: pants/server.py
###############################################################################
#
# Copyright 2011-2012 Pants Developers (see AUTHORS.txt)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
"""
Streaming (TCP) server implementation.
Servers are one of the two main types of channels in Pants - the other
being :mod:`streams <pants.stream>`. Servers listen for connections to
your application, accept those connections and allow you to handle them
easily. Pants servers support SSL and IPv6.
Servers
=======
Writing Servers
---------------
You have two choices when writing a server application: either use
Pants' default :class:`~pants.server.Server` class without modification
or subclass :class:`~pants.server.Server` in order to implement custom
behaviour.
Pants' default :class:`~pants.server.Server` class will wrap every new
connection in an instance of a connection class which you provide (see
below). In most cases, this provides you with sufficient freedom to
implement your application logic and has the added benefit of
simplicity. To use the default server, simply instantiate
:class:`~pants.server.Server` and pass your connection class to the
constructor.
If you need to implement custom server behaviour, you can subclass
:class:`~pants.server.Server` and define your connection class as a
class attribute::
class MyServer(pants.Server):
ConnectionClass = MyConnectionClass
It is recommended that you use the default :class:`~pants.server.Server`
class where possible and try to implement your application logic in your
connection class.
Connection Classes
------------------
A connection class is a subclass of :class:`~pants.stream.Stream` which
a server will use to wrap each incoming connection. Every time a new
connection is made to the server, a new instance of your connection
class will be created to handle it. You can override the various event
handler methods of :class:`~pants.stream.Stream` to implement your
application's logic.
Running Servers
---------------
Having defined your connection class and instantiated your server, you
can start it listening for new connections with the
:meth:`~pants.server.Server.listen` method. This will bind the server
to your chosen address and once the :mod:`~pants.engine` is started,
the server will begin accepting new connections. Once the server has
started listening for connections it can be stopped using the
:meth:`~pants.server.Server.close` method. When
:meth:`~pants.server.Server.close` is called, the default server
implementation will close any connections that were made to it which are
still open.
SSL
===
Pants servers have SSL support. If you want to start an SSL-enabled
server, call the :meth:`~pants.server.Server.startSSL` method before
calling the :meth:`~pants.server.Server.listen` method. When you call
:meth:`~pants.server.Server.startSSL` you must provide a dictionary of
SSL options as detailed in the method documentation. It is also
possible to pass the SSL options dictionary directly to the
:class:`~pants.server.Server` constructor in order to enable SSL.
Here is an example of how you might start an SSL-enabled server::
server = pants.Server(MyConnectionClass)
server.startSSL({
'certfile': '/home/user/certfile.pem',
'keyfile': '/home/user/keyfile.pem'
})
server.listen(('', 8080))
If you are writing an SSL-enabled application you should read the
entirety of Python's :mod:`ssl` documentation. Pants does not override
any of Python's SSL defaults unless clearly stated in this documentation.
"""
###############################################################################
# Imports
###############################################################################
import socket
import ssl
import weakref
from pants._channel import _Channel, HAS_IPV6, sock_type
from pants.stream import Stream
###############################################################################
# Logging
###############################################################################
import logging
log = logging.getLogger("pants")
###############################################################################
# Server Class
###############################################################################
class Server(_Channel):
"""
A stream-oriented server channel.
A :class:`~pants.server.Server` instance represents a local server
capable of listening for connections from remote hosts over a
connection-oriented protocol such as TCP/IP.
================= ================================================
Keyword Argument Description
================= ================================================
engine *Optional.* The engine to which the channel
should be added. Defaults to the global engine.
socket *Optional.* A pre-existing socket to wrap. This
can be a regular :py:obj:`~socket.socket` or an
:py:obj:`~ssl.SSLSocket`. If a socket is not
provided, a new socket will be created for the
channel when required.
ssl_options *Optional.* If provided,
:meth:`~pants.server.Server.startSSL` will be
called with these options once the server is
ready. By default, SSL will not be enabled.
================= ================================================
"""
ConnectionClass = Stream
def __init__(self, ConnectionClass=None, **kwargs):
    """
    Initialise the server channel.

    Validates that any pre-supplied socket is a stream socket, sets up
    address/SSL/listening state, and records the connection class used
    to wrap accepted connections.
    """
    provided_socket = kwargs.get("socket")
    if provided_socket and sock_type(provided_socket) != socket.SOCK_STREAM:
        raise TypeError("Cannot create a %s with a socket type other than SOCK_STREAM."
            % self.__class__.__name__)
    _Channel.__init__(self, **kwargs)
    # Socket-related state.
    self._remote_address = None
    self._local_address = None
    self._slave = None
    # Channel state.
    self.listening = False
    # SSL state; enabled right away when ssl_options was supplied.
    self.ssl_enabled = False
    self._ssl_options = None
    ssl_options = kwargs.get("ssl_options")
    if ssl_options is not None:
        self.startSSL(ssl_options)
    # Class used to wrap each accepted connection.
    if ConnectionClass is not None:
        self.ConnectionClass = ConnectionClass
    # Accepted channels are held weakly so closed ones can be collected.
    self.channels = weakref.WeakValueDictionary()
##### Properties ##########################################################
@property
def remote_address(self):
    """
    The remote address associated with the channel.

    Returns the explicitly assigned override if one was set, otherwise
    whatever the underlying socket's ``getpeername()`` reports.
    """
    return self._remote_address or self._socket.getpeername()
@remote_address.setter
def remote_address(self, val):
    # Explicit override; a falsy value falls back to the socket's answer.
    self._remote_address = val
@property
def local_address(self):
    """
    The local address the channel is bound to.

    Returns the explicitly assigned override if one was set, otherwise
    whatever the underlying socket's ``getsockname()`` reports.
    """
    return self._local_address or self._socket.getsockname()
@local_address.setter
def local_address(self, val):
    # Explicit override; a falsy value falls back to the socket's answer.
    self._local_address = val
##### Control Methods #####################################################
def startSSL(self, ssl_options=None):
    """
    Enable SSL on the channel.
    Enabling SSL on a server channel will cause any new connections
    accepted by the server to be automatically wrapped in an SSL
    context before being passed to
    :meth:`~pants.server.Server.on_accept`. If an error occurs while
    a new connection is being wrapped,
    :meth:`~pants.server.Server.on_ssl_wrap_error` is called.
    SSL is enabled immediately. Typically, this method is called
    before :meth:`~pants.server.Server.listen`. If it is called
    afterwards, any connections made in the meantime will not have
    been wrapped in SSL contexts.
    The SSL options argument will be passed through to each
    invocation of :func:`ssl.wrap_socket` as keyword arguments - see
    the :mod:`ssl` documentation for further information. You will
    typically want to provide the ``keyfile``, ``certfile`` and
    ``ca_certs`` options. The ``do_handshake_on_connect`` option
    **must** be ``False`` and the ``server_side`` option **must** be
    true, or a :exc:`ValueError` will be raised.
    Attempting to enable SSL on a closed channel or a channel that
    already has SSL enabled on it will raise a :exc:`RuntimeError`.
    Returns the channel.
    ============ ===================================================
    Arguments    Description
    ============ ===================================================
    ssl_options  *Optional.* Keyword arguments to pass to
                 :func:`ssl.wrap_socket`. Defaults to an empty
                 dictionary.
    ============ ===================================================
    """
    if self.ssl_enabled:
        raise RuntimeError("startSSL() called on SSL-enabled %r." % self)
    if self._closed:
        raise RuntimeError("startSSL() called on closed %r." % self)
    # BUGFIX: the previous `ssl_options={}` default was mutated by the
    # setdefault() calls below, so the *shared* default dict accumulated
    # state across every call of this method. Use a None sentinel instead.
    if ssl_options is None:
        ssl_options = {}
    if ssl_options.setdefault("server_side", True) is not True:
        raise ValueError("SSL option 'server_side' must be True.")
    if ssl_options.setdefault("do_handshake_on_connect", False) is not False:
        raise ValueError("SSL option 'do_handshake_on_connect' must be False.")
    self.ssl_enabled = True
    self._ssl_options = ssl_options
    return self
def listen(self, address, backlog=1024, slave=True):
    """
    Begin listening for connections made to the channel.

    The ``address`` is resolved, the channel is bound to it, and
    listening starts; :meth:`~pants.server.Server.on_listen` is then
    called. Address forms: a string is a UNIX address, a bare integer
    becomes ``('', port)``, a 2-tuple is IPv4 and a 4-tuple is IPv6
    (see the :mod:`socket` documentation). A socket of the appropriate
    family is created if the channel has none.

    Raises :exc:`RuntimeError` when the channel is closed or already
    listening, and :exc:`ValueError` when the address family cannot be
    determined. Returns the channel.

    =============== ================================================
    Arguments       Description
    =============== ================================================
    address         The local address to listen for connections on.
    backlog         *Optional.* Maximum size of the connection queue.
    slave           *Optional.* If True, a Server listening on IPv6
                    INADDR_ANY creates a slave Server listening on
                    the IPv4 INADDR_ANY.
    =============== ================================================
    """
    # Guard clauses: refuse to listen twice or on a closed channel.
    if self.listening:
        raise RuntimeError("listen() called on active %r." % self)
    if self._closed:
        raise RuntimeError("listen() called on closed %r." % self)
    resolved_address, family, resolved = self._format_address(address)
    if not family:
        raise ValueError("Unable to determine address family from "
                         "address: %s" % repr(address))
    self._do_listen(resolved_address, family, backlog, slave)
    return self
def close(self):
"""
Close the channel.
The channel will be closed immediately and will cease to accept
new connections. Any connections accepted by this channel will
remain open and will need to be closed separately. If this
channel has an IPv4 slave (see
:meth:`~pants.server.Server.listen`) it will be closed.
Once closed, a channel cannot be re-opened.
"""
| |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (c) The Lab of Professor <NAME> (<EMAIL>),
# School of Computer Science and Engineering, South China University of Technology.
# A-Tune is licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# Create: 2020-01-04
x1 = 3
x2 = 1
x3 = 5
x4 = 3
x5 = 3
x6 = 3
x7 = 3
x8 = 3
x9 = 3
x10 = 2
x11 = 2
x12 = 4
x13 = 4
x14 = 2
x15 = 2
x16 = 1
x17 = 2
x18 = 5
x19 = 1
x20 = 1
x21 = 1
x22 = 1
x23 = 1
x24 = 2
x25 = 4
x26 = 2
x27 = 3
x28 = 1
x29 = 2
x30 = 4
x31 = 4
x32 = 1
x33 = 4
x34 = 1
x35 = 2
x36 = 1
x37 = 3
x38 = 2
x39 = 1
x40 = 2
x41 = 3
x42 = 3
x43 = 2
x44 = 2
x45 = 2
x46 = 4
x47 = 4
x48 = 2
x49 = 2
x50 = 2
x51 = 2
x52 = 1
x53 = 4
x54 = 3
x55 = 3
x56 = 1
x57 = 2
x58 = 3
x59 = 3
x60 = 3
x61 = 1
x62 = 3
x63 = 3
x64 = 4
x65 = 3
x66 = 2
x67 = 3
x68 = 3
x69 = 3
x70 = 2
x71 = 4
x72 = 1
x73 = 3
x74 = 2
x75 = 3
x76 = 1
x77 = 3
x78 = 1
x79 = 4
x80 = 2
x81 = 1
x82 = 1
x83 = 2
x84 = 4
x85 = 5
x86 = 3
x87 = 4
x88 = 2
x89 = 2
x90 = 1
x91 = 2
x92 = 1
x93 = 2
x94 = 1
x95 = 2
x96 = 3
x97 = 3
x98 = 2
x99 = 2
x100 = 3
x101 = 4
x102 = 3
x103 = 2
x104 = 2
x105 = 3
x106 = 5
x107 = 4
x108 = 2
x109 = 1
x110 = 4
x111 = 3
x112 = 4
x113 = 2
x114 = 2
x115 = 4
x116 = 4
x117 = 2
x118 = 3
x119 = 2
x120 = 4
x121 = 3
x122 = 2
x123 = 4
x124 = 4
x125 = 3
x126 = 4
x127 = 1
x128 = 3
x129 = 3
x130 = 5
x131 = 4
x132 = 3
x133 = 1
x134 = 2
x135 = 1
x136 = 1
x137 = 4
x138 = 4
x139 = 3
x140 = 1
x141 = 4
x142 = 1
x143 = 1
x144 = 4
x145 = 5
x146 = 4
x147 = 1
x148 = 4
x149 = 3
x150 = 3
y = 1 * x147 ** 1 + 2 * x80 ** 1 + 3 * x55 ** 1 + 4 * x81 ** 1 + 5 * x87 ** 1 + 1 * x82 ** 2 + 2 * x88 ** 2 + \
3 * x83 ** 2 + 4 * x144 ** 2 + 5 * x38 ** 2 + 1 * x135 ** 3 + 2 * x125 ** 3 + 3 * x14 ** 3 + 4 * x65 ** 3 + \
5 * x95 ** 3 + 1 * x73 ** 4 + 2 * x37 ** 4 + 3 * x105 ** 4 + 4 * x28 ** 4 + 5 * x121 ** 4 + 1 * x100 ** 5 + \
2 * x141 ** 5 + 3 * x69 ** 5 + 4 * x97 ** 5 + 5 * x53 ** 5 + 1 * x126 ** 6 + 2 * x104 ** 6 + 3 * x103 ** 6 + \
4 * x27 ** 6 + 5 * x10 ** 6 + 1 * x140 ** 7 + 2 * x54 ** 7 + 3 * x5 ** 7 + 4 * x70 ** 7 + 5 * x114 ** 7 + \
1 * x57 ** 8 + 2 * x74 ** 8 + 3 * x26 ** 8 + 4 * x19 ** 8 + 5 * x111 ** 8 + 1 * x108 ** 9 + 2 * x48 ** 9 + \
3 * x11 ** 9 + 4 * x59 ** 9 + 5 * x123 ** 9 + 1 * x61 ** 10 + 2 * x6 ** 10 + 3 * x79 ** 10 + 4 * x71 ** 10 + \
5 * x98 ** 10 + 1 * x34 ** 11 + 2 * x112 ** 11 + 3 * x25 ** 11 + 4 * x93 ** 11 + 5 * x86 ** 11 + 1 * x64 ** 12 + \
2 * x120 ** 12 + 3 * x20 ** 12 + 4 * x16 ** 12 + 5 * x94 ** 12 + 1 * x76 ** 13 + 2 * x21 ** 13 + 3 * x129 ** 13 + \
4 * x146 ** 13 + 5 * x77 ** 13 + 1 * x46 ** 14 + 2 * x91 ** 14 + 3 * x31 ** 14 + 4 * x67 ** 14 + 5 * x150 ** 14 + \
1 * x72 ** 15 + 2 * x84 ** 15 + 3 * x136 ** 15 + 4 * x15 ** 15 + 5 * x149 ** 15 + 1 * x2 ** 16 + 2 * x116 ** 16 + \
3 * x66 ** 16 + 4 * x42 ** 16 + 5 * x45 ** 16 + 1 * x63 ** 17 + 2 * x85 ** 17 + 3 * x143 ** 17 + 4 * x4 ** 17 + \
5 * x29 ** 17 + 1 * x113 ** 18 + 2 * x50 ** 18 + 3 * x132 ** 18 + 4 * x127 ** 18 + 5 * x30 ** 18 + 1 * x109 ** 19 +\
2 * x131 ** 19 + 3 * x36 ** 19 + 4 * x9 ** 19 + 5 * x43 ** 19 + 1 * x119 ** 20 + 2 * x8 ** 20 + 3 * x68 ** 20 + \
4 * x107 ** 20 + 5 * x12 ** 20 + 1 * x32 ** 21 + 2 * x122 ** 21 + 3 * x115 ** 21 + 4 * x75 ** 21 + 5 * x49 ** 21 + \
1 * x110 ** 22 + 2 * x40 ** 22 + 3 * x17 ** 22 + 4 * x134 ** 22 + 5 * x128 ** 22 + 1 * x18 ** 23 + 2 * x142 ** 23 +\
3 * x133 ** 23 + 4 * x24 ** 23 + 5 * x102 ** 23 + 1 * x145 ** 24 + 2 * x33 ** 24 + 3 * x106 ** 24 + 4 * x58 ** 24 +\
5 * x47 ** 24 + 1 * x22 ** 25 + 2 * x118 ** 25 + 3 * x44 ** 25 + 4 * x35 ** 25 + 5 * x90 ** 25 + 1 * x96 ** 26 + \
2 * x62 ** 26 + 3 * x78 ** 26 + 4 * x39 ** 26 + 5 * x99 ** 26 + 1 * x117 ** 27 + 2 * x1 ** 27 + 3 * x3 ** 27 + \
4 * x7 ** 27 + 5 * x52 ** 27 + 1 * x60 ** 28 + 2 * x124 ** 28 + 3 * x139 ** 28 + 4 * x101 ** 28 + 5 * x23 ** 28 + \
1 * x92 ** 29 + 2 * x148 ** 29 + 3 * x137 ** 29 + 4 * x89 ** | |
from copy import deepcopy
from json import dumps
from re import IGNORECASE, escape, findall
from typing import (
Dict,
Iterable,
Iterator,
List,
MutableMapping,
Optional,
Tuple,
Type,
Union,
)
from kiss_headers.structures import CaseInsensitiveDict
from kiss_headers.utils import (
extract_comments,
header_content_split,
header_name_to_class,
header_strip,
is_legal_header_name,
normalize_str,
prettify_header_name,
unfold,
unpack_protected_keyword,
unquote,
)
OUTPUT_LOCK_TYPE: bool = False
class Header(object):
"""
Object representation of a single Header.
"""
# Most common attribute that are associated with value in headers.
# Used for type hint, auto completion purpose
charset: str
format: str
boundary: str
expires: str
timeout: str
max: str
path: str
samesite: str
domain: str
filename: str
def __init__(self, name: str, content: str):
    """
    Parse ``content`` into valued (``key=value``) attributes and plain
    (non-valued) members, keeping the raw content as captured.

    :param name: The name of the header, should contain only ASCII characters with no spaces in it.
    :param content: Initial content associated with the header.
    :raises ValueError: If ``name`` is not a legal header name.
    """
    if not is_legal_header_name(name):
        raise ValueError(
            f"'{name}' is not a valid header name. Cannot proceed with it."
        )
    self._name: str = name  # original spelling, kept verbatim
    self._normalized_name: str = normalize_str(self._name)  # lower case, '-' -> '_'
    self._pretty_name: str = prettify_header_name(self._name)
    self._content: str = content
    # Raw ';'-separated members, e.g. "text/html" and "charset=UTF-8".
    self._members: List[str] = header_content_split(self._content, ";")
    # Members without a usable '=' pair, e.g. "HttpOnly".
    self._not_valued_attrs: List[str] = list()
    # key -> value (or list of values when a key repeats), case-insensitive.
    self._valued_attrs: MutableMapping[
        str, Union[str, List[str]]
    ] = CaseInsensitiveDict()
    for member in self._members:
        if member == "":
            continue
        if "=" in member:
            key, value = tuple(member.split("=", maxsplit=1))
            # avoid confusing base64 look alike single value for (key, value)
            if value.count("=") == len(value) or len(value) == 0 or " " in key:
                self._not_valued_attrs.append(unquote(member))
                continue
            if key not in self._valued_attrs:
                self._valued_attrs[key] = value
            else:
                # Repeated key: promote the stored value to a list and append.
                if isinstance(self._valued_attrs[key], str):
                    self._valued_attrs[key] = [self._valued_attrs[key], value]  # type: ignore
                else:
                    self._valued_attrs[key].append(value)  # type: ignore
            continue
        # No '=' at all: a plain member such as "preload".
        self._not_valued_attrs.append(unquote(member))
@property
def name(self) -> str:
    """
    Output the original header name as it was captured initially,
    preserving the caller's exact casing.
    """
    return self._name
@property
def normalized_name(self) -> str:
    """
    Output header name but normalized: lower case and every '-'
    character replaced by '_'.
    """
    return self._normalized_name
@property
def pretty_name(self) -> str:
    """
    Output a prettified name of the header: the first letter of each
    word capitalized.
    """
    return self._pretty_name
@property
def content(self) -> str:
    """
    Output associated content to header as it was captured initially.
    >>> header = Header("ETag", '"33a64df551425fcc55e4d42a148795d9f25f89d4"')
    >>> header.content
    '33a64df551425fcc55e4d42a148795d9f25f89d4'
    """
    # Unquote content if there is only one value/attribute in it. Like the ETag header.
    # NOTE(review): relies on self.attrs, defined elsewhere in this class —
    # assumed to list every member/attribute of the header.
    if len(self.attrs) == 1:
        return unquote(self._content)
    return self._content
@property
def unfolded_content(self) -> str:
    """Output unfolded associated content to header. Meaning that every
    LF + n space(s) sequence (folded line) is properly replaced."""
    return unfold(self.content)
@property
def comments(self) -> List[str]:
    """Retrieve comments in header content, as found by
    ``extract_comments`` (presumably the parenthesized segments — see
    kiss_headers.utils)."""
    return extract_comments(self.content)
def __lt__(self, other: object) -> bool:
    """
    This method is only implemented to make sorted work with Header.
    The lower than is based on alphabetical order using the header name.
    >>> Header("A", "") < Header("Z", "")
    True
    """
    # BUGFIX: rich comparison methods must return the NotImplemented
    # sentinel for unsupported operands (letting Python try the reflected
    # operation or raise TypeError), not raise NotImplementedError.
    if not isinstance(other, Header):
        return NotImplemented
    return self.normalized_name < other.normalized_name
def __gt__(self, other: object) -> bool:
    """
    This method is only implemented to make sorted work with Header.
    The greater than is based on alphabetical order using the header name.
    >>> Header("A", "") > Header("Z", "")
    False
    """
    # BUGFIX: rich comparison methods must return the NotImplemented
    # sentinel for unsupported operands (letting Python try the reflected
    # operation or raise TypeError), not raise NotImplementedError.
    if not isinstance(other, Header):
        return NotImplemented
    return self.normalized_name > other.normalized_name
def __deepcopy__(self, memodict: Dict) -> "Header":
    """Produce a fully independent copy of this Header, sharing no
    references with the original."""
    name_copy = deepcopy(self.name)
    content_copy = deepcopy(self.content)
    return Header(name_copy, content_copy)
def __iadd__(self, other: Union[str, "Header"]) -> "Header":
    """
    Allow you to assign-add any string to a Header instance. The string
    becomes a new (non-valued) member of the header.
    >>> header = Header("X-Hello-World", "")
    >>> repr(header)
    'X-Hello-World: '
    >>> header += "preload"
    >>> repr(header)
    'X-Hello-World: preload'
    >>> header += "inclSubDomain"
    >>> repr(header)
    'X-Hello-World: preload; inclSubDomain'
    >>> header += 1
    Traceback (most recent call last):
    ...
    TypeError: Cannot assign-add with type <class 'int'> to an Header.
    """
    # NOTE(review): the annotation admits Header but the runtime check only
    # accepts str, so `header += other_header` raises TypeError here —
    # confirm whether Header operands are intended (cf. __add__ below).
    if not isinstance(other, str):
        raise TypeError(
            "Cannot assign-add with type {type_} to an Header.".format(
                type_=type(other)
            )
        )
    self._not_valued_attrs.append(other)
    # Prefix with '; ' only when there is already visible content.
    self._content += "; " + other if self._content.lstrip() != "" else other
    return self
def __add__(self, other: Union[str, "Header"]) -> Union["Header", "Headers"]:
    """
    Add either a string or a Header to this Header instance.
    Adding a string creates a new Header with an extra member (see the
    __iadd__ documentation). Adding another Header produces a Headers
    object containing both Header objects.
    >>> headers = Header("X-Hello-World", "1") + Header("Content-Type", "happiness=True")
    >>> len(headers)
    2
    >>> headers.keys()
    ['X-Hello-World', 'Content-Type']
    >>> Header("Content-Type", "happiness=True") + 1
    Traceback (most recent call last):
    ...
    TypeError: Cannot make addition with type <class 'int'> to an Header.
    """
    if not isinstance(other, str) and not isinstance(other, Header):
        raise TypeError(
            "Cannot make addition with type {type_} to an Header.".format(
                type_=type(other)
            )
        )
    # Header + Header -> a Headers collection holding both.
    if isinstance(other, Header):
        headers = Headers()
        headers += self
        headers += other
        return headers
    # Header + str -> a copy of this header with one extra member.
    header_ = deepcopy(self)
    header_ += other
    return header_
def __isub__(self, other: str) -> "Header":
    """
    Remove an attribute or member named *other* from the header, both
    from the parsed stores and from the raw content string.
    :raises TypeError: If *other* is not a string.
    :raises ValueError: If *other* is not present in the header.
    """
    if not isinstance(other, str):
        raise TypeError(
            "You cannot subtract {type_} to an Header.".format(
                type_=str(type(other))
            )
        )
    if other not in self:
        raise ValueError(
            "You cannot subtract '{element}' from '{header_name}' Header because its not there.".format(
                element=other, header_name=self.pretty_name
            )
        )
    other = normalize_str(other)
    # Valued attribute: delegate to __delitem__ (also rewrites _content).
    if other in self._valued_attrs:
        del self[other]
    if other in self._not_valued_attrs:
        self._not_valued_attrs.remove(other)
        # Also purge any duplicate occurrences of the same member.
        while True:
            try:
                self._not_valued_attrs.remove(other)
            except ValueError:
                break
        # Strip each raw occurrence from _content; the appended '\n' lets
        # the lookahead match the final member of the string too.
        for elem in findall(
            r"{member_name}(?=[;\n])".format(member_name=escape(other)),
            self._content + "\n",
            IGNORECASE,
        ):
            self._content = header_strip(self._content, elem)
    return self
def __sub__(self, other: str) -> "Header":
    """Return a new Header equal to this one minus the attribute or
    member *other*; the original header is left untouched."""
    result = deepcopy(self)
    result -= other
    return result
def __setattr__(self, key: str, value: str) -> None:
    """
    Set attribute on header using the property notation, e.g.
    ``header.charset = "UTF-8"`` is routed to ``header["charset"]``.
    """
    # Avoid conflict with __init__ sequence of Header: the internal slots
    # below are stored normally; anything else becomes a header attribute.
    if key in {
        "_name",
        "_normalized_name",
        "_pretty_name",
        "_content",
        "_members",
        "_not_valued_attrs",
        "_valued_attrs",
        "__class__",
    }:
        return super().__setattr__(key, value)
    # NOTE(review): presumably strips a trailing '_' used to escape Python
    # reserved words — confirm against kiss_headers.utils.
    key = unpack_protected_keyword(key)
    self[key] = value
def __setitem__(self, key: str, value: str) -> None:
    """
    Set an attribute bracket syntax like. This will erase previously set attribute named after the key.
    Any value that are not a str are casted to str.
    """
    # Replace semantics: drop any existing attribute with the same key first.
    if key in self:
        del self[key]
    if not isinstance(value, str):
        value = str(value)
    self._valued_attrs[key] = value
    # Append the attribute to the raw content, '; '-separated unless the
    # content is currently empty/whitespace.
    self._content += '{semi_colon_r}{key}="{value}"'.format(
        key=key,
        value=value,
        semi_colon_r="; " if self._content.lstrip() != "" else "",
    )
def __delitem__(self, key: str) -> None:
    """
    Remove any attribute named after the key in header using the bracket syntax.
    >>> headers = Header("Content-Type", "text/html; charset=UTF-8") + Header("Allow", "POST")
    >>> str(headers.content_type)
    'text/html; charset=UTF-8'
    >>> del headers.content_type['charset']
    >>> str(headers.content_type)
    'text/html'
    """
    if key not in self._valued_attrs:
        raise KeyError(
            "'{item}' attribute is not defined within '{header}' header.".format(
                item=key, header=self.name
            )
        )
    del self._valued_attrs[key]
    # Strip every raw 'key=...' occurrence from _content as well; the
    # appended '\n' lets the lookahead match the final member too.
    for elem in findall(
        r"{key_name}=.*?(?=[;\n])".format(key_name=escape(key)),
        self._content + "\n",
        IGNORECASE,
    ):
        self._content = header_strip(self._content, elem)
def __delattr__(self, item: str) -> None:
    """
    Remove an attribute using the property notation, e.g.
    ``del header.charset`` removes the ``charset`` attribute.
    >>> headers = Header("Content-Type", "text/html; charset=UTF-8") + Header("Vary", "Content-Type")
    >>> repr(headers.content_type)
    'Content-Type: text/html; charset=UTF-8'
    >>> del headers.content_type.charset
    >>> repr(headers.content_type)
    'Content-Type: text/html'
    """
    normalized = normalize_str(item)
    if normalized not in self._valued_attrs:
        raise AttributeError(
            "'{item}' attribute is not defined within '{header}' header.".format(
                item=normalized, header=self.name
            )
        )
    # Delegate the actual removal (store + raw content) to __delitem__.
    del self[normalized]
def __iter__(self) -> Iterator[Tuple[str, Optional[Union[str, List[str]]]]]:
    """Iterate the header as ``(key, value)`` tuples; members that carry
    no value are yielded as ``(member, None)``."""
    for attr_key in self._valued_attrs:
        yield attr_key, self[attr_key]
    for plain_member in self._not_valued_attrs:
        yield plain_member, None
def __eq__(self, other: object) -> bool:
"""
Verify equality between a Header object and str or another Header object.
If testing against str, the first thing is to match it to raw content, if not equal verify if not in members.
"""
if isinstance(other, str):
return self.content == other or other in self._not_valued_attrs
if isinstance(other, Header):
if (
self.normalized_name == other.normalized_name
and len(self._not_valued_attrs) == len(other._not_valued_attrs)
and len(self._valued_attrs) == len(other._valued_attrs)
):
for adjective in self._not_valued_attrs:
if adjective not in other._not_valued_attrs:
return False
for key in self._valued_attrs:
if key | |
n_Ex_b = (gx_mask[occb]).sum() + (ux_mask[occb]).sum()
n_Ey_b = (gy_mask[occb]).sum() + (uy_mask[occb]).sum()
if abs(n_Ex_a - n_Ey_a) == 1 and abs(n_Ex_b - n_Ey_b) == 1:
# open-shell for both alpha det and beta det e.g. the
# valence part of O2 molecule
addr_x_a, addr_y_a = search_open_shell_det(occa)
addr_x_b, addr_y_b = search_open_shell_det(occb)
if singlet:
if wfn_lz == 0:
ci_1[addr_x_a,addr_x_b] = numpy.sqrt(.5)
ci_1[addr_y_a,addr_y_b] = numpy.sqrt(.5)
else:
ci_1[addr_x_a,addr_x_b] = numpy.sqrt(.5)
ci_1[addr_y_a,addr_y_b] =-numpy.sqrt(.5)
else:
ci_1[addr_x_a,addr_y_b] = numpy.sqrt(.5)
ci_1[addr_y_a,addr_x_b] =-numpy.sqrt(.5)
else:
# TODO: Other direct-product to direct-sum transofromation
# which involves CG coefficients.
ci_1[addra,addrb] = 1
ci0.append(ci_1.ravel())
iroot += 1
if iroot >= nroots:
break
return ci0
def _symmetrize_wfn(ci, strsa, strsb, orbsym, wfnsym=0):
    """Zero out CI coefficients whose determinant pair does not carry the
    target irrep, then renormalize. Irrep labels are reduced to the D2h
    subgroup via ``% 10`` before the XOR accumulation."""
    na, nb = strsa.size, strsb.size
    ci = ci.reshape(na, nb)
    orb_irreps = numpy.asarray(orbsym) % 10
    target_irrep = wfnsym % 10
    alpha_irreps = numpy.zeros(na, dtype=numpy.int32)
    beta_irreps = numpy.zeros(nb, dtype=numpy.int32)
    # Accumulate the irrep of each string: XOR of the irreps of occupied orbitals.
    for orb, irrep in enumerate(orb_irreps):
        alpha_irreps[numpy.bitwise_and(strsa, 1 << orb) > 0] ^= irrep
        beta_irreps[numpy.bitwise_and(strsb, 1 << orb) > 0] ^= irrep
    keep = (alpha_irreps.reshape(-1, 1) ^ beta_irreps) == target_irrep
    projected = numpy.zeros_like(ci)
    projected[keep] = ci[keep]
    projected *= 1 / numpy.linalg.norm(projected)
    return projected
def symmetrize_wfn(ci, norb, nelec, orbsym, wfnsym=0):
    '''Symmetrize the CI wavefunction by zeroing out the determinants which
    do not have the right symmetry.
    Args:
        ci : 2D array
            CI coefficients, row for alpha strings and column for beta strings.
        norb : int
            Number of orbitals.
        nelec : int or 2-item list
            Number of electrons, or 2-item list for (alpha, beta) electrons
        orbsym : list of int
            The irrep ID for each orbital.
    Kwargs:
        wfnsym : int
            The irrep ID of target symmetry
    Returns:
        2D array which is the symmetrized CI coefficients
    '''
    neleca, nelecb = _unpack_nelec(nelec)
    alpha_strings = numpy.asarray(cistring.make_strings(range(norb), neleca))
    beta_strings = numpy.asarray(cistring.make_strings(range(norb), nelecb))
    return _symmetrize_wfn(ci, alpha_strings, beta_strings, orbsym, wfnsym)
def _guess_wfnsym(ci, strsa, strsb, orbsym):
    """Infer the wavefunction irrep from the determinant carrying the
    largest CI coefficient: XOR the irreps of all occupied alpha and beta
    orbitals of that determinant."""
    nb = len(strsb)
    # argmax on the (possibly 2D) array yields a flattened index.
    flat_idx = abs(ci).argmax()
    stra = strsa[flat_idx // nb]
    strb = strsb[flat_idx % nb]
    orbsym_in_d2h = numpy.asarray(orbsym) % 10  # convert to D2h irreps
    wfn_irrep = 0
    for orb, irrep in enumerate(orbsym_in_d2h):
        if stra & (1 << orb):
            wfn_irrep ^= irrep
        if strb & (1 << orb):
            wfn_irrep ^= irrep
    return wfn_irrep
def guess_wfnsym(ci, norb, nelec, orbsym):
    '''Guess the wavefunction symmetry based on the non-zero elements in the
    given CI coefficients.
    Args:
        ci : 2D array
            CI coefficients, row for alpha strings and column for beta strings.
        norb : int
            Number of orbitals.
        nelec : int or 2-item list
            Number of electrons, or 2-item list for (alpha, beta) electrons
        orbsym : list of int
            The irrep ID for each orbital.
    Returns:
        Irrep ID
    '''
    neleca, nelecb = _unpack_nelec(nelec)
    strsa = numpy.asarray(cistring.make_strings(range(norb), neleca))
    strsb = numpy.asarray(cistring.make_strings(range(norb), nelecb))
    if isinstance(ci, numpy.ndarray) and ci.ndim <= 2:
        wfnsym = _guess_wfnsym(ci, strsa, strsb, orbsym)
    else:
        # Several CI vectors: each should carry the same symmetry; warn if not
        # and fall back to the first vector's irrep.
        wfnsym = [_guess_wfnsym(c, strsa, strsb, orbsym) for c in ci]
        if any(wfnsym[0] != x for x in wfnsym):
            # BUGFIX: corrected typo "vecotrs" in the warning message.
            warnings.warn('Different wfnsym %s found in different CI vectors' % wfnsym)
        wfnsym = wfnsym[0]
    return wfnsym
def des_a(ci0, norb, neleca_nelecb, ap_id):
    r'''Construct the (N-1)-electron wavefunction obtained by removing an
    alpha electron from the N-electron wavefunction.
    ... math::
        |N-1\rangle = \hat{a}_p |N\rangle
    Args:
        ci0 : 2D array
            CI coefficients, row for alpha strings and column for beta strings.
        norb : int
            Number of orbitals.
        (neleca,nelecb) : (int,int)
            Number of (alpha, beta) electrons of the input CI function
        ap_id : int
            Orbital index (0-based), for the annihilation operator
    Returns:
        2D array, row for alpha strings and column for beta strings. Note it
        has different number of rows to the input CI coefficients.
    '''
    neleca, nelecb = neleca_nelecb
    if neleca <= 0:
        # No alpha electron to annihilate: the result vanishes.
        return numpy.zeros_like(ci0)
    if ci0.ndim == 1:
        ci0 = ci0.reshape(cistring.num_strings(norb, neleca),
                          cistring.num_strings(norb, nelecb))
    des_index = cistring.gen_des_str_index(range(norb), neleca)
    na_out = cistring.num_strings(norb, neleca-1)
    ci1 = numpy.zeros((na_out, ci0.shape[1]))
    # Entries of the annihilation table that remove orbital ap_id.
    hits = des_index[:, :, 1] == ap_id
    src_rows = numpy.any(hits, axis=1)
    dest_rows = des_index[hits, 2]
    signs = des_index[hits, 3]
    ci1[dest_rows] = signs.reshape(-1, 1) * ci0[src_rows]
    return ci1
def des_b(ci0, norb, neleca_nelecb, ap_id):
    r'''Construct the (N-1)-electron wavefunction obtained by removing a
    beta electron from the N-electron wavefunction.
    Args:
        ci0 : 2D array
            CI coefficients, row for alpha strings and column for beta strings.
        norb : int
            Number of orbitals.
        (neleca,nelecb) : (int,int)
            Number of (alpha, beta) electrons of the input CI function
        ap_id : int
            Orbital index (0-based), for the annihilation operator
    Returns:
        2D array, row for alpha strings and column for beta strings. Note it
        has different number of columns to the input CI coefficients.
    '''
    neleca, nelecb = neleca_nelecb
    if nelecb <= 0:
        # No beta electron to annihilate: the result vanishes.
        return numpy.zeros_like(ci0)
    if ci0.ndim == 1:
        ci0 = ci0.reshape(cistring.num_strings(norb, neleca),
                          cistring.num_strings(norb, nelecb))
    des_index = cistring.gen_des_str_index(range(norb), nelecb)
    nb_out = cistring.num_strings(norb, nelecb-1)
    ci1 = numpy.zeros((ci0.shape[0], nb_out))
    # Entries of the annihilation table that remove orbital ap_id.
    hits = des_index[:, :, 1] == ap_id
    src_cols = numpy.any(hits, axis=1)
    dest_cols = des_index[hits, 2]
    signs = des_index[hits, 3]
    # Anticommuting the beta operator through the alpha string flips the
    # sign when the number of alpha electrons is odd.
    if neleca % 2 == 1:
        signs *= -1
    ci1[:, dest_cols] = ci0[:, src_cols] * signs
    return ci1
def cre_a(ci0, norb, neleca_nelecb, ap_id):
    r'''Construct the (N+1)-electron wavefunction obtained by adding an
    alpha electron to the N-electron wavefunction.
    ... math::
        |N+1\rangle = \hat{a}^+_p |N\rangle
    Args:
        ci0 : 2D array
            CI coefficients, row for alpha strings and column for beta strings.
        norb : int
            Number of orbitals.
        (neleca,nelecb) : (int,int)
            Number of (alpha, beta) electrons of the input CI function
        ap_id : int
            Orbital index (0-based), for the creation operator
    Returns:
        2D array, row for alpha strings and column for beta strings. Note it
        has different number of rows to the input CI coefficients.
    '''
    neleca, nelecb = neleca_nelecb
    if neleca >= norb:
        # All alpha orbitals already occupied: the result vanishes.
        return numpy.zeros_like(ci0)
    if ci0.ndim == 1:
        ci0 = ci0.reshape(cistring.num_strings(norb, neleca),
                          cistring.num_strings(norb, nelecb))
    cre_index = cistring.gen_cre_str_index(range(norb), neleca)
    na_out = cistring.num_strings(norb, neleca+1)
    ci1 = numpy.zeros((na_out, ci0.shape[1]))
    # Entries of the creation table that fill orbital ap_id.
    hits = cre_index[:, :, 0] == ap_id
    src_rows = numpy.any(hits, axis=1)
    dest_rows = cre_index[hits, 2]
    signs = cre_index[hits, 3]
    ci1[dest_rows] = signs.reshape(-1, 1) * ci0[src_rows]
    return ci1
def cre_b(ci0, norb, neleca_nelecb, ap_id):
    r'''Construct the (N+1)-electron wavefunction obtained by adding a
    beta electron to the N-electron wavefunction.
    Args:
        ci0 : 2D array
            CI coefficients, row for alpha strings and column for beta strings.
        norb : int
            Number of orbitals.
        (neleca,nelecb) : (int,int)
            Number of (alpha, beta) electrons of the input CI function
        ap_id : int
            Orbital index (0-based), for the creation operator
    Returns:
        2D array, row for alpha strings and column for beta strings. Note it
        has different number of columns to the input CI coefficients.
    '''
    neleca, nelecb = neleca_nelecb
    if nelecb >= norb:
        # All beta orbitals already occupied: the result vanishes.
        return numpy.zeros_like(ci0)
    if ci0.ndim == 1:
        ci0 = ci0.reshape(cistring.num_strings(norb, neleca),
                          cistring.num_strings(norb, nelecb))
    cre_index = cistring.gen_cre_str_index(range(norb), nelecb)
    nb_out = cistring.num_strings(norb, nelecb+1)
    ci1 = numpy.zeros((ci0.shape[0], nb_out))
    # Entries of the creation table that fill orbital ap_id.
    hits = cre_index[:, :, 0] == ap_id
    src_cols = numpy.any(hits, axis=1)
    dest_cols = cre_index[hits, 2]
    signs = cre_index[hits, 3]
    # Anticommuting the beta operator through the alpha string flips the
    # sign when the number of alpha electrons is odd.
    if neleca % 2 == 1:
        signs *= -1
    ci1[:, dest_cols] = ci0[:, src_cols] * signs
    return ci1
def det_overlap(string1, string2, norb, s=None):
    '''Determinants overlap on a non-orthogonal one-particle basis.
    Strings may be given either as binary-literal str ("110") or as the
    equivalent integer bitmask; both must contain the same electron count.
    '''
    if s is None:
        # Orthonormal orbitals (s_ij = delta_ij): overlap is 1 for identical
        # occupation strings and 0 otherwise.
        return float(string1 == string2)
    if isinstance(string1, str):
        nelec = string1.count('1')
        string1 = int(string1, 2)
    else:
        nelec = bin(string1).count('1')
    if isinstance(string2, str):
        assert(string2.count('1') == nelec)
        string2 = int(string2, 2)
    else:
        assert(bin(string2).count('1') == nelec)
    occ1 = [i for i in range(norb) if string1 & (1 << i)]
    occ2 = [i for i in range(norb) if string2 & (1 << i)]
    # Overlap of two determinants = det of the occupied-orbital overlap block.
    return numpy.linalg.det(lib.take_2d(s, occ1, occ2))
def overlap(bra, ket, norb, nelec, s=None):
    '''Overlap between two CI wavefunctions.

    Args:
        s : 2D array or a list of 2D array
            Overlap matrix of the non-orthogonal one-particle basis.  When
            given, the bra is first rotated into the orbital basis of the
            ket before the overlap is taken.
    '''
    if s is not None:
        bra = transform_ci_for_orbital_rotation(bra, norb, nelec, s)
    # vdot conjugates its first argument, matching <bra|ket>.
    return numpy.vdot(bra.ravel(), ket.ravel())
def fix_spin_(fciobj, shift=PENALTY, ss=None, **kwargs):
r'''If FCI solver cannot stay on spin eigenfunction, this function can
add a shift to the states which have wrong spin.
.. math::
(H + shift*S^2) |\Psi\rangle = E |\Psi\rangle
Args:
fciobj : An instance of :class:`FCISolver`
Kwargs:
shift : float
Level shift for states which have different spin
ss : | |
# worldcupper.py
"""
https://www.fifaindex.com/teams/?type=1
https://www.fifaindex.com/teams/fifa07_3/?league=78&type=1
https://www.fifaindex.com/teams/fifa06_2/?league=78&type=1
Attacks per game - average unknown
Number of attacks,
Goals per game
http://www.slate.com/articles/sports/sports_nut/2013/08/the_numbers_game_why_soccer_teams_score_fewer_goals_than_they_did_100_years.html
"""
import csv
import copy
import random
import math
from collections import OrderedDict
AVERAGE_GOALS_PER_GAME = 2.6
AVERAGE_SHOTS_PER_GAME = 20 # Per team, shots on target about 50%. Attack and midfield + some random boost
EXTRA_ATTACK_RATIO = 15 # For better team
class WorldCupper(object):
def __init__(self, filename='worldcupper.csv'):
self.groups = {} # Team groups in WC
with open(filename, mode='r', encoding='utf-8-sig') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
row['goals_for'] = 0
row['goals_against'] = 0
row['goals_diff'] = 0
row['games_won'] = 0
row['games_lost'] = 0
row['games_tied'] = 0
row['points'] = 0
row['name'] = row['name'].capitalize()
self.groups.setdefault(row['group'], []).append(row)
self.total_group_goals = 0
self.games = 0
self.total_knockout_goals = 0
def run_group_stage(self):
# Run group stage
self.groups = OrderedDict(sorted(self.groups.items()))
for group_name, group_teams in self.groups.items():
for i, team1 in enumerate(group_teams):
for team2 in group_teams[i+1:]:
team1_score, team2_score, total_goals, _ = self.run_match(team1, team2)
self.games += 1
self.total_group_goals += total_goals
team1['goals_for'] += team1_score
team2['goals_for'] += team2_score
team1['goals_against'] += team2_score
team2['goals_against'] += team1_score
team1['goals_diff'] += team1_score - team2_score
team2['goals_diff'] += team2_score - team1_score
if team1_score > team2_score:
team1['games_won'] += 1
team1['points'] += 3
team2['games_lost'] += 1
elif team1_score == team2_score:
team1['games_tied'] += 1
team2['games_tied'] += 1
team1['points'] += 1
team2['points'] += 1
elif team1_score < team2_score:
team2['games_won'] += 1
team1['games_lost'] += 1
team2['points'] += 3
print(team1['name'], ' ', team1_score, ' vs ', team2['name'], ' ',
team2_score)
# Summary of groups
print('total goals in ', self.games, ' games ', self.total_group_goals)
print('goals per game ', self.total_group_goals/self.games)
for group_name, group_teams in self.groups.items():
print('Group {}'.format(group_name))
print('team, points, games won, games lost, games tied, goals for, goals against, goal diff')
# group_teams = sorted(group_teams, key=lambda k: k['points'], reverse=True)
# Sort groups by points,
group_teams = sorted(group_teams, key=lambda k: (k['points'], k['goals_diff']), reverse=True)
for i, team in enumerate(group_teams):
print(team['name'], team['points'], team['games_won'], team['games_lost'], team['games_tied'],
team['goals_for'], team['goals_against'], team['goals_diff'])
print('')
def run_round(self, current_round_matches):
current_round_matches = copy.deepcopy(current_round_matches)
next_round_matches = []
next_match = []
round_total_goals = 0
for i, match_teams in enumerate(current_round_matches):
team1_score, team2_score, total_goals, winning_team = self.run_match(match_teams[0], match_teams[1],
do_penalties=True)
# Save scores in original dict
match_teams[0]['score'] = team1_score
match_teams[1]['score'] = team2_score
round_total_goals += total_goals
# print(match_teams[0]['name'], match_teams[0]['score'], match_teams[1]['name'], match_teams[1]['score'])
# winning_team['score'] = None
next_match.append(winning_team)
if i % 2:
next_round_matches.append(next_match)
next_match = []
# Single winner edge case for round of 2 teams (final)
final_winner = None
if not next_round_matches:
final_winner = next_match[0]
return next_round_matches, current_round_matches, final_winner, round_total_goals
def run_knockout_stage(self):
groups = self.groups
# Final 16 teams
final_16_matches = [
[groups['a'][0], groups['b'][1]], [groups['c'][0], groups['d'][1]],
[groups['e'][0], groups['f'][1]], [groups['g'][0], groups['h'][1]],
[groups['b'][0], groups['a'][1]], [groups['d'][0], groups['c'][1]],
[groups['f'][0], groups['e'][1]], [groups['h'][0], groups['g'][1]]
]
# Final 16
# print('')
# print("Final 16")
quarter_final_matches, final_16_matches, _, total_goals = self.run_round(final_16_matches)
self.total_knockout_goals += total_goals
# Semi finals
# print('')
# print("Semi Finals")
semi_final_matches, quarter_final_matches, _, total_goals = self.run_round(quarter_final_matches)
self.total_knockout_goals += total_goals
# Quarter finals
# print('')
# print("Quarter Finals")
final_match, semi_final_matches, _, total_goals = self.run_round(semi_final_matches)
self.total_knockout_goals += total_goals
# Final
# print('')
# print("Final")
_, final_match, winner, total_goals = self.run_round(final_match)
self.total_knockout_goals += total_goals
# Winner
# print('')
# print("WC Winner: {}".format(winner['name']))
self.draw_knockout_table(final_16_matches, quarter_final_matches, semi_final_matches, final_match, winner)
def attack(self, team_attack, team_overall, number_of_attacks, other_team_defense):
"""
Get team score on attack
:param team_attack:
:param team_overall:
:param number_of_attacks:
:param other_team_defense:
:return:
"""
team_score = 0
for _ in range(number_of_attacks):
# attack = how many are on target, chance of scoring
chance_of_scoring = random.random() * team_attack * team_overall
# defense = how many go in
chance_of_defense = random.random() * other_team_defense * team_overall
# print(chance_of_scoring, chance_of_defense, 'attack ', team_attack, 'other team def', other_team_defense)
if chance_of_scoring > chance_of_defense:
team_score += 1
return team_score
def get_total_goals(self, team1_attack, team2_attack, team1_defense, team2_defense):
total_goals = max(AVERAGE_GOALS_PER_GAME * random.uniform(0, 3) + (
(team1_attack + team2_attack - team1_defense - team2_defense) * 10) - 2, 0)
return total_goals
def normalize_goals(self, team1_score, team2_score, team1_attack, team2_attack, team1_defense, team2_defense,
total_goals=None):
"""
Normalize match goals
:param team1_score:
:param team2_score:
:param team1_attack:
:param team2_attack:
:param team1_defense:
:param team2_defense:
:param total_goals:
:return:
"""
if not total_goals:
# CForce total_goals to be > 0 if the diff b/w team1_score & team2_score is high
total_goals = self.get_total_goals(team1_attack, team2_attack, team1_defense, team2_defense)
if not total_goals and math.fabs(team1_score - team2_score) > 4:
total_goals = self.get_total_goals(team1_attack, team2_attack, team1_defense, team2_defense)
total_goals = int(round(total_goals, 0))
team1_new_score = team2_new_score = 0
if team1_score:
team1_new_score = int(round(team1_score / (team1_score + team2_score) * total_goals, 0))
if team2_score:
team2_new_score = int(round(team2_score / (team1_score + team2_score) * total_goals, 0))
return team1_new_score, team2_new_score, total_goals
def run_match(self, team1, team2, do_penalties=False):
"""
Run a match simulation
:param team1:
:param team2:
:param do_penalties:
:return:
"""
# Lower is better. FIFA rating is not as significant.
team1_overall_rating = float(float(team1['fifa_rating']) / 2.0) + float(team1['elo_rating']) + \
float(team1['goalimpact_rating']) + (5 - float(team1['ea_fifa_18_rating']))
team2_overall_rating = float(float(team1['fifa_rating']) / 2.0) + float(team2['elo_rating']) + \
float(team2['goalimpact_rating']) + (5 - float(team2['ea_fifa_18_rating']))
# print('overall', team1_overall_rating, team2_overall_rating)
team1_relative_rating = team1_overall_rating / max(team1_overall_rating, team2_overall_rating)
team2_relative_rating = team2_overall_rating / max(team1_overall_rating, team2_overall_rating)
team1_relative_rating = 1.0 - team1_relative_rating
team2_relative_rating = 1.0 - team2_relative_rating
# print('relative!!', team1_relative_rating, team2_relative_rating)
# TODO: Man not need random factor
# team1_random_factor = float(team1['overall']) / 100.0 * random.random() + \
# (float(team1['boost']) / 100.0)
# team2_random_factor = float(team2['overall']) / 100.0 * random.random() + \
# (float(team2['boost']) / 100.0)
# print('rand factor', team1_random_factor, team2_random_factor)
# midfield = how many shots you get (attacks)
team1_attack = float(team1['att']) / 100.0
team2_attack = float(team2['att']) / 100.0
team1_midfield = float(team1['mid']) / 100.0
team2_midfield = float(team2['mid']) / 100.0
team1_defense = float(team1['def']) / 100.0
team2_defense = float(team2['def']) / 100.0
team1_overall = float(team1['overall']) / 100.0
team2_overall = float(team2['overall']) / 100.0
# How much better is team 1 vs team 2?
attack_diff_factor = math.fabs(team2_attack - team1_attack) * 2
team1_attack_boost_min = 0.5
team1_attack_boost_max = 1.0
team2_attack_boost_min = 0.5
team2_attack_boost_max = 1.0
if team1_attack > team2_attack:
team1_attack_boost_max = team1_attack_boost_max + attack_diff_factor
team1_attack_boost_min = team1_attack_boost_min - attack_diff_factor
elif team1_attack < team2_attack:
team2_attack_boost_max = team2_attack_boost_max + attack_diff_factor
team2_attack_boost_min = team2_attack_boost_min - attack_diff_factor
team1_number_of_attacks = random.uniform((team1_attack + team1_midfield) / 2.0,
1.0) * AVERAGE_SHOTS_PER_GAME * random.uniform(
team1_attack_boost_min, team1_attack_boost_max)
team2_number_of_attacks = random.uniform((team2_attack + team2_midfield) / 2.0,
1.0) * AVERAGE_SHOTS_PER_GAME * random.uniform(
team2_attack_boost_min, team2_attack_boost_max)
# team2_number_of_attacks = float(team1['att']) / 100.0 * float(team2['mid']) / 100.0 * \
# random.uniform(0.25, 1.0) * AVERAGE_SHOTS_PER_GAME
team1_number_of_attacks = int(round(team1_number_of_attacks, 0))
team2_number_of_attacks = int(round(team2_number_of_attacks, 0))
# print('no of shots/attacks', round(team1_number_of_attacks, 1), team2_number_of_attacks)
# print('attack for ', team1['name'])
team1_score = self.attack(team1_attack, team1_overall, team1_number_of_attacks, team2_defense)
# print('attack for ', team2['name'])
team2_score = self.attack(team2_attack, team2_overall, team2_number_of_attacks, team1_defense)
# print(team1_score, team2_score)
# Additional goals, based on relative rating
# print('relative is', team1['name'], team1_relative_rating, team2['name'], team2_relative_rating)
if team1_relative_rating > team2_relative_rating:
# Team 1 better, lower is better
# team1_score = team1_score + (team1_relative_rating * random.random() * 100)
team1_extra_attacks = int(round(team1_relative_rating * random.random() * EXTRA_ATTACK_RATIO, 0))
# print('team1 better', team1['name'], team1_extra_attacks)
team1_score += self.attack(team1_attack, team1_overall, team1_extra_attacks, team2_defense)
elif team1_relative_rating < team2_relative_rating:
# Team 2 better, lower is better
# team2_score = team2_score + (team2_relative_rating * random.random() * 100)
team2_extra_attacks = int(round(team2_relative_rating * random.random() * EXTRA_ATTACK_RATIO, 0))
# print('team2 better', team2['name'], team2_extra_attacks)
team2_score += self.attack(team2_attack, team2_overall, team2_extra_attacks, team1_defense)
team1_score, team2_score, total_goals = self.normalize_goals(team1_score, team2_score, team1_attack,
team2_attack, team1_defense, team2_defense)
# Penalties if applicable
if team1_score == team2_score and do_penalties:
team1_penalties = 0
team2_penalties = 0
penalties_taken = 0
while team1_penalties == team2_penalties or penalties_taken < 5:
team1_penalties += self.attack(team1_attack, team1_overall, 1, team2_defense)
team2_penalties += self.attack(team2_attack, team2_overall, 1, team1_defense)
penalties_taken += 1
if team1_penalties > team2_penalties:
team1_score += 1
else:
team2_score += 1
winning_team = None
if team1_score > team2_score:
winning_team = team1
elif team2_score > team1_score:
winning_team = team2
# TODO: Return penalties scored if applicable
return team1_score, team2_score, total_goals, winning_team
def draw_knockout_table(self, final_16_matches, quarter_final_matches, semi_final_matches, final_match, winner):
args = []
for match in final_16_matches + quarter_final_matches + semi_final_matches + final_match:
args.append(match[0]['name'] + ' ' + str(match[0]['score']))
args.append(match[1]['name'] + ' ' + str(match[1]['score']))
args.append(winner['name'])
table = ("\n"
"Left:\n"
"{0}\n"
"{1}\n"
" {16}\n"
" {17}\n"
"{2}\n"
" {24}\n"
"{3}\n"
" {28}\n"
"{4}\n"
"{5}\n"
" {25}\n"
" {18}\n"
" {19}\n"
"{6}\n"
"{7}\n"
" {30}\n"
"Right:\n"
"{8}\n"
"{9}\n"
" {20}\n"
" {21}\n"
"{10}\n"
" {26}\n"
"{11}\n"
" {29}\n"
"{12}\n"
"{13}\n"
" {27}\n"
" {22}\n"
" | |
import os
from pathlib import Path
import joblib
import pandas as pd
import numpy as np
from multiprocessing import Pool
from collections import defaultdict
import functools
import re
import sys
sys.path.insert(0, './code')
from utils import DataLogger # noqa: E402
class DataNotFoundException(Exception):
    """Raised when a loader's fit() is called before update_data()."""
def get_time_split(df):
    """Slice *df* by its 'dt' column into the feature time windows.

    Returns a dict mapping window name to the corresponding sub-frame:
    a long-term window (months 1-12), a mid-term window (13-16) and one
    three-month recency window per prediction month (22 and 23).
    """
    dt = df['dt']
    return {
        "one_to_twelve": df[dt <= 12],
        "twelve_to_sixteen": df[(dt > 12) & (dt <= 16)],
        "prev_three_months_22": df[(dt > 18) & (dt <= 21)],
        "prev_three_months_23": df[(dt > 19) & (dt <= 22)],
    }
def get_merge_dict():
    """Map each prediction month to the time windows its features use.

    Every month combines the two long-term windows with its own
    three-month recency window.
    """
    base_windows = ["one_to_twelve", "twelve_to_sixteen"]
    return {
        22: base_windows + ["prev_three_months_22"],
        23: base_windows + ["prev_three_months_23"],
    }
def get_time_split_result(a_func):
    """Decorator: evaluate *a_func* on each time window and merge results.

    The wrapped method must return a list of (name, value) pairs.  For
    each prediction month the decorator appends the value from the
    month's recency window under ``name`` and the long-term window values
    under ``name_12`` / ``name_16``.
    """
    @functools.wraps(a_func)
    def wrapper(self, df):
        merged = defaultdict(list)
        window_values = {name: a_func(self, frame)
                         for name, frame in get_time_split(df).items()}
        for dt, (win_12, win_16, win_recent) in get_merge_dict().items():
            triples = zip(window_values[win_recent],
                          window_values[win_12],
                          window_values[win_16])
            for (name, value), (_, value_12), (_, value_16) in triples:
                merged[name].append(value)
                merged["{}_12".format(name)].append(value_12)
                merged["{}_16".format(name)].append(value_16)
        return merged
    return wrapper
class DataLoader():
    """Locates the project's input/output/model directories and provides
    joblib/csv persistence helpers."""

    def __init__(self):
        base = Path(os.path.abspath(os.getcwd()))
        self.output_path = base / 'output'
        self.input_path = base / 'input'
        self.model_path = base / 'model'

    def save_data(self, cls, data_name, message):
        """Dump *cls* to output/<data_name>, logging before and after."""
        logger = DataLogger()
        logger.save_data("Save data {} is generated from {}".format(
            data_name, message))
        joblib.dump(cls, self.output_path / data_name)
        logger.save_data("{} is sucessfully saved".format(data_name))

    def load_data(self, data_name, data_type='joblib', **kwargs):
        """Read input/<data_name>; data_type is 'joblib' or 'csv'."""
        return self._read(self.input_path / data_name, data_type, **kwargs)

    def load_result(self, data_name, data_type='joblib', **kwargs):
        """Read output/<data_name>; data_type is 'joblib' or 'csv'."""
        return self._read(self.output_path / data_name, data_type, **kwargs)

    @staticmethod
    def _read(full_path, data_type, **kwargs):
        # Dispatch on the requested on-disk format.
        if data_type == 'joblib':
            data = joblib.load(full_path, **kwargs)
        elif data_type == 'csv':
            data = pd.read_csv(full_path, **kwargs)
        return data
class FeatLoader(DataLoader):
    """Base class for feature builders: shared category lists and column
    name templates."""

    def __init__(self):
        super(FeatLoader, self).__init__()
        # Shop tags that matter for the prediction target.
        self.required_cate = ('2', '6', '10', '12', '13', '15', '18', '19',
                              '21', '22', '25', '26', '36', '37', '39', '48')
        self.shop_cate = [str(i) for i in range(1, 49)] + ['other']
        self.shop_amt = ["shop_{}_amt".format(tag) for tag in self.shop_cate]
        self.shop_cnt = ["shop_{}_cnt".format(tag) for tag in self.shop_cate]
        self.card_cate = [str(i) for i in range(1, 15)] + ['other']
        self.card_amt = ["card_{}_txn_amt".format(c) for c in self.card_cate]
        self.card_cnt = ["card_{}_txn_cnt".format(c) for c in self.card_cate]
        self.count = 0  # progress counter used by subclasses
        # Customer profile columns (slam included).
        self.profile_cate = [
            "masts",
            "educd",
            "trdtp",
            "naty",
            "poscd",
            "cuorg",
            "primary_card",
            "slam",
            "age",
            "gender_code",
        ]
        # Same as profile_cate but without slam.
        self.basic_info = [
            'masts',
            'educd',
            'trdtp',
            'naty',
            'poscd',
            'cuorg',
            'primary_card',
            'age',
            'gender_code',
        ]
        self.dts = list(range(1, 25))

    def update_data(self, data):
        """Store a private copy of the raw dataframe for fitting."""
        self.data = data.copy()
class AmtFeatLoader(FeatLoader):
    """Builds per-month shop amount features, one row per customer."""

    def __init__(self):
        super(AmtFeatLoader, self).__init__()
        self.get_feat_config()

    def update_a_df(self, df):
        """Build the feature dict for one customer's transaction history."""
        feats = {'chid': df['chid'].iloc[0]}
        if self.count % 10000 == 0:
            print(feats)
        self.count += 1
        for feat_func in self.feat_config:
            feats.update(feat_func(df))
        return feats

    def get_feat_config(self):
        """Register the feature functions applied per customer."""
        self.feat_config = {self.get_amt_by_months}

    def get_amt_by_months(self, df):
        """Pivot txn_amt into shop_<tag>_amt_<dt> columns (0 when absent)."""
        result = {"{}_{}".format(col, dt): 0
                  for dt in range(1, 25) for col in self.shop_amt}
        if df.empty:
            return result
        keys = df[['dt', 'shop_tag']].apply(
            lambda row: "shop_{}_amt_{}".format(row['shop_tag'], row['dt']),
            axis=1)
        df['shop_amt_cate'] = keys
        result.update(dict(zip(keys, df['txn_amt'])))
        return result

    def fit(self):
        """Compute amount features for every customer in parallel."""
        if not hasattr(self, 'data'):
            raise DataNotFoundException("Data not found! Please update data")
        per_customer = [g for _, g in self.data.groupby(['chid'])]
        pool = Pool(8, maxtasksperchild=1000)
        feat_group = pool.map(self.update_a_df, per_customer)
        pool.close()
        self.feats = pd.DataFrame(feat_group)
class ProfileFeatLoader(FeatLoader):
    """Builds per-customer profile features for prediction months 22/23.

    Each customer contributes two output rows, one per prediction month
    (dt=22 and dt=23).
    """

    def __init__(self):
        super(ProfileFeatLoader, self).__init__()
        self.get_feat_config()
        self.card_cnt_pct = [
            "card_{}_cnt_pct".format(cate) for cate in self.card_cate
        ]
        self.card_avg_amt = [
            "card_{}_avg_amt".format(cate) for cate in self.card_cate
        ]

    def fit(self):
        """Compute profile features for every customer in parallel.

        Raises:
            DataNotFoundException: if update_data() was never called.
        """
        if not hasattr(self, 'data'):
            raise DataNotFoundException("Data not found! Please update data")
        self.data = self.get_early_calculation(self.data)
        df_group = [df for _, df in self.data.groupby(['chid'])]
        pool = Pool(8, maxtasksperchild=1000)
        feat_group = pool.map(self.update_a_df, df_group)
        pool.close()
        self.feats = pd.concat(feat_group)

    def get_early_calculation(self, df):
        """Add derived ratio columns shared by the feature functions.

        NOTE(review): the *_cnt_pct columns divide txn_cnt by the channel
        counts, which looks inverted for a "pct" — confirm intent.
        """
        df['avg_amt'] = df['txn_amt'] / df['txn_cnt']
        df['offline_cnt_pct'] = df['txn_cnt'] / (df['domestic_offline_cnt'] +
                                                 df['overseas_offline_cnt'])
        df['online_cnt_pct'] = df['txn_cnt'] / (df['domestic_online_cnt'] +
                                                df['overseas_online_cnt'])
        df['domestic_cnt_pct'] = df['txn_cnt'] / (df['domestic_offline_cnt'] +
                                                  df['domestic_online_cnt'])
        df['overseas_cnt_pct'] = df['txn_cnt'] / (df['overseas_offline_cnt'] +
                                                  df['overseas_online_cnt'])
        # Recover absolute card amounts from their percentage shares.
        for cate in self.card_cate:
            df['card_{}_txn_amt'.format(
                cate)] = df['card_{}_txn_amt_pct'.format(cate)] * df['txn_amt']
        # Share of transactions made with each card type.
        for cate in self.card_cate:
            new_key = "card_{}_cnt_pct".format(cate)
            cnt_key = "card_{}_txn_cnt".format(cate)
            df[new_key] = df[cnt_key] / df['txn_cnt']
        # Average transaction amount per card type.
        for cate in self.card_cate:
            new_key = "card_{}_avg_amt".format(cate)
            amt_key = "card_{}_txn_amt".format(cate)
            cnt_key = "card_{}_txn_cnt".format(cate)
            df[new_key] = df[amt_key] / df[cnt_key]
        return df

    def update_a_df(self, df):
        """Build the two-row (dt=22, dt=23) feature frame for one customer."""
        result = {
            'dt': [22, 23],
            'chid': [df['chid'].iloc[0]] * 2,
        }
        if self.count % 10000 == 0:
            print(result)
        self.count += 1
        for feat_func in self.feat_config:
            result.update(feat_func(df))
        result = pd.DataFrame(result)
        return result

    def get_feat_config(self):
        """Register the feature functions applied per customer."""
        self.feat_config = {
            # First month of card use and months elapsed since then.
            self.get_start_use_dt,
            # How many (important) shop categories were consumed.
            self.get_how_many_tags,
            # Static profile attributes.
            self.get_basic_profile,
        }

    def get_basic_profile(self, df):
        """Static profile attributes, or -1 sentinels when history is empty.

        BUG FIX: the empty branch previously returned ``[-1] * 3``, which
        makes ``pd.DataFrame(result)`` in update_a_df fail because the
        frame has exactly two rows (dt 22 and 23) — get_start_use_dt
        already uses ``[-1] * 2`` for the same case.
        """
        if df.empty:
            r_dict = {
                profile_cate: [-1] * 2
                for profile_cate in self.profile_cate
            }
        else:
            # Scalars broadcast across the two output rows.
            r_dict = {
                profile_cate: df[profile_cate].iloc[0]
                for profile_cate in self.profile_cate
            }
        return r_dict

    @get_time_split_result
    def get_how_many_tags(self, df):
        """Count distinct shop tags (all / important only) in the window."""
        if df.empty:
            r_list = [("how_many_tag", -1), ("how_many_tag_imp", -1)]
        else:
            how_many_tag = len(df['shop_tag'].unique())
            how_many_tag_imp = len(df[df['shop_tag'].isin(
                self.required_cate)]['shop_tag'].unique())
            r_list = [("how_many_tag", how_many_tag),
                      ("how_many_tag_imp", how_many_tag_imp)]
        return r_list

    def get_start_use_dt(self, df):
        """First active month and months elapsed until dt 24 / 25."""
        if df.empty:
            r_dict = {"start_dt": [-1] * 2, "how_long_dt": [-1] * 2}
        else:
            start_dt = df['dt'].iloc[0]
            how_long_dt = np.array([24, 25]) - np.array([start_dt] * 2)
            r_dict = {
                "start_dt": [start_dt] * 2,
                "how_long_dt": list(how_long_dt)
            }
        return r_dict
class CntFeatLoader(FeatLoader):
    """Builds per-month shop transaction-count features per customer."""

    def __init__(self):
        super(CntFeatLoader, self).__init__()
        self.get_feat_config()

    def get_feat_config(self):
        """Register the feature functions applied per customer."""
        self.feat_config = {self.get_cnt_by_months}

    def get_cnt_by_months(self, df):
        """Pivot txn_cnt into shop_<tag>_cnt_<dt> columns (0 when absent)."""
        result = {"{}_{}".format(col, dt): 0
                  for dt in range(1, 25) for col in self.shop_cnt}
        if df.empty:
            return result
        keys = df[['dt', 'shop_tag']].apply(
            lambda row: "shop_{}_cnt_{}".format(row['shop_tag'], row['dt']),
            axis=1)
        df['shop_cnt_cate'] = keys
        result.update(dict(zip(keys, df['txn_cnt'])))
        return result

    def update_a_df(self, df):
        """Build the feature dict for one customer's transaction history."""
        feats = {'chid': df['chid'].iloc[0]}
        if self.count % 10000 == 0:
            print(feats)
        self.count += 1
        for feat_func in self.feat_config:
            feats.update(feat_func(df))
        return feats

    def fit(self):
        """Compute count features for every customer in parallel."""
        if not hasattr(self, 'data'):
            raise DataNotFoundException("Data not found! Please update data")
        per_customer = [g for _, g in self.data.groupby(['chid'])]
        pool = Pool(8, maxtasksperchild=1000)
        feat_group = pool.map(self.update_a_df, per_customer)
        pool.close()
        self.feats = pd.DataFrame(feat_group)
class RankTopFeatLoader(FeatLoader):
    """Extracts each customer's top-3 shop tags per month.

    Operates on wide per-month frames of 49 columns (48 tags + 'other').
    """

    def __init__(self):
        super(RankTopFeatLoader, self).__init__()
        self.get_feat_config()
        # argsort position -> category label lookups.
        self.shop_cate_map = dict(enumerate(self.shop_cate))
        self.imp_cate_map = dict(enumerate(self.required_cate))

    def update_a_df(self, df):
        """Compute both ranking feature frames for one month's columns."""
        print(df.columns[0])
        tops = [feat_func(df) for feat_func in self.feat_config]
        return pd.concat(tops, axis=1)

    def get_feat_config(self):
        """Register the per-month ranking feature functions."""
        self.feat_config = [
            self.get_tops_by_months,
            self.get_imp_tops_by_months,
        ]

    def get_tops_by_months(self, df):
        """Top-3 tags by value for one month; '-1' pads missing ranks."""
        dt = df.columns[0].split('_')[-1]
        col3 = 'top3_{}'.format(dt)
        col2 = 'top2_{}'.format(dt)
        col1 = 'top1_{}'.format(dt)
        top3 = df.apply(lambda x: np.argsort(x), axis=1).iloc[:, -3:]
        top3.columns = [col3, col2, col1]
        for col in top3.columns:
            top3[col] = top3[col].map(self.shop_cate_map)
        count_col = 'how_many_cate_{}'.format(dt)
        top3[count_col] = df.gt(0).sum(axis=1)
        # Blank out ranks that do not correspond to a positive value.
        top3.loc[top3[count_col] == 0, [col3, col2, col1]] = "-1"
        top3.loc[top3[count_col] == 1, [col3, col2]] = "-1"
        top3.loc[top3[count_col] == 2, [col3]] = "-1"
        return top3

    def get_imp_tops_by_months(self, df):
        """Top-3 among the important tags only, same padding scheme."""
        dt = df.columns[0].split('_')[-1]
        # Recover 'amt'/'cnt' from a column like shop_12_amt_7.
        fetch_type = re.findall(r"shop_(\d+_|other_)(.+)_\d+",
                                df.columns[0])[0][1]
        imp_df = df[[
            "shop_{}_{}_{}".format(a_cate, fetch_type, dt)
            for a_cate in self.required_cate
        ]].copy()
        col3 = 'imp_top3_{}'.format(dt)
        col2 = 'imp_top2_{}'.format(dt)
        col1 = 'imp_top1_{}'.format(dt)
        imp_top3 = imp_df.apply(lambda x: np.argsort(x), axis=1).iloc[:, -3:]
        imp_top3.columns = [col3, col2, col1]
        for col in imp_top3.columns:
            imp_top3[col] = imp_top3[col].map(self.imp_cate_map)
        count_col = "how_many_cate_imp_{}".format(dt)
        imp_top3[count_col] = imp_df.gt(0).sum(axis=1)
        imp_top3.loc[imp_top3[count_col] == 0, [col3, col2, col1]] = "-1"
        imp_top3.loc[imp_top3[count_col] == 1, [col3, col2]] = "-1"
        imp_top3.loc[imp_top3[count_col] == 2, [col3]] = "-1"
        return imp_top3

    def fit(self):
        """Compute top-tag features month by month in parallel."""
        if not hasattr(self, 'data'):
            raise DataNotFoundException("Data not found! Please update data")
        feats = [self.data[['chid']].reset_index(drop=True)]
        wide = self.data.drop("chid", axis=1).reset_index(drop=True)
        cols = list(wide.columns)
        monthly = [wide[cols[dt * 49:(dt + 1) * 49]] for dt in range(24)]
        pool = Pool(4, maxtasksperchild=1000)
        feat_group = pool.map(self.update_a_df, monthly)
        pool.close()
        self.feats = pd.concat(feats + feat_group, axis=1)
class ProfileShopFeatLoader(FeatLoader):
    def __init__(self):
        # Set up the shared category lists/paths from FeatLoader, then
        # register this loader's feature functions.
        super(ProfileShopFeatLoader, self).__init__()
        self.get_feat_config()
def update_data(self, data):
| |
from operator import add, sub
import numpy as np
from scipy.stats import norm
class Elora:
def __init__(self, times, labels1, labels2, values, biases=0):
"""
Elo regressor algorithm for paired comparison time series prediction
Author: <NAME>
Args:
times (array of np.datetime64): comparison datetimes
labels1 (array of str): comparison labels for first entity
labels2 (array of str): comparison labels for second entity
values (array of float): comparison outcome values
biases (array of float or scalar, optional): comparison bias
corrections
Attributes:
examples (np.recarray): time-sorted numpy record array of
(time, label1, label2, bias, value, value_pred) samples
first_update_time (np.datetime64): time of the first comparison
last_update_time (np.datetime64): time of the last comparison
labels (array of string): unique compared entity labels
median_value (float): median expected comparison value
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels1 = np.array(labels1, dtype='str', ndmin=1)
labels2 = np.array(labels2, dtype='str', ndmin=1)
values = np.array(values, dtype='float', ndmin=1)
if np.isscalar(biases):
biases = np.full_like(times, biases, dtype='float')
else:
biases = np.array(biases, dtype='float', ndmin=1)
self.first_update_time = times.min()
self.last_update_time = times.max()
self.labels = np.union1d(labels1, labels2)
self.median_value = np.median(values)
prior = self.median_value * np.ones_like(values, dtype=float)
self.examples = np.sort(
np.rec.fromarrays([
times,
labels1,
labels2,
biases,
values,
prior,
], names=(
'time',
'label1',
'label2',
'bias',
'value',
'value_pred'
)), order=['time', 'label1', 'label2'], axis=0)
@property
def initial_rating(self):
"""
Customize this function for a given subclass.
It computes the initial rating, equal to the rating one would
expect if all labels were interchangeable.
Default behavior is to return one-half the median outcome value
if the labels commute, otherwise 0.
"""
return .5*self.median_value if self.commutes else 0
def regression_coeff(self, elapsed_time):
"""
Customize this function for a given subclass.
It computes the regression coefficient—prefactor multiplying the
rating of each team evaluated at each update—as a function of
elapsed time since the last rating update for that label.
Default behavior is to return 1, i.e. no rating regression.
"""
return 1.0
def evolve_rating(self, rating, elapsed_time):
"""
Evolves 'state' to 'time', applying rating regression if necessary,
and returns the evolved rating.
Args:
state (dict): state dictionary {'time': time, 'rating': rating}
time (np.datetime64): time to evaluate state
Returns:
state (dict): evolved state dictionary
{'time': time, 'rating': rating}
"""
regress = self.regression_coeff(elapsed_time)
return regress * rating + (1.0 - regress) * self.initial_rating
def fit(self, k, commutes, scale=1, burnin=0):
"""
Primary routine that performs model calibration. It is called
recursively by the `fit` routine.
Args:
k (float): coefficient that multiplies the prediction error to
determine the rating update.
commutes (bool): false if the observed values change sign under
label interchange and true otheriwse.
"""
self.commutes = commutes
self.scale = scale
self.commutator = 0. if commutes else self.median_value
self.compare = add if commutes else sub
record = {label: [] for label in self.labels}
prior_state_dict = {}
for idx, example in enumerate(self.examples):
time, label1, label2, bias, value, value_pred = example
default = (time, self.initial_rating)
prior_time1, prior_rating1 = prior_state_dict.get(label1, default)
prior_time2, prior_rating2 = prior_state_dict.get(label2, default)
rating1 = self.evolve_rating(prior_rating1, time - prior_time1)
rating2 = self.evolve_rating(prior_rating2, time - prior_time2)
value_pred = self.compare(rating1, rating2) + self.commutator + bias
self.examples[idx]['value_pred'] = value_pred
rating_change = k * (value - value_pred)
rating1 += rating_change
rating2 += rating_change if self.commutes else -rating_change
record[label1].append((time, rating1))
record[label2].append((time, rating2))
prior_state_dict[label1] = (time, rating1)
prior_state_dict[label2] = (time, rating2)
for label in record.keys():
record[label] = np.rec.array(
record[label], dtype=[
('time', 'datetime64[s]'), ('rating', 'float')])
self.record = record
residuals = np.rec.fromarrays([
self.examples.time,
self.examples.value - self.examples.value_pred
], names=('time', 'residual'))
return residuals
def get_rating(self, times, labels):
"""
Query label state(s) at the specified time accounting
for rating regression.
Args:
times (array of np.datetime64): Comparison datetimes
labels (array of string): Comparison entity labels
Returns:
rating (array): ratings for each time and label pair
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels = np.array(labels, dtype='str', ndmin=1)
ratings = np.empty_like(times, dtype='float')
for idx, (time, label) in enumerate(zip(times, labels)):
try:
label_record = self.record[label]
index = label_record.time.searchsorted(time)
prev_index = max(index - 1, 0)
prior_state = label_record[prev_index]
rating = self.evolve_rating(
prior_state.rating, time - prior_state.time)
except KeyError:
rating = self.initial_rating
ratings[idx] = rating
return ratings
def cdf(self, x, times, labels1, labels2, biases=0):
"""
Computes the comulative distribution function (CDF) for each
comparison, i.e. prob(value < x).
Args:
x (array of float): threshold of comparison for each value
times (array of np.datetime64): comparison datetimes
labels1 (array of str): comparison labels for first entity
labels2 (array of str): comparison labels for second entity
values (array of float): comparison value observed outcomes
biases (array of float): comparison bias correct factors,
default value is 0
Returns:
y (array of float): cumulative distribution function value
for each input
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels1 = np.array(labels1, dtype='str', ndmin=1)
labels2 = np.array(labels2, dtype='str', ndmin=1)
if np.isscalar(biases):
biases = np.full_like(times, biases, dtype='float')
else:
biases = np.array(biases, dtype='float', ndmin=1)
ratings1 = self.get_rating(times, labels1)
ratings2 = self.get_rating(times, labels2)
loc = self.compare(ratings1, ratings2) + self.commutator + biases
return norm.cdf(x, loc=loc, scale=self.scale)
def sf(self, x, times, labels1, labels2, biases=0):
    """
    Computes the survival function (SF) for each
    comparison, i.e. prob(value > x).
    Args:
        x (array of float): threshold of comparison for each value
        times (array of np.datetime64): comparison datetimes
        labels1 (array of str): comparison labels for first entity
        labels2 (array of str): comparison labels for second entity
        biases (array of float): comparison bias correct factors,
            default value is 0
    Returns:
        y (array of float): survival function value for each input
    """
    when = np.array(times, dtype='datetime64[s]', ndmin=1)
    first = np.array(labels1, dtype='str', ndmin=1)
    second = np.array(labels2, dtype='str', ndmin=1)
    # A scalar bias is broadcast to one entry per comparison.
    bias_arr = (np.full_like(when, biases, dtype='float')
                if np.isscalar(biases)
                else np.array(biases, dtype='float', ndmin=1))
    rating_one = self.get_rating(when, first)
    rating_two = self.get_rating(when, second)
    center = self.compare(rating_one, rating_two) + self.commutator + bias_arr
    return np.squeeze(norm.sf(x, loc=center, scale=self.scale))
def pdf(self, x, times, labels1, labels2, biases=0):
    """
    Computes the probability distribution function (PDF) for each
    comparison, i.e. P(x).
    Args:
        x (array of float): input values
        times (array of np.datetime64): comparison datetimes
        labels1 (array of str): comparison labels for first entity
        labels2 (array of str): comparison labels for second entity
        biases (array of float): comparison bias correct factors,
            default value is 0
    Returns:
        y (array of float): probability density at each input
    """
    when = np.array(times, dtype='datetime64[s]', ndmin=1)
    first = np.array(labels1, dtype='str', ndmin=1)
    second = np.array(labels2, dtype='str', ndmin=1)
    # A scalar bias is broadcast to one entry per comparison.
    bias_arr = (np.full_like(when, biases, dtype='float')
                if np.isscalar(biases)
                else np.array(biases, dtype='float', ndmin=1))
    rating_one = self.get_rating(when, first)
    rating_two = self.get_rating(when, second)
    center = self.compare(rating_one, rating_two) + self.commutator + bias_arr
    return np.squeeze(norm.pdf(x, loc=center, scale=self.scale))
def percentile(self, p, times, labels1, labels2, biases=0):
    """
    Computes percentiles p of the probability distribution.
    Args:
        p (array of float): percentiles to evaluate (in range [0, 100])
        times (array of np.datetime64): comparison datetimes
        labels1 (array of str): comparison labels for first entity
        labels2 (array of str): comparison labels for second entity
        biases (array of float): comparison bias correction factors,
            default value is 0
    Returns:
        x (array of float): values of the distribution corresponding to
            each percentile
    Raises:
        ValueError: if any percentile lies outside [0, 100]
    """
    # Validate the percentiles up front, before doing any (potentially
    # expensive) rating lookups; previously invalid input was only
    # rejected after the ratings had already been computed.
    p = np.true_divide(p, 100.0)
    if np.count_nonzero(p < 0.0) or np.count_nonzero(p > 1.0):
        raise ValueError("percentiles must be in the range [0, 100]")
    times = np.array(times, dtype='datetime64[s]', ndmin=1)
    labels1 = np.array(labels1, dtype='str', ndmin=1)
    labels2 = np.array(labels2, dtype='str', ndmin=1)
    if np.isscalar(biases):
        biases = np.full_like(times, biases, dtype='float')
    else:
        biases = np.array(biases, dtype='float', ndmin=1)
    ratings1 = self.get_rating(times, labels1)
    ratings2 = self.get_rating(times, labels2)
    loc = self.compare(ratings1, ratings2) + self.commutator + biases
    return np.squeeze(norm.ppf(p, loc=loc, scale=self.scale))
def quantile(self, q, times, labels1, labels2, biases=0):
"""
Computes quantiles q of the probability distribution.
Same as percentiles but accepts values [0, 1].
Args:
q (array of float): quantiles to evaluate (in range [0, 1])
times (array of np.datetime64): comparison datetimes
labels1 (array of str): comparison labels for first entity
labels2 (array of str): comparison labels for second entity
values (array of float): comparison value observed outcomes
biases (array of float): comparison bias correct factors,
default value is 0
Returns:
x (array of float): values of the distribution | |
weights to use for both CNTK and ELL layers
# CNTK has these in filters, channels, rows, columns order
weightValues = np.random.uniform(
low=-5, high=5, size=(5, 2, 3, 3)).astype(np.float32)
# create an ELL Tensor from the cntk weights, which re-orders the
# weights and produces an appropriately dimensioned tensor
weightTensor = cntk_converters.\
get_tensor_from_cntk_convolutional_weight_value_shape(
weightValues, weightValues.shape)
# Create a Binary Convolution CNTK layer with no bias, no activation,
# stride 1
# Input order for CNTK is channels, rows, columns
x = input((2, 10, 10))
cntkModel = CustomSign(x)
cntkModel = BinaryConvolution(
(10, 10), num_filters=5, channels=2, init=weightValues,
pad=True, bias=False, init_bias=0, activation=False)(cntkModel)
# Create the equivalent ELL predictor
layerParameters = ell.neural.LayerParameters(
# Input order for ELL is rows, columns, channels. Account for
# padding.
ell.math.TensorShape(10 + 2, 10 + 2, 2),
ell.neural.ZeroPadding(1),
ell.math.TensorShape(10, 10, 5),
ell.neural.NoPadding(),
ell.nodes.PortType.smallReal)
convolutionalParameters = ell.neural.BinaryConvolutionalParameters(
3, 1, ell.neural.BinaryConvolutionMethod.bitwise,
ell.neural.BinaryWeightsScale.none)
layer = ell.neural.BinaryConvolutionalLayer(
layerParameters, convolutionalParameters, weightTensor)
predictor = ell.neural.NeuralNetworkPredictor([layer])
# Get the results for both
inputValues = np.random.uniform(
low=-50, high=50, size=(2, 10, 10)).astype(np.float32)
cntkResults = cntkModel(inputValues)
orderedCntkResults = cntk_converters.get_vector_from_cntk_array(
cntkResults)
orderedInputValues = cntk_converters.get_vector_from_cntk_array(
inputValues)
ellResults = predictor.Predict(orderedInputValues)
# Compare the results
np.testing.assert_array_equal(
orderedCntkResults, ellResults,
'results for Binary Convolution layer do not match!')
# now run same over ELL compiled model
self.verify_compiled(
predictor, orderedInputValues, orderedCntkResults,
"binary_convolution", "test")
def test_batch_normalization_layer(self):
    """Test a model with a single CNTK BatchNormalization layer against the
    equivalent ELL predictor. This verifies that the import functions
    reshape and reorder values appropriately and that the equivalent ELL
    layer produces comparable output.
    """
    # Create a test set of scales and biases to use for both CNTK and ELL
    # layers.  All four parameter vectors have one entry per channel (16).
    scaleValues = np.linspace(0.1, 0.5, num=16, dtype=np.float32)
    scaleVector = cntk_converters.get_vector_from_cntk_array(
        scaleValues)
    biasValues = np.linspace(1, 2, num=16, dtype=np.float32)
    biasVector = cntk_converters.get_vector_from_cntk_array(
        biasValues)
    meanValues = np.linspace(-0.5, 0.5, num=16, dtype=np.float32)
    meanVector = cntk_converters.get_vector_from_cntk_array(
        meanValues)
    varianceValues = np.linspace(-1, 1, num=16, dtype=np.float32)
    varianceVector = cntk_converters.get_vector_from_cntk_array(
        varianceValues)
    # Create a BatchNormalization CNTK layer
    # CNTK's BatchNormalization layer does not support setting the running
    # mean and variance, so we use a wrapper function around the
    # batch_normalization op
    batchNorm = BatchNormalizationTester(
        init_scale=scaleValues, norm_shape=scaleValues.shape,
        init_bias=biasValues, init_mean=meanValues,
        init_variance=varianceValues)
    # Input order for CNTK is channels, rows, columns
    x = input((16, 10, 10))
    cntkModel = batchNorm(x)
    # Create the equivalent ELL predictor
    layers = []
    layerParameters = ell.neural.LayerParameters(
        # Input order for ELL is rows, columns, channels
        ell.math.TensorShape(10, 10, 16),
        ell.neural.NoPadding(),
        ell.math.TensorShape(10, 10, 16),
        ell.neural.NoPadding(),
        ell.nodes.PortType.smallReal)
    # CNTK BatchNorm = ELL's BatchNorm + Scaling + Bias
    # 1e-5 is the default epsilon for CNTK's BatchNormalization Layer
    epsilon = 1e-5
    layers.append(ell.neural.BatchNormalizationLayer(
        layerParameters, meanVector, varianceVector, epsilon,
        ell.neural.EpsilonSummand.variance))
    layers.append(ell.neural.ScalingLayer(layerParameters, scaleVector))
    layers.append(ell.neural.BiasLayer(layerParameters, biasVector))
    predictor = ell.neural.NeuralNetworkPredictor(layers)
    inputValues = np.linspace(
        -5, 5, num=16 * 10 * 10, dtype=np.float32).reshape(16, 10, 10)
    cntkResults = cntkModel(inputValues)
    orderedCntkResults = cntk_converters.get_vector_from_cntk_array(
        # Note that cntk inserts an extra dimension of 1 in the front
        cntkResults)
    orderedInputValues = cntk_converters.get_vector_from_cntk_array(
        inputValues)
    ellResults = predictor.Predict(orderedInputValues)
    # Compare the results (precision is 1 less decimal place from epsilon)
    np.testing.assert_array_almost_equal(
        orderedCntkResults, ellResults, 6,
        'results for BatchNormalization layer do not match!')
    # now run same over ELL compiled model
    self.verify_compiled(
        predictor, orderedInputValues, orderedCntkResults, "batch_norm",
        "test", precision=6)
def test_prelu_activation_layer(self):
    """Test a model with a single CNTK PReLU activation layer against the
    equivalent ELL predictor. This verifies that the import functions
    reshape and reorder values appropriately and that the equivalent ELL
    layer produces comparable output.
    """
    # Create a test set of alpha parameters to use for both CNTK and ELL
    # layers
    # Input order for CNTK is channels, rows, columns
    alphaValues = np.linspace(
        1, 2, num=16 * 10 * 10, dtype=np.float32).reshape(16, 10, 10)
    # create an ELL Tensor from the alpha parameters, which re-orders and
    # produces an appropriately dimensioned tensor
    alphaTensor = cntk_converters.\
        get_tensor_from_cntk_convolutional_weight_value_shape(
            alphaValues, alphaValues.shape)
    # Input spans [-5, 5] so both the negative (alpha-scaled) and positive
    # branches of PReLU are exercised.
    inputValues = np.linspace(
        -5, 5, num=16 * 10 * 10, dtype=np.float32).reshape(16, 10, 10)
    # Evaluate a PReLU CNTK layer
    x = input((16, 10, 10))
    p = parameter(shape=x.shape, init=alphaValues, name="prelu")
    cntkModel = param_relu(p, x)
    # Create the equivalent ELL predictor
    layerParameters = ell.neural.LayerParameters(
        # Input order for ELL is rows, columns, channels
        ell.math.TensorShape(10, 10, 16),
        ell.neural.NoPadding(),
        ell.math.TensorShape(10, 10, 16),
        ell.neural.NoPadding(),
        ell.nodes.PortType.smallReal)
    layer = ell.neural.PReLUActivationLayer(layerParameters, alphaTensor)
    predictor = ell.neural.NeuralNetworkPredictor([layer])
    cntkResults = cntkModel(inputValues)
    orderedCntkResults = cntk_converters.get_vector_from_cntk_array(
        cntkResults)
    orderedInputValues = cntk_converters.get_vector_from_cntk_array(
        inputValues)
    ellResults = predictor.Predict(orderedInputValues)
    # Compare the results
    np.testing.assert_array_equal(
        orderedCntkResults, ellResults,
        'results for PReLU Activation layer do not match!')
    # now run same over ELL compiled model
    self.verify_compiled(
        predictor, orderedInputValues, orderedCntkResults,
        "prelu_activation", "test")
class CntkXorModelTestCase(common_importer_test.EllImporterTestBase):
    """End-to-end test: import a small XOR CNTK model and verify the
    resulting ELL predictor reproduces the XOR truth table."""

    def test_simple_xor_model(self):
        """Import xorModel1.dnn, check predictions for all four XOR input
        combinations, then save both a plain and a steppable ELL map."""
        path = os.path.join(find_ell.find_ell_build(), "tools/importers/CNTK/test/xorModel1.dnn")
        predictor = cntk_to_ell.predictor_from_cntk_model(path)
        result = predictor.Predict([0, 0])
        self.assertAlmostEqual(
            result[0], 0, msg='incorrect prediction for [0, 0]')
        result = predictor.Predict([0, 1])
        self.assertAlmostEqual(
            result[0], 1, msg='incorrect prediction for [0, 1]')
        result = predictor.Predict([1, 0])
        self.assertAlmostEqual(
            result[0], 1, msg='incorrect prediction for [1, 0]')
        result = predictor.Predict([1, 1])
        self.assertAlmostEqual(
            result[0], 0, msg='incorrect prediction for [1, 1]')
        # create a map and save to file
        ell_map = ell.neural.utilities.ell_map_from_predictor(predictor)
        ell_map.Save("xor_test.map")
        # create a steppable map (with timing parameters) and save to file
        ell_map = ell.neural.utilities.ell_map_from_predictor(
            predictor, step_interval_msec=500, lag_threshold_msec=750, function_prefix="XorTest")
        ell_map.Save("xor_test_steppable.map")
class CntkToEllFullModelTestBase(common_importer_test.EllImporterTestBase):
    """Base test case that downloads full CNTK models and the ILSVRC2012
    category labels used by the end-to-end import tests."""

    # Paths are relative to the test model repository root.
    CATEGORIES_URL = 'models/ILSVRC2012/categories.txt'
    MODEL_URLS = [
        'models/ILSVRC2012/d_I160x160x3CMCMCMCMCMCMC1AS/d_I160x160x3CMCMCMCMCMCMC1AS.cntk.zip'
        # 'models/ILSVRC2012/BrazilianGuava/BrazilianGuava.cntk.zip',
        # 'models/ILSVRC2012/Coconut/Coconut.cntk.zip'
    ]

    def setUp(self):
        """Download the category file and all model archives, recording the
        extracted model base names (without extension) in self.model_names.

        Skips the test when SkipFullModelTests is set.
        """
        super(CntkToEllFullModelTestBase, self).setUp()
        base_model_uri = self.get_test_model_repo()
        if SkipFullModelTests:
            self.skipTest('Full model tests are being skipped')
        self.label_file = download_file(base_model_uri + self.CATEGORIES_URL)
        with open(self.label_file) as categories_file:
            self.categories = categories_file.read().splitlines()
        self.model_names = []
        for m in self.MODEL_URLS:
            url = base_model_uri + m
            # download_and_extract_model returns the extracted file name;
            # the previous basename/splitext pre-computations were dead
            # code (immediately overwritten) and have been removed.
            filename = download_and_extract_model(url, model_extension=".cntk")
            self.model_names += [os.path.splitext(filename)[0]]
class CntkModelsTestCase(CntkToEllFullModelTestBase):
def test_import(self):
    """Tests the importing of CNTK models to ELL"""
    # NOTE: the summary string above was previously placed after the
    # first statement, making it a no-op expression rather than a
    # docstring; it is now a real docstring.
    _logger = logger.get()
    for modelName in self.model_names:
        args = [modelName + '.cntk']
        cntk_import.main(args)
        # Verify that the ELL model file was produced
        self.assertTrue(
            os.path.exists(modelName + '.ell'),
            'Failed to successfully import model: ' + modelName + '.cntk')
        _logger.info('Successfully imported ' + modelName + '.cntk')
def test_model(self):
"""Test the model against the CNTK output for the following cases:
- imported predictor
- imported Model (reference implementation)
- unarchived Model (reference implementation)
- imported Model (compiled implementation)
- unarchived Model (compiled implementation)
"""
for modelName in self.model_names:
self.model_test_impl(modelName)
def compute_ell_map(self, ellMap, ellOrderedInput, cntkResults, modelName):
    """Compute the unarchived ELL map on the given input, assert the
    output matches the reference CNTK results, and return it."""
    archive_results = ellMap.Compute(ellOrderedInput)
    message = ('results for CNTK and ELL unarchived map reference (' +
               modelName + ') do not match!')
    # Verify CNTK results and unarchived ELL model results match
    np.testing.assert_array_almost_equal(
        cntkResults, archive_results, decimal=5, err_msg=message)
    return archive_results
def model_test_impl(self, modelName):
_logger = logger.get()
with self.subTest(modelName=modelName):
_logger.info('Testing {0}.cntk vs ELL ({0})'.format(modelName))
# Load the cntk model
cntkModel = load_model(modelName + '.cntk')
# Import the model into an ELL map live, without unarchiving
predictor = cntk_to_ell.predictor_from_cntk_model(
modelName + '.cntk')
ellMap = ell.neural.utilities.ell_map_from_predictor(predictor)
# Load the map from archive
ellMapFromArchive = ell.model.Map(modelName + '.ell')
inputShape = ellMap.GetInputShape()
# outputShape = ellMap.GetOutputShape()
# Compile the live map
# Note: for testing purposes, callback functions assume the "model" namespace
compiler_options = ell.model.MapCompilerOptions()
compiler_options.useBlas = False
ellCompiledMap = ellMap.Compile('host', 'model', 'predict', compilerOptions=compiler_options)
# Compile the unarchived map
# Note: for testing purposes, callback functions assume the "model" namespace
ellCompiledMapFromArchive = ellMapFromArchive.Compile(
'host', 'model', 'predict', compilerOptions=compiler_options)
cntkInput = np.random.uniform(
high=255, size=(inputShape.channels, inputShape.rows, inputShape.columns)
).astype(np.float)
ellOrderedInput = cntk_converters.\
get_vector_from_cntk_array(cntkInput)
cntkInput = np.ascontiguousarray(cntkInput)
# Get the CNTK results
_, out = cntkModel.forward(
{cntkModel.arguments[0]: [cntkInput],
cntkModel.arguments[1]: [list(range(len(self.categories)))]})
for output in cntkModel.outputs:
if (output.shape == (len(self.categories),)):
out = out[output]
cntkResults = softmax(out[0]).eval()
_logger.info('Comparing predictor output (reference)')
sys.stdout.flush()
ellPredictorResults = predictor.Predict(ellOrderedInput)
# Verify CNTK results and predictor results match
np.testing.assert_array_almost_equal(
cntkResults, ellPredictorResults, decimal=5,
err_msg=('results for CNTK and ELL predictor (' + modelName +
') do not match!'))
_logger.info('Comparing map output (reference)')
sys.stdout.flush()
ellMapResults = ellMap.Compute(ellOrderedInput)
# Verify CNTK results and ELL map results match
np.testing.assert_array_almost_equal(
cntkResults, ellMapResults, decimal=5,
err_msg=('results for CNTK and ELL map reference (' +
modelName + ') do not match!'))
_logger.info('Comparing unarchived map output (reference)')
sys.stdout.flush()
self.compute_ell_map(ellMapFromArchive, ellOrderedInput, cntkResults, modelName)
_logger.info('Comparing map output (compiled)')
sys.stdout.flush()
ellCompiledMapResults = ellCompiledMap.Compute(ellOrderedInput)
# Verify CNTK results and unarchived ELL model results match
np.testing.assert_array_almost_equal(
cntkResults, ellCompiledMapResults, decimal=5,
err_msg=('results for CNTK and ELL map compiled (' +
modelName + ') do not match!'))
_logger.info('Comparing unarchived map output (compiled)')
| |
a is defined in aSet
# As a result, any option defined as 'a' will be used if the
# user specifies the 'b'
ret = a in aSet
if ret:
if b in bSet:
raise OptionMatcherException(
'Bad alias:' + a + '/' + b + ' in ' + self.describe())
bSet.add(b)
for each in self.flags, self.options, self.prefixes:
try:
each[b] = each[a]
except KeyError:
pass
return ret
for s, t in aliases.items():
if self.mode.getopt:
# In getoptmode, aliases must map short and long options,
# that is, options with 1 character and options with more
# than 1 character
if len(s) > len(t):
s, t = t, s
if len(s) > 1 or len(t) == 1:
raise OptionMatcherException('Bad alias:' + s + '/' + t)
if set_alias(t, s, self.defs, self.short_defs):
continue
elif t in self.defs:
# if alias 'l' is already known, we try setting from s->l
s, t = t, s
set_alias(s, t, self.short_defs, self.defs)
def get_index_name(self, index):
    # returns the flag/option/parameter name with the given index
    # (no prefixes). Flags are checked before options; any of the
    # aliases mapped to the index may be returned.
    for kind, table in (('flag ', self.flags), ('option ', self.options)):
        for name, value in table.items():
            if value == index:
                return kind + name
    return 'parameter ' + self.pars[index]
def describe(self):
    """Describes the underlying method"""
    # When self.func is a plain function, get_method_name raises
    # AttributeError and we fall back to the 'function ' prefix.
    try:
        prefix = 'method ' + get_method_name(self.func) + '.'
    except AttributeError:
        prefix = 'function '
    return prefix + self.func.__name__
def get_doc(self):
    """Return the documentation of the underlying method, if any"""
    underlying = self.func
    return underlying.__doc__
def _get_parameters_info(self, f):
    """Return (parameter names, supports_varargs, supports_kwargs) for *f*."""
    # This information includes: the list of variables, if it supports
    # varargs, and if it supports kwargs.
    # 0x0004 / 0x0008 are the CO_VARARGS / CO_VARKEYWORDS code-object
    # flag bits (see the `inspect` module documentation).
    flags, var_names = get_flags_and_parameter_names(f)
    return list(var_names), (flags & 0x0004) != 0, (flags & 0x0008) != 0
def _as_int(self, value):
return int(value)
def _as_float(self, value):
return float(value)
class OptMatcherHandler(OptMatcherInfo):
    """Internal class, representing each specific matcher handler.
    It is an OptMatcherInfo extended with operations to handle arguments
    """

    def __init__(self, func, mode):
        OptMatcherInfo.__init__(self, func, mode)
        self.reset()

    def reset(self):
        """Clear every value collected from previously handled arguments."""
        # all prefixes are reset as provided as an empty list
        self.provided = dict([(i, []) for i in self.prefixes.values()])
        self.provided_pars = []

    def invoke(self):
        """Invokes the underlying function, unless it cannot be invoked."""
        # It is invoked using the options/parameters/defaults already setup.
        # Returns False (short-circuit) when the invocation parameters are
        # incomplete; otherwise the function's own return value.
        status, args, kwargs = self._get_invoking_pars()
        return (status is None) and self.func(*args, **kwargs)

    def check_invokable(self, required):
        """Verifies whether the underlying function can be invoked.

        Returns a falsy value when invokable; otherwise the error reason.
        """

        def something_provided():
            # just check if the user provided any value.
            return self.provided_pars or any(filter(lambda x: x != [],
                                                    self.provided.values()))

        # It can, if all the options/parameters are specified or have defaults
        error_reason = self._get_invoking_pars()[0]
        return (required or something_provided()) and error_reason

    def _get_invoking_pars(self):
        """Assemble the call parameters for the underlying function."""
        # Returns the parameters required to invoke the underlying function.
        # It returns a tuple (problem, *args, **kwargs)
        args, parameters = [], self.provided_pars[:]
        # we only check the indexes 1...last_arg, so the orphan flags are not
        # checked here (they are not used to invoke the method)
        for i in range(1, self.last_arg):
            try:
                value = self.provided[i]  # read first the provided value
            except KeyError:
                # otherwise, the current index could refer to a parameter,
                # which are stored separately
                if i in self.pars and parameters:
                    value = parameters.pop(0)
                else:
                    # this argument were not provided: try the default value
                    try:
                        value = self.defaults[i]
                    except KeyError:
                        # Neither, this function cannot be invoked
                        return ('Missing required ' + self.get_index_name(i),
                                None, None)
            args.append(value)
        # if the function defined a *arg parameter, it can handle the
        # remaining provided parameters (if not, we would had already an error)
        args.extend(parameters)
        # It must be still checked the orphan flags' variables
        # These are not passed to the method, but must have been provided to
        # consider that the method can be invoked
        for c in range(self.orphan_flags, 0):
            if c not in self.provided:
                return 'Missing required ' + self.get_index_name(c), None, None
        return None, args, self.kwargs or {}

    def handle_arg(self, command_line):
        """Handles one argument in the command line"""
        # Returns None if ok, otherwise the reason why it cannot consume the
        # argument
        # An exception is raised in not recoverable situations: like flag not
        # provided when needed, etc
        # This handling can imply, under getopt mode, consuming more
        # than one argument in the command line, or just a portion
        # of one, if a short option was specified
        # Check first options (short/long)
        if command_line.option:
            if command_line.is_short:
                return self._handle_short_arg(command_line)
            return self._handle_long_arg(command_line)
        # If not, it is a parameter, but perhaps there are already too many...
        if not self.vararg and (len(self.provided_pars) >= len(self.pars)):
            return 'Unexpected argument: ' + command_line.arg
        self.provided_pars.append(command_line.arg)
        command_line.set_arg_handled()
        return None

    def _handle_long_arg(self, cmd):
        """Handles one long argument in the command line."""
        # Returns None implicitly when the argument is consumed, or an
        # error string when it cannot be used.
        name = cmd.name
        # only check the name if defined (and not defined as a short option)
        ok_name = name in self.defs
        if ok_name and self._handle_option(cmd):
            return None
        flag = ok_name and self.flags.get(name, None)
        if flag:
            if cmd.split:  # flag, but user specified a value
                raise UsageException('Incorrect flag ' + name)
            self.provided[flag] = True
        else:
            prefix, name = self._split_prefix(name)
            if prefix:
                if not name:
                    # perhaps is given as -D=value(bad) or separate (getopt)
                    if (cmd.split or not self.mode.getopt or
                            cmd.set_arg_handled()):
                        raise UsageException(
                            'Incorrect prefix usage on argument ' + cmd.arg)
                    # note that cmd.value is the value of next argument now
                    name = cmd.name
                self.provided[prefix].append((name, cmd.value))
            else:  # try now the self.kwargs, if possible
                try:
                    self.kwargs[cmd.name] = cmd.value
                except TypeError:
                    # no kwargs, this argument cannot be used
                    return 'Unexpected argument: ' + cmd.arg
        cmd.set_arg_handled()

    def _handle_short_arg(self, cmd):
        """Handles one short argument in the command line"""
        # This method is only called for getopt mode
        name = cmd.name
        if name not in self.short_defs:
            # in shorts, name is just one letter, so not inclusion in
            # short_defs means that it is neither a prefix, do no more checks
            return 'Unexpected flag ' + name + ' in argument ' + cmd.arg
        flag = self.flags.get(name, None)
        if flag:
            self.provided[flag] = True
            cmd.set_short_arg_handled()
        elif not self._handle_option(cmd):
            prefix = self.prefixes.get(name, None)
            # no flag, no option, but in short_defs->is a prefix!
            if not cmd.value:
                # given separately
                if cmd.set_arg_handled():
                    raise UsageException('Incorrect prefix ' + name)
                cmd.value = cmd.arg
            self.provided[prefix].append(cmd.separate(cmd.value)[1:])
            cmd.set_arg_handled()
        return None

    def _handle_option(self, cmd):
        """Checks if the command is a valid option, handling it if so
        Returns the option handled, or None if not handled
        """
        # the normal case, -name=value, implies command.value
        name = cmd.name
        option = self.options.get(name, None)
        if option:
            if cmd.value:
                value = cmd.value
            else:
                # under getoptmode, this is still valid if the value is
                # provided as a separate argument (no option, no split)
                if not self.mode.getopt or cmd.set_arg_handled() or cmd.split:
                    raise UsageException('Incorrect option ' + name)
                value = cmd.arg
            # If a conversion is needed (to integer/float), do it now
            try:
                value = self.converts[option](value)
            except KeyError:
                # no conversion required, we treat it always as file
                value = os.path.expanduser(os.path.expandvars(value))
            except ValueError:
                raise UsageException('Incorrect value for ' + name)
            self.provided[option] = value
            cmd.set_arg_handled()
        return option

    def _split_prefix(self, name):
        """Split a known prefix off *name*; (prefix index, rest) or (None, None)."""
        # Splits an existing prefix from the given name.
        # It does not apply to short prefixes (getopt mode)
        # It returns the tuple (prefix, rest), or (None, None) if not found
        for each in self.prefixes:
            if each in self.defs and name.startswith(each):
                return self.prefixes[each], name[len(each):]
        return None, None
class UsageAccessor(object):
"""Class to access and to format usage info"""
def __init__(self, handlers, mode):
    """Store the matcher handlers and mode, then start with empty content."""
    self.handlers = handlers  # each is a list [matcher, optsets...]
    self.mode = mode
    self.reset()
def get_content(self):
    """Format method: return all accumulated lines joined by newlines."""
    lines = self.content
    return "\n".join(lines)
def reset(self, width=72):
    """Format method: discard accumulated content and set the wrap width."""
    self.width = width
    self.content = ['']
def add_line(self, content=None, column=0):
    """
    Format method: start a new output line; when `content` is given,
    place it on that line at the given column (see the add method).
    """
    self.content.append('')
    if not content:
        return
    self.add(content, column)
def add(self, content, column=0):
"""
Format | |
return directory_list
def old_list_directory(self, encoded_pathspec, recursive=False):
    """Lists a directory using a pathspec or list of pathspecs"""
    # NOTE(review): despite the "old_" name, this delegates to
    # self._list_directory (the current implementation), not
    # self._old_list_directory — confirm that is intentional.
    directory_list = []
    pathspec = PathspecHelper._decode_pathspec(encoded_pathspec)
    directory_list.extend(self._list_directory(
        resolver.Resolver.OpenFileEntry(pathspec), recursive, 0))
    return directory_list
def _old_list_directory(self, file_entry, recursive=False, depth=0):
    """Lists a directory using a file entry.

    The root entry (depth 0) is not itself included in the listing;
    sub-entries are descended into only at the root or when recursive.
    """
    # NOTE(review): recursion goes through self._list_directory (the
    # current implementation), not this method — confirm intentional.
    directory_list = []
    if depth > 0:
        directory_list.append(self.get_evidence_item(JsonPathSpecSerializer.WriteSerialized(file_entry.path_spec)))
    if (recursive or depth == 0) and file_entry.IsDirectory():
        for sub_file_entry in file_entry.sub_file_entries:
            directory_list.extend(self._list_directory(sub_file_entry, recursive, depth + 1))
    return directory_list
@staticmethod
def _decode_pathspec(encoded_pathspec):
"""Returns a Path Spec object from an encoded path spec, causes a 400 abort if the decode fails"""
if not encoded_pathspec:
logging.warn('Path Spec required but none found')
raise KeyError('Could not find pathspec in request')
return JsonPathSpecSerializer.ReadSerialized(encoded_pathspec)
@staticmethod
def get_inode(encoded_pathspec):
    """Returns the inode for the given pathspec"""
    decoded = PathspecHelper._decode_pathspec(encoded_pathspec)
    return decoded.inode
@staticmethod
def get_file_path(encoded_pathspec):
    """Returns the full path of the given pathspec, or '' when the
    pathspec carries no location attribute."""
    pathspec = PathspecHelper._decode_pathspec(encoded_pathspec)
    return getattr(pathspec, 'location', '')
@staticmethod
def get_file_name(encoded_pathspec):
    """Returns the file name with extension of the given pathspec,
    falling back to 'none' when the path has no basename."""
    base = os.path.basename(PathspecHelper.get_file_path(encoded_pathspec))
    return base or 'none'
@staticmethod
def get_file_directory(encoded_pathspec):
    """Returns the full path of the parent directory of the given pathspec"""
    full_path = PathspecHelper.get_file_path(encoded_pathspec)
    return os.path.dirname(full_path)
@staticmethod
def get_file_extension(encoded_pathspec):
    """Returns the lowercased, dot-free file extension of the given
    pathspec ('' when there is no extension)."""
    name = PathspecHelper.get_file_name(encoded_pathspec)
    extension = os.path.splitext(name)[1]
    return extension[1:].lower() or ""
@staticmethod
def _get_pathspec_hash(encoded_pathspec):
"""Returns the SHA1 hash of the encoded pathspec, NOT THE FILE"""
return hashlib.sha1(encoded_pathspec).hexdigest()
@staticmethod
def get_file_strings(encoded_pathspec, minimum_characters=4, size=0, seek=0):
    """Extract runs of printable characters (at least `minimum_characters`
    long) from the file behind the given pathspec."""
    printable = r"A-Za-z0-9/\-:.,_$%'()[\]<> "
    matcher = re.compile('[%s]{%d,}' % (printable, minimum_characters))
    data = PathspecHelper.read_file(encoded_pathspec, size=size, seek=seek)
    return matcher.findall(data)
@staticmethod
def _open_file_entry(encoded_pathspec):
    """Returns an open File Entry object of the given path spec.

    Entries are cached per pathspec with a reference count; callers
    must balance each call with _close_file_entry.  Transient dfVFS
    errors trigger one retry; a CacheFullError empties the cache first.
    """
    # Global lock only guards creation of the per-pathspec lock.
    with PathspecHelper._open_file_entries_lock:
        if encoded_pathspec not in PathspecHelper._open_file_entries_locks:
            PathspecHelper._open_file_entries_locks[encoded_pathspec] = threading.Lock()
    with PathspecHelper._open_file_entries_locks[encoded_pathspec]:
        # Cache hit: bump the refcount and return the cached entry
        # (falls through to re-open when the cached value is falsy).
        if encoded_pathspec in PathspecHelper._open_file_entries:
            PathspecHelper._open_file_entries_count[encoded_pathspec] += 1
            if PathspecHelper._open_file_entries[encoded_pathspec]:
                return PathspecHelper._open_file_entries[encoded_pathspec]
        try:
            PathspecHelper._open_file_entries_count[encoded_pathspec] = 1
            try:
                PathspecHelper._open_file_entries[encoded_pathspec] =\
                    resolver.Resolver.OpenFileEntry(PathspecHelper._decode_pathspec(encoded_pathspec))
            except KeyError:
                # NOTE(review): logging.warn is deprecated (Python 2 era code)
                logging.warn('Unknown KEY ERROR while opening evidence file, attempting again...')
                PathspecHelper._open_file_entries[encoded_pathspec] = \
                    resolver.Resolver.OpenFileEntry(PathspecHelper._decode_pathspec(encoded_pathspec))
            except RuntimeError:
                logging.warn('Unknown RUNTIME ERROR while opening evidence file, attempting again...')
                PathspecHelper._open_file_entries[encoded_pathspec] = \
                    resolver.Resolver.OpenFileEntry(PathspecHelper._decode_pathspec(encoded_pathspec))
            except AttributeError:
                logging.warn('Unknown ATTRIBUTE ERROR while opening evidence file, attempting again...')
                PathspecHelper._open_file_entries[encoded_pathspec] = \
                    resolver.Resolver.OpenFileEntry(PathspecHelper._decode_pathspec(encoded_pathspec))
            except CacheFullError:
                # Evict idle cached entries, then retry once.
                PathspecHelper._clear_file_entry_cache()
                PathspecHelper._open_file_entries[encoded_pathspec] = \
                    resolver.Resolver.OpenFileEntry(PathspecHelper._decode_pathspec(encoded_pathspec))
            #if not PathspecHelper._open_file_entries[encoded_pathspec]:
            #    # TODO There appears to be a bug in dfVFS
            #    # TODO for compressed formats ZIP, etc.
            #    logging.warn('Attempting compression error fix...')
            #    type_indicator_list = ['ZIP', 'GZIP']
            #    pathspec_dictionary = json.loads(encoded_pathspec)
            #    # TODO add levels to repeat current_level = 0
            #    if pathspec_dictionary['type_indicator'] in type_indicator_list:
            #        pathspec_dictionary['location'] = pathspec_dictionary['location'] + u'/'
            #        new_encoded_pathspec = json.dumps(pathspec_dictionary)
            #        PathspecHelper._open_file_entries[encoded_pathspec] = \
            #            resolver.Resolver.OpenFileEntry(PathspecHelper._decode_pathspec(new_encoded_pathspec))
            if PathspecHelper._open_file_entries[encoded_pathspec]:
                return PathspecHelper._open_file_entries[encoded_pathspec]
        except Exception as e:
            # Roll back the refcount before reporting failure.
            del PathspecHelper._open_file_entries_count[encoded_pathspec]
            logging.error('Failed second attempt to open evidence file entry')
            logging.debug(encoded_pathspec)
            # NOTE(review): e.message only exists on Python 2 exceptions
            logging.debug(e.message)
            logging.debug(traceback.format_exc())
            raise RuntimeError('Failed to open evidence file entry')
        logging.error('Missing file entry for pathspec "' + encoded_pathspec + '"')
        raise RuntimeError('Missing File Entry for Pathspec')
@staticmethod
def _clear_file_entry_cache():
    """Evict every cached file entry whose per-pathspec lock is not
    currently held, to make room in the file entry cache."""
    logging.warn('File Entry cache is full, attempting to empty cache')
    with PathspecHelper._open_file_entries_lock:
        # NOTE(review): dict.iteritems() is Python 2 only.
        temp_locks = PathspecHelper._open_file_entries_locks.iteritems()
        # Collect keys first so we never mutate the dicts mid-iteration.
        keys_to_delete = []
        for key, lock in temp_locks:
            if not lock.locked():
                keys_to_delete.append(key)
        for key in keys_to_delete:
            if key in PathspecHelper._open_file_entries_locks:
                del PathspecHelper._open_file_entries_locks[key]
            if key in PathspecHelper._open_file_entries_count:
                del PathspecHelper._open_file_entries_count[key]
            if key in PathspecHelper._open_file_entries:
                del PathspecHelper._open_file_entries[key]
    # Brief pause to let other threads make progress after eviction.
    time.sleep(0.1)
@staticmethod
def _close_file_entry(encoded_pathspec):
"""Closes the file entry"""
try:
with PathspecHelper._open_file_entries_lock:
PathspecHelper._open_file_entries_count[encoded_pathspec] -= 1
# currently waiting until cache is full is more stable than the following code:
# if PathspecHelper._open_file_entries_count[encoded_pathspec] < 1:
# del PathspecHelper._open_file_entries[encoded_pathspec]
# del PathspecHelper._open_file_entries_locks[encoded_pathspec]
except KeyError:
logging.error('Attempted to close already closed file entry!')
raise RuntimeError('Attempting to close already closed file entry')
@staticmethod
def read_file(encoded_pathspec, file_entry=False, size=0, seek=0):
"""Reads the file object from the specified pathspec, always seeks back to the beginning"""
file = PathspecHelper._open_file_object(encoded_pathspec)
with PathspecHelper._file_read_lock:
file.seek(seek)
if size:
data = file.read(size)
else:
data = file.read()
file.seek(0)
PathspecHelper._close_file_object(encoded_pathspec)
return data
    @staticmethod
    def _open_file_object(encoded_pathspec):
        """Returns the file object from the specified pathspec"""
        # Serialize all file-object cache operations.
        with PathspecHelper._open_file_object_lock:
            # Only grow the cache while it is below the configured limit.
            if len(PathspecHelper._open_file_objects) < PathspecHelper._max_file_count:
                # Track how many callers currently hold this file object.
                if encoded_pathspec in PathspecHelper._open_file_objects_count:
                    PathspecHelper._open_file_objects_count[encoded_pathspec] += 1
                else:
                    PathspecHelper._open_file_objects_count[encoded_pathspec] = 1
                if encoded_pathspec not in PathspecHelper._open_file_objects:
                    file_entry = PathspecHelper._open_file_entry(encoded_pathspec)
                    if not file_entry.IsFile() and not file_entry.IsDevice():
                        PathspecHelper._close_file_entry(encoded_pathspec)
                        raise TypeError('Cannot open file object, because the pathspec is not for a file or device.')
                    try:
                        PathspecHelper._open_file_objects[encoded_pathspec] = file_entry.GetFileObject()
                    except SystemError:
                        # GetFileObject occasionally fails transiently; retry once.
                        logging.warn('System Error while trying to get file object, attempting again.')
                        PathspecHelper._open_file_objects[encoded_pathspec] = file_entry.GetFileObject()
                    # The file entry was only needed to create the file object.
                    PathspecHelper._close_file_entry(encoded_pathspec)
        # NOTE(review): when the cache is at capacity the whole branch above is
        # skipped, so the reference count is not incremented and this lookup
        # raises a bare KeyError for pathspecs that are not already cached --
        # confirm whether raising CacheFullError was intended here instead.
        return PathspecHelper._open_file_objects[encoded_pathspec]
@staticmethod
def _close_file_object(encoded_pathspec):
"""Closes the file object associated with the specified pathspec"""
with PathspecHelper._open_file_object_lock:
try:
PathspecHelper._open_file_objects_count[encoded_pathspec] -= 1
if PathspecHelper._open_file_objects_count[encoded_pathspec] < 1:
PathspecHelper._open_file_objects[encoded_pathspec].close()
del PathspecHelper._open_file_objects[encoded_pathspec]
PathspecHelper._close_file_entry(encoded_pathspec)
except KeyError:
logging.error('Attempted to close already closed file object!')
raise RuntimeError('Attempting to close already closed file object')
@staticmethod
def list_base_pathspecs(evidence):
'''Gets the base pathspec for the given evidence'''
decoded_pathspec = PathspecHelper._decode_pathspec(evidence['pathspec'])
if u'archive_type' in evidence and u'ZIP' in evidence['archive_type']:
pathspec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_ZIP, location=u'/',
parent=decoded_pathspec)
elif u'compression_type' in evidence and u'GZIP' in evidence['compression_type']:
pathspec = path_spec_factory.Factory.NewPathSpec(dfvfs_definitions.TYPE_INDICATOR_GZIP,
parent=decoded_pathspec)
elif u'compression_type' in evidence and u'BZIP2' in evidence['compression_type']:
pathspec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_COMPRESSED_STREAM,
compression_method=dfvfs_definitions.COMPRESSION_METHOD_BZIP2,
parent=decoded_pathspec)
elif u'archive_type' in evidence and u'TAR' in evidence['archive_type']:
pathspec = dfvfs.path.tar_path_spec.TARPathSpec(location='/', parent=decoded_pathspec)
else:
return PathspecHelper._list_new_base_pathspecs(evidence['pathspec'])
encoded_base_pathspec = JsonPathSpecSerializer.WriteSerialized(pathspec)
if hasattr(pathspec, 'location'):
location = pathspec.location
if location.endswith('/') or location.endswith('\\'):
location = location[:-1]
file_name = os.path.basename(location)
else:
file_name = '/'
return [{'pathspec': encoded_base_pathspec, 'file_name': file_name}]
@staticmethod
def _list_new_base_pathspecs(encoded_pathspec):
'''Gets a list of the base_pathspecs from in a pathspec using dfvfs_utils'''
try:
dfvfs_util = DfvfsUtil(PathspecHelper._decode_pathspec(encoded_pathspec), interactive=True, is_pathspec=True)
except CacheFullError:
PathspecHelper._clear_file_entry_cache()
dfvfs_util = DfvfsUtil(PathspecHelper._decode_pathspec(encoded_pathspec), interactive=True, is_pathspec=True)
pathspec = dfvfs_util.base_path_specs
if not isinstance(pathspec, list):
pathspec = [pathspec]
pathspecs = []
previous_partition = ''
for item in pathspec:
if hasattr(item.parent, 'location'):
file_name = item.parent.location
else:
file_name = '/'
# Adds the partition name in front of the shadow volume
if getattr(item.parent, 'type_indicator', '') == 'TSK_PARTITION':
previous_partition = item.parent.location
elif getattr(item.parent, 'type_indicator', '') == 'VSHADOW':
file_name = previous_partition + file_name
new_encoded_pathspec = JsonPathSpecSerializer.WriteSerialized(item)
pathspecs.append({'pathspec': new_encoded_pathspec,
'url_query': urlencode({'pathspec': new_encoded_pathspec}),
'file_name': file_name})
return pathspecs
@staticmethod
def get_parent_pathspec_manually(encoded_pathspec):
"""Returns the decoded parent pathspec, by decoding and getting the 'parent' attribute"""
pathspec = PathspecHelper._decode_pathspec(encoded_pathspec)
parent = getattr(pathspec, 'parent', False)
if parent:
return parent
return False
@staticmethod
def set_pathspec_location(encoded_pathspec, location):
"""Returns the updated pathspec if the location exists, else it returns false"""
pathspec = PathspecHelper._decode_pathspec(encoded_pathspec)
setattr(pathspec, 'location', location)
if getattr(pathspec, 'inode', False):
delattr(pathspec, 'inode')
try:
new_encoded_pathspec = JsonPathSpecSerializer.WriteSerialized(pathspec)
PathspecHelper._open_file_entry(new_encoded_pathspec)
# TODO Make less generic, Runtime?
except:
return False
return new_encoded_pathspec
@staticmethod
def get_encoded_parent_base_pathspec_manually(encoded_pathspec):
"""Returns the encoded parent pathspec, by decoding and getting the 'parent' attribute"""
return JsonPathSpecSerializer.WriteSerialized(PathspecHelper.get_parent_pathspec_manually(encoded_pathspec))
@staticmethod
def get_parent_pathspec(encoded_pathspec, skip_to_type_indicator=False):
'''Gets the parent pathspec of the provided pathspec'''
if not skip_to_type_indicator:
file_entry = PathspecHelper._open_file_entry(encoded_pathspec)
try:
parent_entry = file_entry.GetParentFileEntry()
except KeyError:
parent_entry = False
logging.warn('Failed to GetParentFileEntry using dfvfs, trying manually.')
PathspecHelper._close_file_entry(encoded_pathspec)
if skip_to_type_indicator or not parent_entry:
parent_path_spec = PathspecHelper.get_parent_pathspec_manually(encoded_pathspec)
else:
parent_path_spec = parent_entry.path_spec
if not parent_path_spec:
return False
while getattr(parent_path_spec, 'type_indicator', '') in PathspecHelper._automatically_traverse:
parent_path_spec = parent_path_spec.parent
return JsonPathSpecSerializer.WriteSerialized(parent_path_spec)
@staticmethod
def get_pathspec(pathspec_or_source):
"""Gets the pathspec, only gets the first one so limit use"""
try:
pathspec = DfvfsUtil.decode_pathspec(pathspec_or_source)
except:
try:
dfvfs_util = DfvfsUtil(pathspec_or_source)
except CacheFullError:
PathspecHelper._clear_file_entry_cache()
dfvfs_util = DfvfsUtil(pathspec_or_source)
pathspec = dfvfs_util.base_path_specs
if isinstance(pathspec, list):
pathspec = pathspec[0]
return pathspec
@staticmethod
def get_encoded_pathspec(pathspec_or_source):
"""Gets the encoded pathspec"""
return JsonPathSpecSerializer.WriteSerialized(PathspecHelper.get_pathspec(pathspec_or_source))
@staticmethod
def guess_mimetype(extension):
"""Returns the assumed mimetype based on the extension"""
types_map = {
'a' : 'application/octet-stream',
'ai' : 'application/postscript',
'aif' : 'audio/x-aiff',
'aifc' : 'audio/x-aiff',
'aiff' : 'audio/x-aiff',
'au' : 'audio/basic',
'avi' : 'video/x-msvideo',
'bat' : 'text/plain',
'bcpio' : 'application/x-bcpio',
'bin' : 'application/octet-stream',
'bmp' : 'image/x-ms-bmp',
'c' : 'text/plain',
'cdf' : 'application/x-cdf',
'cpio' : 'application/x-cpio',
'csh' : 'application/x-csh',
'css' : 'text/css',
'dll' : 'application/octet-stream',
'doc' : 'application/msword',
'docx' : 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'dot' : 'application/msword',
'dotx' : 'application/vnd.openxmlformats-officedocument.wordprocessingml.template',
'dvi' : 'application/x-dvi',
'eml' : 'message/rfc822',
'eps' : 'application/postscript',
'etx' : 'text/x-setext',
'exe' : 'application/x-dosexec',
'gif' : 'image/gif',
'gtar' : 'application/x-gtar',
'h' : 'text/plain',
'hdf' : 'application/x-hdf',
'htm' : 'text/html',
'html' : 'text/html',
'ico' : 'image/vnd.microsoft.icon',
'ief' : 'image/ief',
'jpe' : 'image/jpeg',
'jpeg' : 'image/jpeg',
'jpg' : 'image/jpeg',
'js' : 'application/javascript',
'ksh' : 'text/plain',
'latex' : 'application/x-latex',
'm1v' : 'video/mpeg',
'man' : 'application/x-troff-man',
'me' : 'application/x-troff-me',
'mht' : 'message/rfc822',
'mhtml' : 'message/rfc822',
'mif' : 'application/x-mif',
'mov' : 'video/quicktime',
'movie' : 'video/x-sgi-movie',
'mp2' : 'audio/mpeg',
'mp3' : 'audio/mpeg',
'mp4' : 'video/mp4',
'mpa' : 'video/mpeg',
| |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
####################
# Interface to NetworkOwl device
# Reads solar PV generation, electricity usage, weather, heating & hot water data & updates device states
# Reads 3 phases if device type is Intuition-lc
# V1.6 16 January 2018
# https://smudger4.github.io
#
#
# v1.6: repackaged for Indigo Plugin Library - no functional changes
# v1.5: updated electricity sections to handle new V2 XML specification alongside existing
# v1.4: updated hot water & heating sections to support updated firmware (2.6)
# v1.3: rewritten to replace references to Twisted with more generic networking code, as Twisted no longer supported by Indigo when running on OS X 10.9 (Mavericks) because Twisted libraries were removed from the old version of OS X Python that Indigo relies on.
# v1.2: adds read-only support for hot water & heating controls
# v1.1: extends electricity packet to support 3-phase
# v1.0: initial version
import os
import sys
import struct
import time
import socket
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
########################################
########################################
class NetworkOwl():
    """Class to interface with the NetworkOWL electricity monitor device.
    Reads the multicast stream sent by the NetworkOwl & creates packets to represent
    each kind of datagram. Supported types are Solar, Electricity, Weather, Hot water &
    Heating.
    Each packet's associate() method updates the object represented by the plugin
    instance variable. In the case of Indigo, this will be the Plugin instance, but for
    other uses adapt the packet classes to interface to the surrounding framework.
    Instance variables:
    plugin - represents the object containing an instance of this class.
    Constants:
    MULTICAST_GROUP - defined by NetworkOwl hardware: group IP address.
    MULTICAST_PORT - defined by NetworkOwl hardware: port number.
    """
    # constants for NetworkOwl
    MULTICAST_GROUP = '172.16.31.10'
    MULTICAST_PORT = 22600
    ########################################
    def __init__(self, plugin):
        """Store the reference to the containing object."""
        self.plugin = plugin
    ########################################
    def startProtocol(self):
        """Create a UDP socket, join the multicast group and return the socket.

        On a socket error the problem is logged and the socket (if it was
        created) is closed before returning.
        """
        self.plugin.mylogger.log(4, "Intuition: startProtocol called")
        sock = None
        try:
            # Create the socket
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
            # Bind to the server address
            sock.bind(('', NetworkOwl.MULTICAST_PORT))
            # Tell the operating system to add the socket to the multicast group
            # on all interfaces.
            group = socket.inet_aton(NetworkOwl.MULTICAST_GROUP)
            mreq = struct.pack('4sL', group, socket.INADDR_ANY)
            sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
        except socket.error:
            # BUG FIX: was `self.mylogger` - the logger lives on the plugin.
            self.plugin.mylogger.logError("Intuition: socket error, closing")
            # Guard against the socket never having been created.
            if sock is not None:
                sock.close()
        return sock
    ########################################
    def runProtocol(self, sock):
        """
        wait loop for retrieving multicast packets from socket.
        assumes it is being run in a separate thread so is OK to block waiting
        sends ACK back to sending station
        then processes the received packet
        loop until the main Indigo plugin thread variable is reset
        """
        while not self.plugin.stopThread:
            # check config hasn't changed
            self.plugin.checkConfig()
            self.plugin.mylogger.log(4, "Intuition: waiting to receive message")
            data, address = sock.recvfrom(1024)
            num_bytes = len(data)
            self.plugin.mylogger.log(4, 'received %s bytes from %s' % (num_bytes, address))
            self.plugin.mylogger.log(4, data)
            self.plugin.mylogger.log(4, 'sending acknowledgement to %s' % repr(address))
            # Rather than replying to the group multicast address, we send the
            # reply directly (unicast) to the originating port:
            sock.sendto('ack', address)
            # process the data just received from the Network Owl
            self.processDataPacket(data)
            self.plugin.sleep(1)
    ########################################
    def processDataPacket(self, datagram):
        """Parse the datagram & create appropriate data packet instance."""
        # Create an Element Tree instance to hold the data received from the Network Owl
        root = ET.fromstring(datagram)
        self.plugin.mylogger.log(3, "NetworkOwl: " + root.tag + " packet received")
        packet = None
        # get device associated with the MAC address of packet received
        packetAddress = root.get('id')
        self.plugin.mylogger.log(4, "NetworkOwl: packet address: %s" % packetAddress)
        # check if we recognise the MAC address, if not ignore the datagram
        if packetAddress not in self.plugin.deviceDict:
            # BUG FIX: was `self.mylogger` - the logger lives on the plugin.
            self.plugin.mylogger.logError("Received packet from unknown NetworkOwl %s: create NetworkOwl device in Indigo" % packetAddress)
        else:
            # Map each datagram tag to the packet class that parses it.
            packet_classes = {
                'solar': SolarPacket,
                'electricity': ElectricityPacket,
                'weather': WeatherPacket,
                'heating': HeatingPacket,
                'hot_water': HotWaterPacket,
            }
            packet_class = packet_classes.get(root.tag)
            if packet_class is not None:
                packet = packet_class(self.plugin, packetAddress, root)
                if packet.isValid():
                    packet.associate()
            if packet is None:
                # no idea what to do with this type of packet, just log an error
                # BUG FIX: was `self.mylogger` - the logger lives on the plugin.
                self.plugin.mylogger.logError("Unknown '%s' packet received" % root.tag)
    ########################################
    def stripDecimals(self, inParam):
        """Remove '.00' from the end of value passed in"""
        outParam = inParam
        if inParam.endswith('.00'):
            outParam = inParam[:-3]
        return outParam
########################################
########################################
class NetworkOwlPacket(object):
    """Base class for all NetworkOwl data packets.
    Instance variables:
    reading_time - time packet was read
    mac_address - MAC address of packet
    packet_type - type of packet as a single word
    plugin - reference to the containing plugin context
    device - reference to the device once associated
    owlType - kind of Owl device that sent the packet
    valid - is the packet a valid one?
    """
    ########################################
    def __init__(self, plugin, addr):
        """Record the reading time, source MAC address and target device."""
        self.reading_time = time.strftime('%Y/%m/%d %H:%M:%S')
        self.mac_address = addr
        self.packet_type = ""
        self.xml_version = None
        self.plugin = plugin
        self.device = plugin.deviceDict[addr]
        self.owlType = str(self.device.states["networkOwlType"])
        # Subclasses flip this to True once parsing succeeds.
        self.valid = False
    ########################################
    def stripDecimals(self, inParam):
        """Remove '.00' from the end of the value passed in."""
        return inParam[:-3] if inParam.endswith('.00') else inParam
    ########################################
    def isValid(self):
        """Return whether this packet parsed successfully."""
        return self.valid
    ########################################
    def associate(self):
        """Associate the packet with Indigo"""
        self.device.updateStateOnServer("lastUpdated", self.reading_time)
    ########################################
    def splunk(self, field, value):
        """Log packet data in a Splunk-friendly format"""
        self.plugin.mylogger.log(2, "NetworkOwl: %s=%s" % (field, value))
########################################
########################################
class SolarPacket(NetworkOwlPacket):
    """NetworkOwl solar packet.
    Instance variables:
    gen_watts - watts being generated now.
    exp_watts - watts being exported now.
    gen_watts_today - total number of watts generated today.
    exp_watts_today - total number of watts exported today.
    <solar id='xxxx'>
    <current>
    <generating units='w'>84.00</generating>
    <exporting units='w'>84.00</exporting>
    </current>
    <day>
    <generated units='wh'>145.56</generated>
    <exported units='wh'>145.56</exported>
    </day>
    </solar>
    """
    ########################################
    def __init__(self, plugin, addr, root):
        """Parse a <solar> datagram into instance variables.

        Only the original, unversioned XML format is recognised; a datagram
        carrying a 'ver' attribute is logged as an error and left invalid.
        """
        NetworkOwlPacket.__init__(self, plugin, addr)
        self.packet_type = "solar"
        self.gen_watts = 0
        self.exp_watts = 0
        self.gen_watts_today = 0
        self.exp_watts_today = 0
        # work out which version of XML we're dealing with & behave accordingly
        self.xml_version = root.get('ver')
        # `is None` (not `== None`): the original format carries no version.
        if self.xml_version is None:
            # extract & process current readings
            self.valid = True
            currSolar = root.find('current')
            gen_watts = currSolar.find('generating').text
            exp_watts = currSolar.find('exporting').text
            # remove the decimal points
            self.gen_watts = self.stripDecimals(gen_watts)
            self.exp_watts = self.stripDecimals(exp_watts)
            # extract & process whole day readings
            daySolar = root.find('day')
            self.gen_watts_today = daySolar.find('generated').text
            self.exp_watts_today = daySolar.find('exported').text
        else:
            # unknown XML format - log error
            self.plugin.mylogger.logError(u"unrecognised solar packet received")
    ########################################
    def associate(self):
        """Update Indigo with solar packet data"""
        NetworkOwlPacket.associate(self)
        self.device.updateStateOnServer("genWattsNow", self.gen_watts)
        self.device.updateStateOnServer("expWattsNow", self.exp_watts)
        self.device.updateStateOnServer("genWattsToday", self.gen_watts_today)
        self.device.updateStateOnServer("expWattsToday", self.exp_watts_today)
        self.plugin.mylogger.log(3, "NetworkOwl: generating watts: " + self.gen_watts)
        self.plugin.mylogger.log(3, "NetworkOwl: exporting watts: " + self.exp_watts)
        NetworkOwlPacket.splunk(self, "solar_gen_watts", self.gen_watts)
        NetworkOwlPacket.splunk(self, "solar_exp_watts", self.exp_watts)
########################################
########################################
class ElectricityPacket(NetworkOwlPacket):
"""NetworkOwl electricity packet - upto 6 channels.
Instance variables:
curr_watts[] - list of current readings for each channel.
watts_today[] - list of daily totals for each channel.
XML V1
------
<electricity id='xxxx'>
<signal rssi='-72' lqi='70'/>
<battery level='100%'/>
<chan id='0'>
<curr units='w'>0.00</curr>
<day units='wh'>0.00</day>
</chan>
<chan id='1'>
<curr units='w'>84.00</curr>
<day units='wh'>146.96</day>
</chan>
<chan id='2'>
<curr units='w'>0.00</curr>
<day units='wh'>0.00</day>
</chan>
</electricity>
XML V2
------
<electricity id='xxxx' ver='2.0'>
<timestamp>1458313071</timestamp>
<signal rssi='-58' lqi='4'/>
<battery level='100%'/>
<channels>
<chan id='0'>
<curr units='w'>342.00</curr>
<day units='wh'>16456.46</day>
</chan>
<chan id='1'>
<curr units='w'>383.00</curr>
<day units='wh'>13415.39</day>
</chan>
<chan id='2'>
<curr units='w'>0.00</curr>
<day units='wh'>0.00</day>
</chan>
<chan id='3'>
<curr units='w'>0.00</curr>
<day units='wh'>0.00</day>
</chan>
<chan id='4'>
<curr units='w'>0.00</curr>
<day units='wh'>0.00</day>
</chan>
<chan id='5'>
<curr units='w'>0.00</curr>
<day units='wh'>0.00</day>
| |
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for structured_writer."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from reverb import client as client_lib
from reverb import server as server_lib
from reverb import structured_writer
import tree
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import tensor_spec
# pylint: enable=g-direct-tensorflow-import
# Convenience aliases for the classes exercised by these tests.
Condition = structured_writer.Condition
TensorSpec = tensor_spec.TensorSpec
# One FIFO queue table per destination needed by the tests below.
TABLES = tuple(f'queue_{i}' for i in range(5))
# Canonical structure of a single appended step: a scalar float 'a' plus a
# nested dict 'b' holding a 2x2 int32 matrix and two int64 vectors.
STEP_SPEC = {
    'a': np.zeros([], np.float32),
    'b': {
        'c': np.zeros([2, 2], np.int32),
        'd': [
            np.zeros([3], np.int64),
            np.zeros([6], np.int64),
        ],
    },
}
# Reference step used to express trajectory patterns over STEP_SPEC.
REF_STEP = structured_writer.create_reference_step(STEP_SPEC)
def create_step(idx, structure):
  """Returns `structure` with every leaf replaced by a tensor filled with `idx`."""
  def _fill(leaf):
    return np.ones_like(leaf) * idx
  return tree.map_structure(_fill, structure)
class StructuredWriterTest(parameterized.TestCase):
  """End-to-end tests for StructuredWriter against an in-process Reverb server."""
  @classmethod
  def setUpClass(cls):
    """Starts one shared server with a queue table per name in TABLES."""
    super().setUpClass()
    cls._server = server_lib.Server(
        [server_lib.Table.queue(table, 100) for table in TABLES])
  def setUp(self):
    """Creates a fresh client pointed at the shared server."""
    super().setUp()
    self.client = client_lib.Client(f'localhost:{self._server.port}')
  def tearDown(self):
    """Empties every table so tests cannot leak items into each other."""
    super().tearDown()
    for table in TABLES:
      self.client.reset(table)
  @classmethod
  def tearDownClass(cls):
    super().tearDownClass()
    cls._server.stop()
  def get_table_content(self, idx: int, structure=None):
    """Drains table `idx`, optionally unflattening samples into `structure`."""
    info = self.client.server_info(1)
    num_items = info[TABLES[idx]].current_size
    if num_items == 0:
      return []
    sampler = self.client.sample(
        TABLES[idx], num_samples=num_items, emit_timesteps=False)
    flat_samples = [sample.data for sample in sampler]
    if structure:
      return [tree.unflatten_as(structure, sample) for sample in flat_samples]
    return flat_samples
  @parameterized.parameters(
      {
          'condition': Condition.step_index() <= 2,
          'num_steps': 10,
          'want_steps': [0, 1, 2],
      },
      {
          'condition': Condition.step_index() >= 5,
          'num_steps': 10,
          'want_steps': [5, 6, 7, 8, 9],
      },
      {
          'condition': Condition.step_index() == 3,
          'num_steps': 10,
          'want_steps': [3],
      },
      {
          'condition': Condition.step_index() != 3,
          'num_steps': 10,
          'want_steps': [0, 1, 2, 4, 5, 6, 7, 8, 9],
      },
      {
          'condition': Condition.step_index() % 3 == 0,
          'num_steps': 10,
          'want_steps': [0, 3, 6, 9],
      },
      {
          'condition': Condition.step_index() % 3 != 0,
          'num_steps': 10,
          'want_steps': [1, 2, 4, 5, 7, 8],
      },
      {
          'condition': Condition.steps_since_applied() >= 4,
          'num_steps': 10,
          'want_steps': [3, 7],
      },
      {
          'condition': Condition.steps_since_applied() >= 3,
          'num_steps': 10,
          'want_steps': [2, 5, 8],
      },
      {
          'condition': Condition.is_end_episode(),
          'num_steps': 5,
          'want_steps': [4],
      },
  )
  def test_single_condition(self, condition, num_steps, want_steps):
    """Checks that a lone condition selects exactly `want_steps`."""
    pattern = structured_writer.pattern_from_transform(
        step_structure=None, transform=lambda x: x[-1])
    config = structured_writer.create_config(
        pattern=pattern, table=TABLES[0], conditions=[condition])
    writer = self.client.structured_writer([config])
    for i in range(num_steps):
      writer.append(i)
    writer.end_episode()
    want = [[step] for step in want_steps]
    self.assertEqual(self.get_table_content(0), want)
  @parameterized.parameters(
      {
          'pattern': {
              'x': REF_STEP['a'][-3],
              'y': REF_STEP['b']['c'][-2:],
              'z': REF_STEP['b']['d'][0][-1],
          },
          'num_steps':
              5,
          'want': [
              {
                  'x':
                      np.array(0, np.float32),
                  'y':
                      np.array([
                          np.array([[1, 1], [1, 1]], np.int32),
                          np.array([[2, 2], [2, 2]], np.int32),
                      ]),
                  'z':
                      np.array([2, 2, 2], np.int64),
              },
              {
                  'x':
                      np.array(1, np.float32),
                  'y':
                      np.stack([
                          np.array([[2, 2], [2, 2]], np.int32),
                          np.array([[3, 3], [3, 3]], np.int32),
                      ]),
                  'z':
                      np.array([3, 3, 3], np.int64),
              },
              {
                  'x':
                      np.array(2, np.float32),
                  'y':
                      np.stack([
                          np.array([[3, 3], [3, 3]], np.int32),
                          np.array([[4, 4], [4, 4]], np.int32),
                      ]),
                  'z':
                      np.array([4, 4, 4], np.int64),
              },
          ],
      },
      {
          'pattern': {
              'older': REF_STEP['a'][-3:-1],
              'last_a': REF_STEP['a'][-1],
          },
          'num_steps':
              5,
          'want': [
              {
                  'older': np.array([0, 1], np.float32),
                  'last_a': np.array(2, np.float32),
              },
              {
                  'older': np.array([1, 2], np.float32),
                  'last_a': np.array(3, np.float32),
              },
              {
                  'older': np.array([2, 3], np.float32),
                  'last_a': np.array(4, np.float32),
              },
          ],
      },
      {
          'pattern': {
              'every_second_a': REF_STEP['a'][-5::2]
          },
          'num_steps':
              10,
          'want': [
              {
                  'every_second_a': np.array([0, 2, 4], np.float32)
              },
              {
                  'every_second_a': np.array([1, 3, 5], np.float32)
              },
              {
                  'every_second_a': np.array([2, 4, 6], np.float32)
              },
              {
                  'every_second_a': np.array([3, 5, 7], np.float32)
              },
              {
                  'every_second_a': np.array([4, 6, 8], np.float32)
              },
              {
                  'every_second_a': np.array([5, 7, 9], np.float32)
              },
          ],
      },
  )
  def test_trajectory_patterns(self, pattern, num_steps, want):
    """Checks that slice/index patterns produce the expected trajectories."""
    config = structured_writer.create_config(
        pattern=pattern, table=TABLES[0], conditions=[])
    writer = self.client.structured_writer([config])
    for i in range(num_steps):
      writer.append(create_step(i, STEP_SPEC))
    writer.end_episode()
    tree.map_structure(np.testing.assert_array_equal, want,
                       self.get_table_content(0, pattern))
  def test_round_robin_into_tables(self):
    """Checks modulo step-index conditions distribute items across tables."""
    pattern = structured_writer.pattern_from_transform(
        step_structure=None, transform=lambda x: x[-1])
    # Create configs which should result in steps being written to available
    # tables in a round robin fashion.
    configs = []
    for i, table in enumerate(TABLES):
      configs.append(
          structured_writer.create_config(
              pattern=pattern,
              table=table,
              conditions=[Condition.step_index() % len(TABLES) == i]))
    # Take enough steps to generate two trajectories for each table.
    writer = self.client.structured_writer(configs)
    for i in range(len(TABLES) * 2):
      writer.append(i)
    writer.end_episode()
    # Check that steps was inserted into tables in the expected order.
    self.assertEqual(self.get_table_content(0), [[0], [5]])
    self.assertEqual(self.get_table_content(1), [[1], [6]])
    self.assertEqual(self.get_table_content(2), [[2], [7]])
    self.assertEqual(self.get_table_content(3), [[3], [8]])
    self.assertEqual(self.get_table_content(4), [[4], [9]])
  @parameterized.named_parameters(
      {
          'testcase_name': 'condition_on_unused_column',
          'step_spec': {'a': None, 'b': None},
          'pattern_fn': lambda x: {'old_a': x['a'][-2]},
          'condition_fn': lambda x: x['b'] > 10,
          'steps': [
              {'a': 1, 'b': 11},
              {'a': 2, 'b': 10},
              {'a': 3, 'b': 11},
              {'a': 4, 'b': 9},
              {'a': 5, 'b': 12},
          ],
          'want': [
              {'old_a': 2},
              {'old_a': 4},
          ],
      },
      {
          'testcase_name': 'int32_eq',
          'step_spec': STEP_SPEC,
          'pattern_fn': lambda x: {'last_two_a': x['a'][-2:]},
          'condition_fn': lambda x: x['a'] == 3,
          'steps': [
              {'a': np.array(0, np.int32)},
              {'a': np.array(2, np.int32)},
              {'a': np.array(3, np.int32)},
              {'a': np.array(4, np.int32)},
              {'a': np.array(5, np.int32)},
          ],
          'want': [
              {'last_two_a': np.array([2, 3], np.int32)},
          ],
      },
      {
          'testcase_name': 'int64_ne',
          'step_spec': STEP_SPEC,
          'pattern_fn': lambda x: {'last_two_a': x['a'][-2:]},
          'condition_fn': lambda x: x['a'] != 4,
          'steps': [
              {'a': np.array(1, np.int64)},
              {'a': np.array(2, np.int64)},
              {'a': np.array(3, np.int64)},
              {'a': np.array(4, np.int64)},
              {'a': np.array(5, np.int64)},
          ],
          'want': [
              {'last_two_a': np.array([1, 2])},
              {'last_two_a': np.array([2, 3])},
              {'last_two_a': np.array([4, 5])},
          ],
      },
      {
          'testcase_name': 'bool_eq',
          'step_spec': {'a': None, 'b': None},
          'pattern_fn': lambda x: {'last_a': x['a'][-1]},
          'condition_fn': lambda x: x['b'] == 1,
          'steps': [
              {'a': 1, 'b': True},
              {'a': 2, 'b': False},
              {'a': 3, 'b': False},
              {'a': 4, 'b': True},
              {'a': 5, 'b': False},
          ],
          'want': [
              {'last_a': 1},
              {'last_a': 4},
          ],
      },
  )
  def test_data_condition(
      self, step_spec, pattern_fn, condition_fn, steps, want):
    """Checks conditions evaluated against the appended data itself."""
    config = structured_writer.create_config(
        pattern=structured_writer.pattern_from_transform(step_spec, pattern_fn),
        table=TABLES[0],
        conditions=[
            condition_fn(structured_writer.Condition.data(step_spec)),
        ]
    )
    writer = self.client.structured_writer([config])
    for step in steps:
      writer.append(step)
    writer.flush()
    got = self.get_table_content(0, structured_writer.unpack_pattern(config))
    tree.map_structure(np.testing.assert_array_equal, want, got)
  def test_step_is_open(self):
    """Checks `step_is_open` across partial, full and finalized steps."""
    ref_step = structured_writer.create_reference_step([None, None, None])
    pattern = [r[-1] for r in ref_step]
    config = structured_writer.create_config(pattern, TABLES[0])
    writer = self.client.structured_writer([config])
    # The step should not be opened when the writer is first created.
    self.assertFalse(writer.step_is_open)
    # The step should still not be opened after a full step is appended.
    writer.append([1, 1, 1])
    self.assertFalse(writer.step_is_open)
    # Appending a partial step should make it True.
    writer.append([None, 2, None], partial_step=True)
    self.assertTrue(writer.step_is_open)
    # Appending more partial data to the same step shouldn't change anything.
    writer.append([None, None, 2], partial_step=True)
    self.assertTrue(writer.step_is_open)
    # Completing the step should make it False.
    writer.append([2, None, None])
    self.assertFalse(writer.step_is_open)
    # End episode should finalize the active step if any is open.
    writer.append([None, 3, None], partial_step=True)
    self.assertTrue(writer.step_is_open)
    writer.end_episode()
    self.assertFalse(writer.step_is_open)
  def test_append_wrong_dtype(self):
    """Checks that a dtype mismatch in a later step raises ValueError."""
    config = structured_writer.create_config(
        pattern=REF_STEP['b']['c'][-1],
        table=TABLES[0])
    writer = self.client.structured_writer([config])
    writer.append({'a': 1, 'b': {'c': 1.0}})
    with self.assertRaisesWithLiteralMatch(
        ValueError,
        'Tensor of wrong dtype provided for column 1 (path=(\'b\', \'c\')). '
        'Got int64 but expected double.'):
      writer.append({'a': 2, 'b': {'c': 2}})
  def test_append_incompatible_shape(self):
    """Checks that a shape mismatch in a later step raises ValueError."""
    config = structured_writer.create_config(
        pattern=REF_STEP['b']['d'][1][-1],
        table=TABLES[0])
    writer = self.client.structured_writer([config])
    writer.append({
        'a': 1,
        'b': {
            'c': 1.0,
            'd': [
                1,
                1,
            ],
        },
    })
    with self.assertRaisesWithLiteralMatch(
        ValueError,
        'Tensor of incompatible shape provided for column 3 '
        '(path=(\'b\', \'d\', 1)). Got [2,2] which is incompatible with [].'):
      writer.append({
          'a': 2,
          'b': {
              'c': 2.0,
              'd': [
                  2,
                  np.array([[2, 2], [2, 2]]),
              ],
          },
      })
  def test_append_with_different_structures(self):
    """Checks that changing the step structure between appends raises."""
    config = structured_writer.create_config(
        pattern=REF_STEP['b']['c'][-1],
        table=TABLES[0])
    writer = self.client.structured_writer([config])
    writer.append({'a': 1, 'b': {'c': 1}})
    with self.assertRaisesWithLiteralMatch(
        ValueError,
        'Flattened data has an unexpected length, got 4 but wanted 2.'):
      writer.append({'a': 2, 'b': {'c': 2, 'd': [2, 2]}})
class TestInferSignature(parameterized.TestCase):
@parameterized.parameters(
{
'patterns': [{
'older': REF_STEP['a'][-3:-1],
'last_a': REF_STEP['a'][-1],
},],
'step_spec': {
'a': np.zeros([3, 3], np.float32),
},
'want': {
'older': TensorSpec([2, 3, 3], np.float32, 'older'),
'last_a': TensorSpec([3, 3], np.float32, 'last_a'),
},
},
{
'patterns': [{
'a_with_step': REF_STEP['a'][-6::2],
'a_slice': REF_STEP['a'][-4:],
'x': {
'y': REF_STEP['b']['c'][-2],
},
},],
'step_spec': {
'a': np.zeros([3, 3], np.float32),
'b': {
'c': np.zeros([], np.int32),
'd': np.zeros([5], np.int8), # Unused.
},
},
'want': {
'a_with_step': TensorSpec([3, 3, 3], np.float32, 'a_with_step'),
'a_slice': TensorSpec([4, 3, 3], np.float32, 'a_slice'),
'x': {
'y': TensorSpec([], np.int32, 'x/y'),
},
},
},
{
'patterns': [
{
'x': REF_STEP['a'][-3:-1],
'y': REF_STEP['a'][-1],
'z': REF_STEP['b']['c'][-4:],
},
{
'x': REF_STEP['a'][-2:],
'y': REF_STEP['a'][-2],
'z': REF_STEP['b']['c'][-8::2],
},
],
'step_spec': {
'a': np.zeros([3, 3], np.float32),
'b': {
'c': | |
devices is set to be RTSI 7. Note: If the reference
clock source is set to a value other than None,
configure_for_homogeneous_triggers cannot configure the reference
clock source. Start Triggers If the start trigger is set to None (no
trigger configured) for all sessions, the sessions are configured to
share the start trigger. The start trigger is shared by: - Implicitly
exporting the start trigger from one session - Configuring the other
sessions for digital edge start triggers with sources corresponding to
the exported start trigger - Setting
start_trigger_master_session to the session that is
exporting the trigger for all sessions If the start triggers are None
for all except one session, configure_for_homogeneous_triggers
configures the sessions to share the start trigger from the one excepted
session. The start trigger is shared by: - Implicitly exporting start
trigger from the session with the start trigger that is not None -
Configuring the other sessions for digital-edge start triggers with
sources corresponding to the exported start trigger - Setting
start_trigger_master_session to the session that is
exporting the trigger for all sessions If start triggers are configured
for all sessions, configure_for_homogeneous_triggers does not
affect the start triggers. Start triggers are considered to be
configured for all sessions if either of the following conditions is
true: - No session has a start trigger that is None - One session has a
start trigger that is None, and all other sessions have start triggers
other than None. The one session with the None trigger must have
start_trigger_master_session set to itself, indicating
that the session itself is the start trigger master Reference Triggers
configure_for_homogeneous_triggers configures sessions that support
reference triggers to share the reference triggers if the reference
triggers are None (no trigger configured) for all except one session.
The reference triggers are shared by: - Implicitly exporting the
reference trigger from the session whose reference trigger is not None -
Configuring the other sessions that support the reference trigger for
digital-edge reference triggers with sources corresponding to the
exported reference trigger - Setting
ref_trigger_master_session to the session that is
exporting the trigger for all sessions that support reference trigger If
the reference triggers are configured for all sessions that support
reference triggers, configure_for_homogeneous_triggers does not
affect the reference triggers. Reference triggers are considered to be
configured for all sessions if either one or the other of the following
conditions is true: - No session has a reference trigger that is None -
One session has a reference trigger that is None, and all other sessions
have reference triggers other than None. The one session with the None
trigger must have ref_trigger_master_session set to
itself, indicating that the session itself is the reference trigger
master Reference Trigger Holdoffs Acquisition sessions may be configured
with the reference trigger. For acquisition sessions, when the reference
trigger is shared, configure_for_homogeneous_triggers configures
the holdoff properties (which are instrument driver specific) on the
reference trigger master session so that the session does not recognize
the reference trigger before the other sessions are ready. This
condition is only relevant when the sample clock rates, sample clock
timebase rates, sample counts, holdoffs, and/or any delays for the
acquisitions are different. When the sample clock rates, sample clock
timebase rates, and/or the sample counts are different in acquisition
sessions sharing the reference trigger, you should also set the holdoff
properties for the reference trigger master using the instrument driver.
Script Triggers configure_for_homogeneous_triggers configures
sessions that support script triggers to share them, if the script
triggers are None (no trigger configured) for all except one session.
The script triggers are shared in the following ways: - Implicitly
exporting the script trigger from the session whose script trigger is
not None - Configuring the other sessions that support the script
trigger for digital-edge script triggers with sources corresponding to
the exported script trigger - Setting
script_trigger_master_session to the session that is
exporting the trigger for all sessions that support script triggers If
the script triggers are configured for all sessions that support script
triggers, configure_for_homogeneous_triggers does not affect script
triggers. Script triggers are considered to be configured for all
sessions if either one or the other of the following conditions are
true: - No session has a script trigger that is None - One session has a
script trigger that is None and all other sessions have script triggers
other than None. The one session with the None trigger must have
script_trigger_master_session set to itself, indicating
that the session itself is the script trigger master Pause Triggers
configure_for_homogeneous_triggers configures generation sessions
that support pause triggers to share them, if the pause triggers are
None (no trigger configured) for all except one session. The pause
triggers are shared by: - Implicitly exporting the pause trigger from
the session whose pause trigger is not None - Configuring the other
sessions that support the pause trigger for digital-edge pause triggers
with sources corresponding to the exported pause trigger - Setting
pause_trigger_master_session to the session that is
exporting the trigger for all sessions that support pause triggers If
the pause triggers are configured for all generation sessions that
support pause triggers, configure_for_homogeneous_triggers does not
affect pause triggers. Pause triggers are considered to be configured
for all sessions if either one or the other of the following conditions
is true: - No session has a pause trigger that is None - One session has
a pause trigger that is None and all other sessions have pause triggers
other than None. The one session with the None trigger must have
pause_trigger_master_session set to itself, indicating
that the session itself is the pause trigger master Note: TClk
synchronization is not supported for pause triggers on acquisition
sessions.
Args:
sessions (list of (Driver Session or nitclk.SessionReference)): sessions is an array of sessions that are being synchronized.
'''
session_count_ctype = _visatype.ViUInt32(0 if sessions is None else len(sessions)) # case S160
sessions_ctype = get_ctypes_pointer_for_buffer(value=_converters.convert_to_nitclk_session_number_list(sessions), library_type=_visatype.ViSession) # case B520
error_code = self._library.niTClk_ConfigureForHomogeneousTriggers(session_count_ctype, sessions_ctype)
errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
return
    def finish_sync_pulse_sender_synchronize(self, sessions, min_time=datetime.timedelta(seconds=0.0)):
        r'''finish_sync_pulse_sender_synchronize

        Finishes synchronizing the Sync Pulse Sender.

        Args:
            sessions (list of (nimi-python Session class or nitclk.SessionReference)): sessions is an array of sessions that are being synchronized.

            min_time (float in seconds or datetime.timedelta): Minimal period of TClk, expressed in seconds. Supported values are
                between 0.0 s and 0.050 s (50 ms). Minimal period for a single
                chassis/PC is 200 ns. If the specified value is less than 200 ns,
                NI-TClk automatically coerces minTime to 200 ns. For multichassis
                synchronization, adjust this value to account for propagation delays
                through the various devices and cables.
        '''
        # Marshal the Python arguments into the ctypes values the C entry point
        # expects. The "# case ..." markers appear generator-emitted — presumably
        # from the nimi-python code generator; keep them intact.
        session_count_ctype = _visatype.ViUInt32(0 if sessions is None else len(sessions))  # case S160
        sessions_ctype = get_ctypes_pointer_for_buffer(value=_converters.convert_to_nitclk_session_number_list(sessions), library_type=_visatype.ViSession)  # case B520
        min_time_ctype = _converters.convert_timedelta_to_seconds_real64(min_time)  # case S140
        error_code = self._library.niTClk_FinishSyncPulseSenderSynchronize(session_count_ctype, sessions_ctype, min_time_ctype)
        # Raise on a bad status; warnings are not suppressed here.
        errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=False)
        return
    def _get_extended_error_info(self):
        r'''_get_extended_error_info

        Reports extended error information for the most recent NI-TClk method
        that returned an error. To establish the method that returned an
        error, use the return values of the individual methods because once
        _get_extended_error_info reports an errorString, it does not report
        an empty string again.

        Returns:
            error_string (str): Extended error description. If errorString is NULL, then it is not large
                enough to hold the entire error description. In this case, the return
                value of _get_extended_error_info is the size that you should use
                for _get_extended_error_info to return the full error string.
        '''
        # Two-phase call: first pass a NULL buffer so the C API returns the
        # required buffer size as its (positive) return value.
        error_string_ctype = None  # case C050
        error_string_size_ctype = _visatype.ViUInt32()  # case S170
        error_code = self._library.niTClk_GetExtendedErrorInfo(error_string_ctype, error_string_size_ctype)
        # is_error_handling=True avoids re-entering error lookup while already
        # handling an error; warnings are ignored on the sizing call.
        errors.handle_error(self, error_code, ignore_warnings=True, is_error_handling=True)
        # Second phase: allocate a buffer of the reported size and fetch the text.
        error_string_size_ctype = _visatype.ViUInt32(error_code)  # case S180
        error_string_ctype = (_visatype.ViChar * error_string_size_ctype.value)()  # case C060
        error_code = self._library.niTClk_GetExtendedErrorInfo(error_string_ctype, error_string_size_ctype)
        errors.handle_error(self, error_code, ignore_warnings=False, is_error_handling=True)
        return error_string_ctype.value.decode(self._encoding)
def initiate(self, sessions):
r'''initiate
Initiates the acquisition or generation sessions specified, taking into
consideration any special requirements needed for synchronization. For
| |
<filename>powerprotect/assetsource.py<gh_stars>0
import paramiko
from powerprotect.ppdm import Ppdm
from powerprotect.credential import Credential
from powerprotect import exceptions
from powerprotect import get_module_logger
from powerprotect import helpers
assetsource_logger = get_module_logger(__name__)
assetsource_logger.propagate = False
# Inventory-source types that PPDM accepts for an asset source.
# NOTE: the historical name contains a typo ("accpeted"). It is kept for
# backward compatibility and aliased below under the correct spelling.
accpeted_types = ["DATADOMAINMANAGEMENTCENTER", "SMISPROVIDER", "DDSYSTEM",
                  "VMAXSYSTEM", "XTREMIOMANAGEMENTSERVER", "RECOVERPOINT",
                  "HOST_OS", "SQLGROUPS", "ORACLEGROUP", "DEFAULTAPPGROUP",
                  "VCENTER", "EXTERNALDATADOMAIN", "POWERPROTECTSYSTEM", "CDR",
                  "KUBERNETES", "PPDM", "UNITYMANAGEMENTSERVER",
                  "POWERSTOREMANAGEMENTSERVER"]
# Correctly-spelled alias; both names refer to the same list object.
accepted_types = accpeted_types
class AssetSource(Ppdm):
""" Class to define an Asset Source object
Asset source object creation require: name, server, password
At object creation this library will go gather any information about
this object, if it exists on the PPDM server.
There are five interactive methods intended for user interaction:
create_assetsource
get_assetsource (run after creation and after all modification
operations automatically)
update_assetsource
delete_assetsource
remove_all_assets_from_policies
Attributes:
name (str): Display name of the asset source (unique)
id (str): Unique ID of the asset source (unique)
type (str): Asset source type, only from list of accepted types
body (dict): Return payload from PPDM server.
target_body (dict): Body to be used for updates to the object
exists (bool): Used to determine if the object exists on the PPDM
server
check_mode (bool): If true pretend to modify object on the PPDM server
msg (str): Return message of previous operations
failure (bool): If true, the previous operation failed
fail_msg (str): Detailed message from previous failure
fail_response (dict): Payload returned from PPDM after a failure
assets (list): List of all assets that are part of this asset source
"""
def __init__(self, **kwargs):
try:
self.name = kwargs['name']
self.id = ""
self.type = ""
self.body = {}
self.target_body = {}
self.exists = False
self.changed = False
self.check_mode = kwargs.get('check_mode', False)
self.discovery = {}
self.msg = ""
self.failure = False
self.fail_msg = ""
self.fail_response = {}
self.assets = []
super().__init__(**kwargs)
if 'token' not in kwargs:
super().login()
self.get_assetsource()
except KeyError as e:
assetsource_logger.error(f"Missing required field: {e}")
raise exceptions.PpdmException(f"Missing required field: {e}")
def create_assetsource(self, credential_id=None, credential_name=None,
**kwargs):
""" Method to create asset source if not present
This method will create an asset source if not already present. After
this method runs the get_assetsource method will execute updating the
objects attributes.
Check_Mode: True
Args:
address (str): FQDN or IP of asset source
port (int): Port to communicate with asset source
asset_type (str): PPDM Asset Source Type
credential_name (str): Unique name of cred to use for auth
tanzu (bool, default: False): For k8s tanzu clusters specify True
vcenter_name (str): If Tanzu k8s, specify unique vcenter name. This
must already be an asset source in PPDM
enable_vsphere_integration (bool): For vcenter asset source type,
set to True in order to enable.
Returns:
None
"""
try:
address = kwargs['address']
port = kwargs['port']
asset_type = (kwargs['asset_type']).upper()
except KeyError as e:
raise exceptions.PpdmException(f"Missing required argument: {e}")
tanzu = kwargs.get('tanzu', False)
vcenter_name = kwargs.get('vcenter_name', '')
vcenter_id = kwargs.get('vcenter_id', '')
enable_vsphere_integration = kwargs.get('enable_vsphere_integration',
False)
if not self.exists:
if not self.check_mode:
return_body = self.__create_assetsource(
address=address,
credential_name=credential_name,
credential_id=credential_id,
tanzu=tanzu,
vcenter_name=vcenter_name,
vcenter_id=vcenter_id,
port=port,
asset_type=asset_type,
enable_vsphere_integration=enable_vsphere_integration)
if self.check_mode:
assetsource_logger.info("check mode enabled, "
"no action taken")
return_body = helpers.ReturnBody()
return_body.success = True
if return_body.success:
self.changed = True
self.msg = f"Assetsource {self.name} created"
elif return_body.success is False:
self.failure = True
self.fail_msg = return_body.msg
self.fail_response = return_body.response
elif self.exists:
self.msg = f"Assetsource {self.name} already exists"
self.get_assetsource()
def get_assetsource(self):
""" Method to gather asset source information if present
This method can be thought of as a sync between PPDM and an asset
source object. As such it is run at object creation time, and after
each of the other four methods. This method updates attributes only
Args:
None
Returns:
None
"""
assetsource = self.__get_assetsource_by_name()
if bool(assetsource.response) is not False:
self.exists = True
self.id = assetsource.response['id']
self.type = assetsource.response['type']
self.assets = self.__get_all_assets()
self.__get_asset_source_discovery()
else:
self.exists = False
self.id = ""
self.assets = []
self.discovery = {}
self.body = assetsource.response
def update_assetsource(self):
""" Method to update asset source if present
This method uses the target_body attribute to update the asset source
in PPDM. If there is no asset sourcein PPDM or target_body
attribute, nothing will happen. After this method completes, the
get_assetsource method will run and the target_body attribute will
clear itself.
Check_Mode: True
Args:
None
Returns:
None
"""
if (self.exists and
self.target_body and
helpers._body_match(self.body, self.target_body) is False):
if not self.check_mode:
return_body = self.__update_assetsource()
if self.check_mode:
assetsource_logger.info("check mode enabled, "
"no action taken")
return_body = helpers.ReturnBody()
return_body.success = True
if return_body.success:
self.changed = True
self.msg = f"Assetsource {self.name} updated"
elif return_body.success is False:
self.failure = True
self.fail_msg = return_body.msg
self.fail_response = return_body.response
self.target_body = {}
self.get_assetsource()
def delete_assetsource(self):
""" Method to destroy asset source if present
This method will delete the asset source from PPDM. If the asset source
does not exist nothing will happen. After this method completes the
get_assetsource method will run to update this objects attributes.
Check_Mode: True
Args:
None
Returns:
None
"""
if self.exists:
if not self.check_mode:
return_body = self.__delete_assetsource()
if self.check_mode:
assetsource_logger.info("check mode enabled, "
"no action taken")
return_body = helpers.ReturnBody()
return_body.success = True
if return_body.success:
self.changed = True
self.msg = f"Assetsource {self.name} deleted"
elif return_body.success is False:
self.failure = True
self.fail_msg = return_body.msg
self.fail_response = return_body.response
self.get_assetsource()
def remove_all_assets_from_policies(self):
""" Method to remove an asset sources assets from any protection
policies
This method finds any assets from the objects asset source that belong
to a protection policy. Typically used before removing an asset
source. After this method completes the get_assetsource method will
run to update the objects attributes.
Check_Mode: False
Args:
None
Returns:
None
"""
assetsource_logger.debug("Method: remove_all_assets_from_policies")
if self.exists:
for asset in self.assets:
if asset['protectionPolicyId']:
body = [asset['id']]
url = ("/protection-policies/"
f"{asset['protectionPolicyId']}"
"/asset-unassignments")
response = super()._rest_post(url, body)
if response.ok is False:
assetsource_logger.error(f"""Unable to remove asset:
{asset['name']} from policy:
{asset['protectionPolicy']
['name']}""")
if response.ok:
assetsource_logger.debug(f"""Successfully removed
asset: {asset['name']} from
policy: {asset['protection'
'Policy']['name']}""")
self.get_assetsource()
def add_root_cert(self, **kwargs):
assetsource_logger.debug("Method: add_root_cert")
try:
username = kwargs['ssh_username']
password = kwargs['<PASSWORD>']
base64_cert = kwargs['base64_cert']
except KeyError as e:
assetsource_logger.error(f"Missing required field: {e}")
raise exceptions.PpdmException(f"Missing required field: {e}")
command = ("/usr/local/brs/bin/ppdmtool -importcert -alias "
f"{self.name} -file /home/admin/cert.pem -type BASE64")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(self.server, username=username, password=password)
except paramiko.ssh_exception.AuthenticationException as authError:
raise exceptions.PpdmException("Invalid auth credentials: "
f"{authError}")
ftp = ssh.open_sftp()
file = ftp.file('cert.pem', 'w', -1)
file.write(base64_cert)
file.flush()
ftp.close()
stdin, stdout, stderr = ssh.exec_command(command, timeout=30)
error = stderr.read()
error = error.decode("utf-8")
output = stdout.read()
output = output.decode("utf-8")
ssh.close()
if not ("Certificate was added to keystore" in error or
"already exists" in output):
assetsource_logger.error("The certificate was not able to be "
"added to the trust store.")
assetsource_logger.error(f"stderr: {error}")
assetsource_logger.error(f"stdout: {output}")
return False
assetsource_logger.debug("Certificate was successfully added")
return True
def __get_asset_source_discovery(self):
assetsource_logger.debug("Method: __get_asset_source_discovery")
response = super()._rest_get("/discoveries?filter=start%20eq%20%22%2F"
f"inventory-sources%2F{self.id}%22")
if not response.json()['content']:
assetsource_logger.error("Unable to find discovery schedule")
self.discovery = response.json()['content'][0]
def __update_asset_source_discovery(self):
assetsource_logger.debug("Method: __update_asset_source_discovery")
response = super()._rest_put(f"/discoveries/{self.discovery['id']}",
self.discovery)
if not response.ok:
assetsource_logger.error("Unable to update discovery id: "
f"{self.discovery['id']}")
def __get_all_assets(self):
assetsource_logger.debug("Method: __get_all_assets")
if self.type == "KUBERNETES":
asset_source_type = "k8s"
if self.type == "VCENTER":
asset_source_type = "vm"
response = super()._rest_get("/assets?filter=details."
f"{asset_source_type}.inventorySourceId"
f"%20eq%20%22{self.id}%22")
if len(response.json()['content']) > 0:
return response.json()['content']
def __get_assetsource_by_name(self, **kwargs):
assetsource_logger.debug("Method: __get_assetsource_by_name")
return_body = helpers.ReturnBody()
name = self.name
if 'name' in kwargs:
name = kwargs['name']
response = super()._rest_get("/inventory-sources"
f"?filter=name%20eq%20%22{name}%22")
if response.ok is False:
return_body.success = False
return_body.fail_msg = response.json()
return_body.status_code = response.status_code
if response.ok:
if not response.json()['content']:
err_msg = f"Assetsource not found: {self.name}"
assetsource_logger.debug(err_msg)
return_body.success = True
return_body.status_code = response.status_code
return_body.response = {}
else:
return_body.success = True
return_body.response = response.json()['content'][0]
return_body.status_code = response.status_code
return return_body
def __update_assetsource(self):
assetsource_logger.debug("Method: __update_assetsource")
return_body = helpers.ReturnBody()
future_body = self.body.copy()
future_body.update(self.target_body)
response = super()._rest_put("/inventory-sources"
f"/{self.id}", future_body)
if response.ok:
msg = f"Assetsource \"{self.name}\" successfully updated"
return_body.success = True
else:
msg = f"Assetsource \"{self.name}\" not updated"
return_body.success = False
assetsource_logger.debug(msg)
return_body.msg = msg
return_body.response = response.json()
return_body.status_code = response.status_code
return return_body
def __delete_assetsource(self):
assetsource_logger.debug("Method: __delete_assetsource")
return_body = helpers.ReturnBody()
response = super()._rest_delete(f"/inventory-sources/{self.id}")
if response.ok:
msg = f"Assetsource \"{self.name}\" successfully deleted"
certificate = self.__get_host_certificate(self.body['address'],
self.body['port'])
self.__delete_host_certificate(certificate.response)
return_body.success = True
return_body.response = {}
else:
msg = f"Assetsource \"{self.name}\" not deleted"
return_body.success = False
return_body.response = response.json()
assetsource_logger.debug(msg)
return_body.msg = msg
return_body.status_code = response.status_code
return return_body
def __create_assetsource(self, **kwargs):
assetsource_logger.debug("Method: __create_assetsource")
return_body = helpers.ReturnBody()
| |
<gh_stars>0
import getpass
import json
import requests
import urllib.request, urllib.parse, urllib.error
import app_setup
import queue
import threading
class Robinhood:
endpoints = {
"login": "https://api.robinhood.com/api-token-auth/",
"investment_profile": "https://api.robinhood.com/user/investment_profile/",
"logout": "https://api.robinhood.com/api-token-logout/",
"accounts":"https://api.robinhood.com/accounts/",
"ach_iav_auth":"https://api.robinhood.com/ach/iav/auth/",
"ach_relationships":"https://api.robinhood.com/ach/relationships/",
"ach_transfers":"https://api.robinhood.com/ach/transfers/",
"applications":"https://api.robinhood.com/applications/",
"dividends":"https://api.robinhood.com/dividends/",
"edocuments":"https://api.robinhood.com/documents/",
"instruments":"https://api.robinhood.com/instruments/",
"margin_upgrades":"https://api.robinhood.com/margin/upgrades/",
"markets":"https://api.robinhood.com/markets/",
"notifications":"https://api.robinhood.com/notifications/",
"orders":"https://api.robinhood.com/orders/",
"password_reset":"https://api.robinhood.com/password_reset/request/",
"portfolios":"https://api.robinhood.com/portfolios/",
"positions":"https://api.robinhood.com/positions/",
"quotes":"https://api.robinhood.com/quotes/",
"historicals":"https://api.robinhood.com/quotes/historicals/",
"document_requests":"https://api.robinhood.com/upload/document_requests/",
"user":"https://api.robinhood.com/user/",
"watchlists":"https://api.robinhood.com/watchlists/",
"news":"https://api.robinhood.com/midlands/news/",
"movers":"https://api.robinhood.com/midlands/movers/sp500/"
}
session = None
username = None
password = <PASSWORD>
headers = None
auth_token = None
##############################
#Logging in and initializing
##############################
def __init__(self):
""" default constructor for the object"""
self.session = requests.session()
self.session.proxies = urllib.request.getproxies()
self.headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en;q=1, fr;q=0.9, de;q=0.8, ja;q=0.7, nl;q=0.6, it;q=0.5",
"Content-Type": "application/x-www-form-urlencoded; charset=utf-8",
"X-Robinhood-API-Version": "1.0.0",
"Connection": "keep-alive",
"User-Agent": "Robinhood/823 (iPhone; iOS 8.1.2; Scale/2.00)"
}
self.session.headers = self.headers
self._userData = app_setup.AppSetup()
def cleanupPassword(self):
"""Testing purposes"""
self._userData.cleanUp()
def login_prompt(self):
"""Prompts user for username and password and calls login()."""
username = input("Username: ")
password = <PASSWORD>()
return self._login(username=username, password=password)
def login(self):
"""Facade relay method to relay a login session"""
return self._login(self._userData.getRobinhoodUserName(), self._userData.getRobinhoodPassword())
def _login(self, username, password):
"""private method to login into robinhood"""
self.username = username
self.password = password
data = urllib.parse.urlencode({"password" : <PASSWORD>, "username" : self.username})
res = self.session.post(self.endpoints['login'], data=data)
res = res.json()
try:
self.auth_token = res['token']
except KeyError:
print("[-] Error logging in please reenter username and password")
usr = str(input("Username: " ))
pas = str(input("password: "))
self._userData.changeUserData(usr, pas)
print("[+] relogging now.... ")
self.login()
#return False
self.headers['Authorization'] = 'Token '+self.auth_token
print('[+] successfully logged in')
return True
def logout(self):
""" log out method to log out of robinhood account """
try:
self.session.post(self.endpoints['logout'])
print("[+] successfully logged out")
return True
except Exception as e:
print("ERROR %s" % e)
return False
def investment_profile(self):
self.session.get(self.endpoints['investment_profile'])
def instruments(self, stock=None):
res = self.session.get(self.endpoints['instruments'], params={'query':stock.upper()})
res = res.json()
return res['results']
def getFundamentals(self, stock):
"""
Returns a Json dictionary with the following structure
{
'open':,
'market_cap':,
'average_volume':,
'high':,
'pe_ratio':,
'low':,
'high_52_weeks':,
'dividend_yield':,
'low_52_weeks':,
'volume':
}
"""
price = self.quote_data(stock.upper())
fundamental_endpoint = 'https://api.robinhood.com/fundamentals/%s/'% \
stock.upper()
return_data = self.session.get(fundamental_endpoint).json()
return_data['last_trade_price'] = price['last_trade_price']
return_data['symbol'] = stock.upper()
return return_data
def quote_data(self, stock=None):
# Prompt for stock if not entered
if stock is None:
stock = input("Symbol: ");
url = str(self.endpoints['quotes']) + str(stock) + "/"
#Check for validity of symbol
try:
res = json.loads((urllib.request.urlopen(url)).read().decode('utf-8'));
if len(res) > 0:
return res;
else:
raise NameError("Invalid Symbol: " + stock);
except (ValueError):
raise NameError("Invalid Symbol: " + stock);
def get_quote(self, stock=None):
data = self.quote_data(stock)
return data["symbol"]
def get_historical_quotes(self,symbol,interval,span,bounds='regular'):
# Valid combination
# interval = 5minute | 10minute + span = day, week
# interval = day + span = year
# interval = week
# bounds can be 'regular' for regular hours or 'extended' for extended hours
res = self.session.get(self.endpoints['historicals'], params={'symbols':','.join(symbol).upper(), 'interval':interval, 'span':span, 'bounds':bounds})
return res.json()
def get_news(self, symbol):
return self.session.get(self.endpoints['news']+symbol.upper()+"/").json()
def print_quote(self, stock=None):
data = self.quote_data(stock)
print((data["symbol"] + ": $" + data["last_trade_price"]));
def print_quotes(self, stocks):
for i in range(len(stocks)):
self.print_quote(stocks[i]);
def ask_price(self, stock=None):
return self.quote_data(stock)['ask_price'];
def ask_size(self, stock=None):
return self.quote_data(stock)['ask_size'];
def bid_price(self, stock=None):
return self.quote_data(stock)['bid_price'];
def bid_size(self, stock=None):
return self.quote_data(stock)['bid_size'];
def last_trade_price(self, stock=None):
return self.quote_data(stock)['last_trade_price'];
def previous_close(self, stock=None):
return self.quote_data(stock)['previous_close'];
def previous_close_date(self, stock=None):
return self.quote_data(stock)['previous_close_date'];
def adjusted_previous_close(self, stock=None):
return self.quote_data(stock)['adjusted_previous_close'];
def symbol(self, stock=None):
return self.quote_data(stock)['symbol'];
def last_updated_at(self, stock=None):
return self.quote_data(stock)['updated_at'];
def get_account(self):
res = self.session.get(self.endpoints['accounts'])
res = res.json()
return res['results'][0]
def get_url(self,url):
return self.session.get(url).json()
##############################
# PORTFOLIOS DATA
##############################
def portfolios(self):
"""Returns the user's portfolio data."""
return self.session.get(self.endpoints['portfolios']).json()['results'][0]
def adjusted_equity_previous_close(self):
return float(self.portfolios()['adjusted_equity_previous_close'])
def equity(self):
return float(self.portfolios()['equity'])
def equity_previous_close(self):
return float(self.portfolios()['equity_previous_close'])
def excess_margin(self):
return float(self.portfolios()['excess_margin'])
def extended_hours_equity(self):
return float(self.portfolios()['extended_hours_equity'])
def extended_hours_market_value(self):
return float(self.portfolios()['extended_hours_market_value'])
def last_core_equity(self):
return float(self.portfolios()['last_core_equity'])
def last_core_market_value(self):
return float(self.portfolios()['last_core_market_value'])
def market_value(self):
return float(self.portfolios()['market_value'])
def order_history(self):
"""
show the orders that were placed
"""
return self.session.get(self.endpoints['orders']).json()
def dividends(self):
"""
return the divends stocks
"""
return self.session.get(self.endpoints['dividends']).json()
def cancelMostRecentOrder(self):
"""
This fucntion will cancel the most recent order that was placed
"""
temp_list = self.order_history()['results'][0]['cancel']
return self.session.post(temp_list)
###############################
# Position DATA
###############################
def addToWatchlist(self, stock_idx):
try:
stock_instrument = self.instruments(stock_idx)[0]
print(stock_instrument['id'])
data = 'symbols=%s' % stock_idx.upper()
self.session.post(self.endpoints['watchlists']+'/Default/bulk_add/', data =
data)
except Exception:
pass
#######################
# watch lists
# ->
########################
def p_url(self, q, url):
d = self.session.get(url['instrument']).json()
q.put(d)
def watchlist1(self):
"""
Postcondition: returns a list of dictionary instrument objects with
"""
#get the stock watchlist which returns an list of instruments,
#assuming instruments are just stock objects
watch_list_instruments = self.session.get(self.endpoints['watchlists']\
+ '/Default/?cursor=$cursor').json()
#returns a dictonary query with cursor to next and prev
#access the 'results'
print('starting the threading')
q = queue.Queue()
watch_list_instruments = watch_list_instruments['results']
for u in watch_list_instruments:
t = threading.Thread(target=self.p_url, args=(q,u))
t.daemon = True
t.start()
return q
def watchlist(self):
"""
Postcondition: returns a list of dictionary instrument objects with
"""
#get the stock watchlist which returns an list of instruments,
#assuming instruments are just stock objects
watch_list_instruments = self.session.get(self.endpoints['watchlists']\
+ '/Default/?cursor=$cursor').json()
#returns a dictonary query with cursor to next and prev
#access the 'results'
watch_list_instruments = watch_list_instruments['results']
#break down all the data
x = list()
for i in watch_list_instruments:
x.append(self.session.get(i['instrument']).json())
#returns x gives a list of of dictonary containig all instruments
return x
#######################
# simple watch list
# ->
########################
def simplewl(self):
"""
Returns a watch list of all the instruments
"""
return self.session.get(self.endpoints['watchlists']+ 'Default/' ).json()
#######################
# positions
# ->
########################
def topMovers(self, direction):
"""
Returns the top 10 out of sp500 top movers and returns a list of
dictionary
"""
ep = "%s?direction=%s" %(self.endpoints['movers'], direction)
r = self.session.get(ep)
data = json.loads(r.text)['results']
#data = data['results']
return data
#for i in data:
# print(i['symbol'])
print(data[0])
def positions(self):
"""Returns the user's positions data."""
return self.session.get(self.endpoints['positions']).json()
#######################
# stocks owned
# ->
########################
def securities_owned(self):
"""
Returns a list of symbols of securities of which there are more
than zero shares in user's portfolio.
"""
positions = self.positions()
securities = []
for position in positions['results']:
quantity = float(position['quantity'])
if quantity > 0:
securities.append(self.session.get(position['instrument']).json()['symbol'])
return securities
##############################
# PLACE ORDER
##############################
# # types:
# # -market
# # -limit
# # -StopLoss
# # -Stoplimit
##############################
def place_order(self, instrument, order_type, quantity, bid_price, transaction=None):
"""
Function Description: Places an order in RH
"""
if bid_price == None and order_type == None:
bid_price = self.quote_data(instrument['symbol'])['bid_price']
order_type = 'market'
data =\
'account=%s&instrument=%s&price=%f&quantity=%d&side=%s&symbol=%s&time_in_force=gfd&trigger=immediate&type=%s' % (
self.get_account()['url'],
urllib.parse.unquote(instrument['url']),
float(bid_price),
quantity,
transaction,
instrument['symbol'],
order_type
)
res = self.session.post(self.endpoints['orders'], data=data)
return res
#######################
# place_buy_order
# ->
########################
def place_buy_order(self, symbol, buy_type=None, bid_price = None, quantity=1):
"""
Function Description: Places a buy order
If there is a buyprice we make a limit buy,
otherwise if there isn't a buy price
default to market price
PRECONDITIONS:
-String Stock Symbol
-String Buy_type
- makret
- limit
-bid_price int/float
"""
stock_instrument = None
try:
#get the stock instrument
stock_instrument = self._makeInstrument(symbol)
except NameError as e:
print(e)
transaction = "buy"
return self.place_order(stock_instrument, buy_type, quantity, bid_price, transaction)
#######################
# place_buy_order
#
########################
def place_sell_order(self, symbol, sell_type=None, bid_price=None,quantity=1):
stock_instrument = self._makeInstrument(symbol)
transaction = "sell"
return self.place_order(stock_instrument, sell_type,quantity, bid_price, transaction)
#######################
# _Make Instrument
#
########################
def _makeInstrument(self, symbol):
"""
Function Description: makes an stock instrument
"""
#make the instrument to return, but check it first
ret_instrument = self.instruments(symbol)
if len(ret_instrument) == 0:#no symbol found throw exception
raise NameError("Invalid Symbol: " + symbol);
return ret_instrument[0]
#######################
# reorganizes watch list
#
########################
    def reorganize(self):
        # NOTE(review): this POSTs the *literal* template string
        # '/$watchlistName/reorder/{ids}' -- neither placeholder is ever
        # substituted, so this endpoint call looks unfinished.  Left
        # untouched pending confirmation of the intended API shape.
        return self.session.post(self.endpoints['watchlists'] +
            '/$watchlistName/reorder/{ids}')
#######################
# place_buy_order
#
########################
def makewl(self):
self.session.post(self.endpoints['watchlists'], data ='name=DANNY')
def test():
    """Manual smoke test: log in, dump positions, and print the watchlist.

    Hits the live API; requires valid credentials.  The commented-out
    lines are alternative probes kept for interactive use.
    """
    import json
    x = Robinhood()
    print('logging in')
    print(x.login())
    print("positions")
    print(json.dumps(x.positions(), indent=2))
    print('\t\twatchlist test')
    #print(json.dumps(x.watchlist(), indent=2))
    #x.addToWatchlist('MDR')
    z = x.watchlist()
    #result = x['results']
    for i in range(len(z)):
        print('%s \t %s\n%s' % (i, z[i]['symbol'],z[i]['fundamentals']))
    #print(json.loads(x.simplewl(), indent=2))
    #x.reorganize()
def watchListTest():
    """Manual smoke test: log in and add two symbols to the Default watchlist."""
    r = Robinhood()
    r.login()
    r.addToWatchlist('UMX')
    r.addToWatchlist('SKLN')
    #z = r.watchlist()
    #for i in z:
    #    print("%s \n" % i['symbol'])
def testPlaceLimitOrder():
    """Manual smoke test for order placement.

    WARNING: this places *live* sell orders (limit and stop-loss) against
    the authenticated account.
    """
    r = Robinhood()
    r.login()
    i = r.instruments("SKLN")[0]
    #r.place_order(i,1,1.50,'buy')
    #r.place_buy_order('SKLN','limit',bid_price=1.60)
    r.place_sell_order('SKLN','limit',bid_price=1.50)
    #r.place_buy_order('DCTH')
    #r.place_buy_order('DCTH','limit',bid_price=0.04)
    #print(r.quote_data('DCTH'))
    #r.place_buy_order('DCTH')
    #r.place_sell_order('DCTH','limit',bid_price=0.057)
    r.place_sell_order('DCTH','stop loss',bid_price=0.033)
def testLogout():
    """Manual smoke test: log in, then immediately log out."""
    r = Robinhood()
    r.login()
    print("Logging out now..")
    r.logout()
def testMovers():
r | |
# -*- coding: utf-8 -*-
"""
# @Time : 24/10/18 2:40 PM
# @Author : <NAME>
# @FileName: plot_result.py
# @Software: PyCharm
# @Github : https://github.com/hzm2016
"""
import collections
import matplotlib.pyplot as plt
import numpy as np
import pickle
import copy as cp
from baselines.deepq.assembly.src.value_functions import *
"""=================================Plot result====================================="""
# YLABEL = ['$F_x(N)$', '$F_y(N)$', '$F_z(N)$', '$M_x(Nm)$', '$M_y(Nm)$', '$M_z(Nm)$']
YLABEL = ['$F_x$(N)', '$F_y$(N)', '$F_z$(N)', '$M_x$(Nm)', '$M_y$(Nm)', '$M_z$(Nm)']
Title = ["X axis force", "Y axis force", "Z axis force",
"X axis moment", "Y axis moment", "Z axis moment"]
High = np.array([40, 40, 0, 5, 5, 5, 542, -36, 188, 5, 5, 5])
Low = np.array([-40, -40, -40, -5, -5, -5, 538, -42, 192, -5, -5, -5])
scale = np.array([40, 40, 40, 5, 5, 5])
"""================================================================================="""
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus']=False
def plot(result_path):
    """Plot the six force/moment channels of a saved search result.

    Args:
        result_path: path to a .npy array of shape (steps, >=6).

    Fixed: the plotting was wrapped in an outer
    `for i in range(len(prediction_result))` loop whose index was never
    used, so the same six curves were redrawn once per data row.  The
    redundant outer loop has been removed; the rendered figure is the same.
    """
    plt.figure(figsize=(15, 15), dpi=100)
    plt.title('Search Result')
    prediction_result = np.load(result_path)
    for j in range(6):
        # plt.subplot(2, 3, j+1)
        plt.plot(prediction_result[:, j])
        plt.ylabel(YLABEL[j])
        plt.xlabel('steps')
    plt.legend(YLABEL)
    plt.show()
def plot_force_and_moment(path_2, path_3):
    """Plot the first 100 steps of recorded forces and of state offsets.

    Args:
        path_2: .npy file of force/moment samples (6 columns).
        path_3: .npy file of state samples (6 columns); plotted relative to
            a hard-coded reference pose -- presumably the nominal insertion
            pose, TODO confirm.
    """
    V_force = np.load(path_2)
    V_state = np.load(path_3)
    # Figure 1: raw force/moment channels.
    plt.figure(figsize=(15, 10), dpi=100)
    plt.title("Search Result of Force", fontsize=20)
    plt.plot(V_force[:100])
    plt.xlabel("Steps", fontsize=20)
    plt.ylabel("F(N)", fontsize=20)
    plt.legend(labels=['Fx', 'Fy', 'Fz', 'Mx', 'My', 'Mz'], loc='best', fontsize=20)
    plt.xticks(fontsize=15)
    plt.yticks(fontsize=15)
    # Figure 2: state relative to the fixed reference values.
    plt.figure(figsize=(15, 10), dpi=100)
    plt.title("Search Result of State", fontsize=20)
    plt.plot(V_state[:100] - [539.88427, -38.68679, 190.03184, 179.88444, 1.30539, 0.21414])
    plt.xlabel("Steps", fontsize=20)
    plt.ylabel("Coordinate", fontsize=20)
    plt.legend(labels=['x', 'y', 'z', 'rx', 'ry', 'rz'], loc='best', fontsize=20)
    plt.xticks(fontsize=15)
    plt.yticks(fontsize=15)
    plt.show()
def plot_reward(reward_path):
    """Plot per-episode reward from a saved .npy array.

    The first element is printed and excluded from the plot -- presumably
    an initial placeholder value; TODO confirm against the writer.
    """
    reward = np.load(reward_path)
    print(reward[0])
    plt.figure(figsize=(15, 15), dpi=100)
    plt.title('Episode Reward')
    plt.plot(np.arange(len(reward) - 1), np.array(reward[1:]))
    plt.ylabel('Episode Reward')
    plt.xlabel('Episodes')
    plt.show()
def plot_raw_data(path_1):
    """Plot the six force/moment channels of each episode in a raw-data log.

    Each row of the .npy file stores the observation in column 0 and an
    episode flag in column 1; rows where that flag equals 0 delimit
    episodes.

    Fixed: a dead `i += 1` at the end of the for loop was removed --
    rebinding a `for` loop index has no effect in Python, so it was a
    no-op.  Duplicate xlabel/tick calls in the two label branches were
    also merged; rendering is unchanged.
    """
    data = np.load(path_1)
    force_m = np.zeros((len(data), 12))
    plt.figure(figsize=(20, 20), dpi=100)
    plt.tight_layout(pad=3, w_pad=0.5, h_pad=1.0)
    plt.subplots_adjust(left=0.065, bottom=0.1, right=0.995, top=0.9, wspace=0.2, hspace=0.2)
    plt.title("True Data")
    for j in range(len(data)):
        force_m[j] = data[j, 0]
    k = -1
    for i in range(len(data)):
        if data[i, 1] == 0:
            print("===========================================")
            # Slice out the episode that just ended.
            line = force_m[k+1:i+1]
            print(line)
            k = i
            for j in range(6):
                plt.subplot(2, 3, j + 1)
                plt.plot(line[:, j])
                if j == 1:
                    # Middle column keeps its original slightly smaller label.
                    plt.ylabel(YLABEL[j], fontsize=17.5)
                else:
                    plt.ylabel(YLABEL[j], fontsize=20)
                plt.xlabel('steps', fontsize=20)
                plt.xticks(fontsize=15)
                plt.yticks(fontsize=15)
def plot_continuous_data(path):
    """Plot the six normalised force/moment channels of one recording.

    Each row of the .npy file stores the observation in column 0; channels
    are multiplied back by `scale` to recover physical units.  Saves the
    figure to 'raw_data.pdf' in the working directory.
    """
    raw_data = np.load(path)
    plt.figure(figsize=(20, 15))
    plt.title('Episode Reward')
    plt.tight_layout(pad=3, w_pad=0.5, h_pad=1.0)
    plt.subplots_adjust(left=0.1, bottom=0.15, right=0.98, top=0.9, wspace=0.23, hspace=0.22)
    # plt.subplots_adjust(left=0.065, bottom=0.1, right=0.995, top=0.9, wspace=0.2, hspace=0.2)
    # Unpack the per-row observation vectors into one (steps, 12) array.
    data = np.zeros((len(raw_data), 12))
    for j in range(len(raw_data)):
        data[j] = raw_data[j, 0]
    for j in range(6):
        plt.subplot(2, 3, j + 1)
        # Rescale to physical units before plotting.
        plt.plot(data[:, j]*scale[j], linewidth=2.5)
        # plt.ylabel(YLABEL[j], fontsize=18)
        # Only the bottom row of subplots gets an x-label.
        if j>2:
            plt.xlabel('steps', fontsize=30)
        plt.title(YLABEL[j], fontsize=30)
        plt.xticks(fontsize=25)
        plt.yticks(fontsize=25)
    # plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.4, hspace=0.2)
    plt.savefig('raw_data.pdf')
    plt.show()
def compute_true_return(path):
    """Compute the empirical discounted step-count return at each timestep.

    Rows of *path* hold (observation, action, done, action_probability).
    The cumulant is 1 per step and gamma is 0.99 within an episode, 0 on
    the terminal transition, so the return is the discounted number of
    steps remaining.  Non-terminal steps are recorded as 0 and only the
    terminal step of each episode gets the backward-accumulated sum.
    Also plots a slice [300:400] of the resulting series.

    Returns:
        List with one return value per input row.
    """
    raw_data = np.load(path)
    # print(raw_data)
    clock = 0            # number of non-terminal steps in the current episode
    G = 0.               # running discounted return
    past_gammas = []
    past_cumulants = []
    all_G = []
    for i in range(len(raw_data)):
        observation, action, done, action_probability = raw_data[i]
        # Discount is zeroed on the terminal transition.
        if done == False:
            gamma = 0.99
        else:
            gamma = 0.
        past_gammas.append(gamma)
        past_cumulants.append(1)
        if done == False:
            clock += 1
            G = 0
            all_G.append(cp.deepcopy(G))
        else:
            print('clock', clock)
            # Accumulate the discounted sum backwards over the episode's
            # clock+1 recorded transitions.
            for j in reversed(range(0, clock + 1)):
                G *= past_gammas[j]
                G += past_cumulants[j]
            all_G.append(cp.deepcopy(G))
            clock = 0
            past_cumulants = []
            past_gammas = []
    print(len(raw_data))
    plt.figure(figsize=(20, 15))
    plt.plot(all_G[300:400])
    plt.show()
    return all_G
# Plot the true prediction and true value
def plot_different_gamma_data(path):
    """Plot UDE vs. prediction traces for four discount factors.

    Args:
        path: pickle file keyed by (learner, statistic) tuples, each
            mapping value-function names to per-step traces.

    Fixed: the input file handle was opened and never closed; it is now
    managed with a context manager.  Dead commented-out probes were
    trimmed; rendering is unchanged.
    """
    titles = ['$\\gamma = 0.4$', '$\\gamma = 0.8$', '$\\gamma = 0.96$', '$\\gamma =1.0$']
    plot_value_functions = ['Move down step', 'Move down step 2', 'Move down step 3', 'Move down step 4']
    with open(path, 'rb') as f:
        raw_data = pickle.load(f)
    plt.figure(figsize=(20, 15))
    plt.tight_layout(pad=3, w_pad=1., h_pad=0.5)
    plt.subplots_adjust(left=0.1, bottom=0.15, right=0.98, top=0.9, wspace=0.23, hspace=0.23)
    for j, key in enumerate(plot_value_functions):
        plt.subplot(2, 2, j + 1)
        # Skip the first 600 steps of burn-in.
        plt.plot(np.array(raw_data[('GTD(0)', 'UDE')][key])[600:], linewidth=2.75)
        plt.plot(np.array(raw_data[('GTD(0)', 'Prediction')][key])[600:], linewidth=2.75)
        plt.title(titles[j], fontsize=30)
        # Only the bottom row gets an x-label.
        if j > 1:
            plt.xlabel('steps', fontsize=30)
        plt.ylabel('Number of steps', fontsize=30)
        plt.xticks(fontsize=25)
        plt.yticks(fontsize=25)
    # plt.savefig('different_gamma.pdf')
    plt.show()
# Plot the true prediction and true value
def chinese_plot_different_gamma_data(path):
    """Chinese-labelled plot of UDE vs. prediction for four discount factors.

    Args:
        path: pickle file keyed by (learner, statistic) tuples, each
            mapping value-function names to per-step traces.

    Saves the figure to './figure/pdf/chinese_different_gamma.pdf'.

    Fixed: the input file handle was opened and never closed; it is now
    managed with a context manager.  Rendering is unchanged.
    """
    titles = ['$\\gamma = 0.4$', '$\\gamma = 0.8$', '$\\gamma = 0.96$', '$\\gamma =1.0$']
    plot_value_functions = ['Move down step', 'Move down step 2', 'Move down step 3', 'Move down step 4']
    with open(path, 'rb') as f:
        raw_data = pickle.load(f)
    plt.figure(figsize=(20, 12), dpi=1000)
    plt.tight_layout(pad=3, w_pad=1., h_pad=0.5)
    plt.subplots_adjust(left=0.08, bottom=0.12, right=0.98, top=0.95, wspace=0.23, hspace=0.33)
    for j, key in enumerate(plot_value_functions):
        plt.subplot(2, 2, j + 1)
        # Skip the first 600 steps of burn-in.
        plt.plot(np.array(raw_data[('GTD(0)', 'UDE')][key])[600:], linewidth=2.75)
        plt.plot(np.array(raw_data[('GTD(0)', 'Prediction')][key])[600:], linewidth=2.75)
        plt.title(titles[j], fontsize=36)
        # Only the bottom row gets an x-label ("search steps").
        if j > 1:
            plt.xlabel('搜索步数', fontsize=36)
        plt.ylabel('预测周期', fontsize=36)
        plt.xticks([0, 50, 100, 150, 200], fontsize=36)
        plt.yticks(fontsize=36)
    plt.savefig('./figure/pdf/chinese_different_gamma.pdf')
    # plt.show()
def chinese_plot_compare_raw_data(path1, path2):
    """Overlay the first 100 steps of two raw recordings, channel by channel.

    Args:
        path1: .npy recording drawn dashed red.
        path2: .npy recording drawn solid blue.

    Saves the figure to './figure/pdf/chinese_raw_data.pdf'.
    """
    raw_data = np.load(path1)
    raw_data_1 = np.load(path2)
    plt.figure(figsize=(20, 12), dpi=1000)
    plt.title('Episode Reward')
    plt.tight_layout(pad=3, w_pad=0.5, h_pad=1.0)
    plt.subplots_adjust(left=0.08, bottom=0.08, right=0.98, top=0.95, wspace=0.33, hspace=0.15)
    # Unpack each recording's per-row observation vectors.
    data = np.zeros((len(raw_data), 12))
    for j in range(len(raw_data)):
        data[j] = raw_data[j, 0]
    data_1 = np.zeros((len(raw_data_1), 12))
    for j in range(len(raw_data_1)):
        data_1[j] = raw_data_1[j, 0]
    for j in range(6):
        plt.subplot(2, 3, j + 1)
        plt.plot(data[:100, j], linewidth=2.5, color='r', linestyle='--')
        plt.plot(data_1[:100, j], linewidth=2.5, color='b')
        # plt.ylabel(YLABEL[j], fontsize=18)
        # Only the bottom row gets an x-label ("search steps").
        if j>2:
            plt.xlabel('搜索步数', fontsize=38)
        plt.title(YLABEL[j], fontsize=38)
        plt.xticks(fontsize=38)
        plt.yticks(fontsize=38)
    # plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.4, hspace=0.2)
    plt.savefig('./figure/pdf/chinese_raw_data.pdf')
    # plt.show()
# Plot the true prediction and true value
def chinese_plot_different_policy_data(path, name):
    """Chinese-labelled plot of UDE vs. prediction per force/moment channel.

    Args:
        path: pickle file keyed by (learner, statistic) tuples, each
            mapping value-function names to per-step traces.
        name: suffix for the output PDF file name.

    Saves the figure to './figure/pdf/chinese_<name>.pdf'.

    Fixed: the input file handle was opened and never closed; it is now
    managed with a context manager.  Rendering is unchanged.
    """
    plot_value_functions = ['Move down Fx', 'Move down Fy', 'Move down Fz', 'Move down Mx', 'Move down My', 'Move down Mz']
    with open(path, 'rb') as f:
        raw_data = pickle.load(f)
    plt.figure(figsize=(20, 12), dpi=1000)
    plt.title('Episode Reward')
    plt.tight_layout(pad=3, w_pad=0.5, h_pad=1.0)
    plt.subplots_adjust(left=0.1, bottom=0.1, right=0.98, top=0.95, wspace=0.33, hspace=0.25)
    for j, key in enumerate(plot_value_functions):
        plt.subplot(2, 3, j + 1)
        # Rescale normalised traces back to physical units; skip 1000 steps
        # of burn-in.
        plt.plot(np.array(raw_data[('GTD(1)', 'UDE')][key])[1000:]*scale[j], linewidth=2.5)
        plt.plot(np.array(raw_data[('GTD(1)', 'Prediction')][key])[1000:]*scale[j], linewidth=2.5)
        plt.title(YLABEL[j], fontsize=38)
        # Only the bottom row gets an x-label ("search steps").
        if j > 2:
            plt.xlabel('搜索步数', fontsize=38)
        plt.xticks([0, 50, 100, 150, 200], fontsize=38)
        plt.yticks(fontsize=38)
    plt.savefig('./figure/pdf/chinese_' + name +'.pdf')
    # plt.show()
# Plot the true prediction and true value
def plot_different_policy_data(path):
f = open(path, 'rb')
# true_data = compute_true_return('prediction_result_different_gamma.npy')
# f = open('../data/learning_result_policy', 'rb')
plot_value_functions = ['Move down Fx', 'Move down Fy', 'Move down Fz', 'Move down Mx', 'Move down My', 'Move down Mz']
# plot_value_functions = ['Move down step', 'Move down step 2', 'Move down step 3', 'Move down step 4']
# plot_value_functions = ['Move down Fx', 'Move down Fx 1', 'Move down Fx 2', 'Move down Fx 3']
raw_data = pickle.load(f)
plt.figure(figsize=(20, 12), dpi=1000)
plt.title('Episode Reward')
plt.tight_layout(pad=3, w_pad=1.0, h_pad=1.0)
plt.subplots_adjust(left=0.1, bottom=0.15, right=0.98, top=0.9, wspace=0.23, hspace=0.23)
# legend = sorted([key for key in plot_value_functions.keys()])
# print(legend)
# print(value_functions.keys())
for j, key in enumerate(plot_value_functions):
plt.subplot(2, 3, j + 1)
# print(list(raw_data[('GTD(1)', 'Hindsight Error')][key]))
# plt.plot(np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[400:]*scale[j], linewidth=2.5)
# plt.plot(true_data[300:])
plt.plot(np.array(raw_data[('GTD(1)', 'UDE')][key])[1000:]*scale[j], linewidth=2.5)
# print('true value', np.array(raw_data[('GTD(0)', 'UDE')][key])[300:400])
# plt.plot(np.array(raw_data[('GTD(0)', 'TD Error')][key])[600:], linewidth=2.5, color='r')
# print('old prediction', np.array(raw_data[('GTD(0)', 'TD Error')][key])[300:400])
plt.plot(np.array(raw_data[('GTD(1)', 'Prediction')][key])[1000:]*scale[j], linewidth=2.5)
# plt.plot(np.array(raw_data[('GTD(1)', 'Prediction')][key])[300:] - np.array(raw_data[('GTD(1)', 'Hindsight Error')][key])[300:], linewidth=2.5)
# plt.legend('True value', 'Prediction value')
plt.title(YLABEL[j], fontsize=30)
if j > 2:
| |
replaced.')
cls._disable_account()
auth_client = AuthClient(credentials.token,
credentials.base_url,
**credentials.connection_parameters())
service_urls = auth_client.current_service_urls()
user_hubs = auth_client.user_hubs()
preferences = preferences or {}
for hub_info in user_hubs:
# Build credentials.
provider_credentials = Credentials(
credentials.token,
access_token=auth_client.current_access_token(),
url=service_urls['http'],
auth_url=credentials.auth_url,
websockets_url=service_urls['ws'],
proxies=credentials.proxies,
verify=credentials.verify,
services=service_urls.get('services', {}),
default_provider=credentials.default_provider,
**hub_info
)
provider_credentials.preferences = \
preferences.get(provider_credentials.unique_id(), {})
# _credentials class variable is read in __init__ to set provider credentials
cls._credentials = provider_credentials
# Build the provider.
try:
provider = IBMProvider(token=credentials.token, **hub_info)
cls._providers[provider.credentials.unique_id()] = provider
# Clear _credentials class variable so __init__ is not processed for the first
# call to __new__ (since all IBMProvider instances are initialized by this method)
cls._credentials = None
except Exception: # pylint: disable=broad-except
# Catch-all for errors instantiating the provider.
logger.warning('Unable to instantiate provider for %s: %s',
hub_info, traceback.format_exc())
@staticmethod
def _check_api_version(credentials: Credentials) -> Dict[str, Union[bool, str]]:
"""Check the version of the remote server in a set of credentials.
Args:
credentials: IBM Quantum Credentials
Returns:
A dictionary with version information.
"""
version_finder = VersionClient(credentials.base_url,
**credentials.connection_parameters())
return version_finder.version()
    @classmethod
    def _disable_account(cls) -> None:
        """Disable the account currently in use for the session.

        Drops every cached provider for the class.

        Raises:
            IBMProviderCredentialsNotFound: If no account is in use for the session.
        """
        if not cls._providers:
            raise IBMProviderCredentialsNotFound(
                'No IBM Quantum account is in use for the session.')
        cls._providers = OrderedDict()
@classmethod
def _get_provider(
cls,
hub: Optional[str] = None,
group: Optional[str] = None,
project: Optional[str] = None,
) -> 'IBMProvider':
"""Return a provider for a single hub/group/project combination.
Args:
hub: Name of the hub.
group: Name of the group.
project: Name of the project.
Returns:
A provider that matches the specified criteria or default provider.
Raises:
IBMProviderError: If no provider matches the specified criteria,
if more than one provider matches the specified criteria or if
no provider could be found for this account.
"""
providers = cls._get_providers(hub=hub, group=group, project=project)
if any([hub, group, project]):
if not providers:
raise IBMProviderError('No provider matches the specified criteria: '
'hub = {}, group = {}, project = {}'
.format(hub, group, project))
if len(providers) > 1:
raise IBMProviderError('More than one provider matches the specified criteria.'
'hub = {}, group = {}, project = {}'
.format(hub, group, project))
elif not providers:
# Prevent edge case where no providers are available.
raise IBMProviderError('No Hub/Group/Project could be found for this account.')
return providers[0]
    def __init__(
            self,
            token: Optional[str] = None,
            url: Optional[str] = None,
            hub: Optional[str] = None,
            group: Optional[str] = None,
            project: Optional[str] = None,
            **kwargs: Any
    ) -> None:
        """Initialize the provider from the `_credentials` class variable.

        The constructor arguments are intentionally unused here: credentials
        are injected by the factory via the `_credentials` class variable,
        and when that variable is None (the first `__new__` call during
        provider initialization) the body is skipped entirely.
        """
        # pylint: disable=unused-argument,unsubscriptable-object
        super().__init__()
        if self._credentials:
            self.credentials = self._credentials
            self._api_client = AccountClient(self.credentials,
                                             **self.credentials.connection_parameters())
            # Initialize the internal list of backends.
            self.__backends: Dict[str, IBMBackend] = {}
            self._backend = IBMBackendService(self)
            # Initialize other services; each is None when its URL is absent
            # from the credentials (i.e. the account is not entitled to it).
            self._random = IBMRandomService(self) if self.credentials.extractor_url else None
            self._experiment = IBMExperimentService(self) \
                if self.credentials.experiment_url else None
            self._runtime = IBMRuntimeService(self) \
                if self.credentials.runtime_url else None
            self._services = {'backend': self._backend,
                              'random': self._random,
                              'experiment': self._experiment,
                              'runtime': self._runtime}
@property
def _backends(self) -> Dict[str, IBMBackend]:
"""Gets the backends for the provider, if not loaded.
Returns:
Dict[str, IBMBackend]: the backends
"""
if not self.__backends:
self.__backends = self._discover_remote_backends()
return self.__backends
    @_backends.setter
    def _backends(self, value: Dict[str, IBMBackend]) -> None:
        """Sets the value for the account's backends.

        Args:
            value: the backends, keyed by backend name.
        """
        self.__backends = value
    def _discover_remote_backends(self, timeout: Optional[float] = None) -> Dict[str, IBMBackend]:
        """Return the remote backends available for this provider.

        Args:
            timeout: Maximum number of seconds to wait for the discovery of
                remote backends.

        Returns:
            A dict of the remote backend instances, keyed by backend name.
            Backends whose configuration cannot be decoded are skipped with
            a warning rather than failing discovery as a whole.
        """
        ret = OrderedDict()  # type: ignore[var-annotated]
        configs_list = self._api_client.list_backends(timeout=timeout)
        for raw_config in configs_list:
            # Make sure the raw_config is of proper type
            if not isinstance(raw_config, dict):
                logger.warning("An error occurred when retrieving backend "
                               "information. Some backends might not be available.")
                continue
            try:
                decode_backend_configuration(raw_config)
                # Prefer the pulse-capable configuration; fall back to the
                # plain QASM configuration when the pulse fields are absent.
                try:
                    config = PulseBackendConfiguration.from_dict(raw_config)
                except (KeyError, TypeError):
                    config = QasmBackendConfiguration.from_dict(raw_config)
                backend_cls = IBMSimulator if config.simulator else IBMBackend
                ret[config.backend_name] = backend_cls(
                    configuration=config,
                    provider=self,
                    credentials=self.credentials,
                    api_client=self._api_client)
            except Exception:  # pylint: disable=broad-except
                logger.warning(
                    'Remote backend "%s" for provider %s could not be instantiated due to an '
                    'invalid config: %s',
                    raw_config.get('backend_name', raw_config.get('name', 'unknown')),
                    repr(self), traceback.format_exc())
        return ret
    @property
    def backend(self) -> IBMBackendService:
        """Return the backend service.

        Returns:
            The backend service instance.
        """
        return self._backend
@property
def experiment(self) -> IBMExperimentService:
"""Return the experiment service.
Returns:
The experiment service instance.
Raises:
IBMNotAuthorizedError: If the account is not authorized to use
the experiment service.
"""
if self._experiment:
return self._experiment
else:
raise IBMNotAuthorizedError("You are not authorized to use the experiment service.")
@property
def random(self) -> IBMRandomService:
"""Return the random number service.
Returns:
The random number service instance.
Raises:
IBMNotAuthorizedError: If the account is not authorized to use
the service.
"""
if self._random:
return self._random
else:
raise IBMNotAuthorizedError("You are not authorized to use the service.")
@property
def runtime(self) -> IBMRuntimeService:
"""Return the runtime service.
Returns:
The runtime service instance.
Raises:
IBMNotAuthorizedError: If the account is not authorized to use the service.
"""
if self._runtime:
return self._runtime
else:
raise IBMNotAuthorizedError("You are not authorized to use the runtime service.")
@classmethod
def active_account(cls) -> Optional[Dict[str, str]]:
"""Return the IBM Quantum account currently in use for the session.
Returns:
A dictionary with information about the account currently in the session,
None if there is no active account in session
"""
if not cls._providers:
return None
first_provider = list(cls._providers.values())[0]
return {
'token': first_provider.credentials.token,
'url': first_provider.credentials.auth_url
}
    @classmethod
    def providers(
            cls,
            token: Optional[str] = None,
            url: Optional[str] = None,
            hub: Optional[str] = None,
            group: Optional[str] = None,
            project: Optional[str] = None,
            **kwargs: Any
    ) -> List['IBMProvider']:
        """Initialize account and return a list of providers.

        Args:
            token: IBM Quantum token.
            url: URL for the IBM Quantum authentication server.
            hub: Name of the hub.
            group: Name of the group.
            project: Name of the project.
            **kwargs: Additional settings for the connection:

                * proxies (dict): proxy configuration.
                * verify (bool): verify the server's TLS certificate.

        Returns:
            A list of providers that match the specified criteria.
        """
        account_credentials, account_preferences, *_ = cls._resolve_credentials(
            token=token,
            url=url,
            hub=hub,
            group=group,
            project=project,
            **kwargs
        )
        # (Re)build the provider cache only on first use or when the token
        # differs from the cached account's.
        if not cls._providers or cls._is_different_account(account_credentials.token):
            cls._initialize_providers(credentials=account_credentials,
                                      preferences=account_preferences)
        return cls._get_providers(hub=hub, group=group, project=project)
@classmethod
def _get_providers(
cls,
hub: Optional[str] = None,
group: Optional[str] = None,
project: Optional[str] = None,
) -> List['IBMProvider']:
"""Return a list of providers, subject to optional filtering.
Args:
hub: Name of the hub.
group: Name of the group.
project: Name of the project.
Returns:
A list of providers that match the specified criteria.
"""
filters = [] # type: List[Callable[[HubGroupProject], bool]]
if hub:
filters.append(lambda hgp: hgp.hub == hub)
if group:
filters.append(lambda hgp: hgp.group == group)
if project:
filters.append(lambda hgp: hgp.project == project)
providers = [provider for key, provider in cls._providers.items()
if all(f(key) for f in filters)]
return providers
    @staticmethod
    def delete_account() -> None:
        """Delete the saved account from disk.

        Only the first stored credential is inspected; it must point at the
        standard IBM Quantum API URL to be deletable here.

        Raises:
            IBMProviderCredentialsNotFound: If no valid IBM Quantum
                credentials can be found on disk.
            IBMProviderCredentialsInvalidUrl: If invalid IBM Quantum
                credentials are found on disk.
        """
        stored_credentials, _ = read_credentials_from_qiskitrc()
        if not stored_credentials:
            raise IBMProviderCredentialsNotFound(
                'No IBM Quantum credentials found on disk.')
        credentials = list(stored_credentials.values())[0]
        if credentials.url != QISKIT_IBM_API_URL:
            raise IBMProviderCredentialsInvalidUrl(
                'Invalid IBM Quantum credentials found on disk. ')
        remove_credentials(credentials)
@staticmethod
def save_account(
token: str,
url: str = QISKIT_IBM_API_URL,
hub: Optional[str] = None,
group: Optional[str] = None,
project: Optional[str] = None,
overwrite: bool = False,
**kwargs: Any
) -> None:
"""Save the account to disk for future use.
Note:
If storing a default provider to disk, all three parameters
`hub`, `group`, `project` must be specified.
Args:
token: IBM Quantum token.
url: URL for the IBM Quantum authentication server.
hub: Name of the hub for the default provider to store on disk.
group: Name of the group for the default provider to store on disk.
project: Name of the project for the default provider to store on disk.
overwrite: Overwrite existing credentials.
**kwargs:
* proxies (dict): Proxy configuration for the server.
* verify (bool): If False, ignores SSL certificates errors
Raises:
IBMProviderCredentialsInvalidUrl: If the `url` is not a valid
IBM Quantum authentication URL.
| |
<reponame>jnthn/intellij-community<gh_stars>100-1000
#
# epydoc.css: default epydoc CSS stylesheets
# <NAME>
#
# Created [01/30/01 05:18 PM]
# $Id: html_css.py 1634 2007-09-24 15:58:38Z dvarrazzo $
#
"""
Predefined CSS stylesheets for the HTML outputter (L{epydoc.docwriter.html}).
@type STYLESHEETS: C{dictionary} from C{string} to C{(string, string)}
@var STYLESHEETS: A dictionary mapping from stylesheet names to CSS
stylesheets and descriptions. A single stylesheet may have
multiple names. Currently, the following stylesheets are defined:
- C{default}: The default stylesheet (synonym for C{white}).
- C{white}: Black on white, with blue highlights (similar to
javadoc).
- C{blue}: Black on steel blue.
- C{green}: Black on green.
- C{black}: White on black, with blue highlights
- C{grayscale}: Grayscale black on white.
- C{none}: An empty stylesheet.
"""
__docformat__ = 'epytext en'
import re
############################################################
## Basic stylesheets
############################################################
# [xx] Should I do something like:
#
# @import url(html4css1.css);
#
# But then where do I get that css file from? Hm.
# Also, in principle I'm mangling classes, but it looks like I'm
# failing.
#
# Black on white, with blue highlights. This is similar to how
# javadoc looks.
TEMPLATE = """
/* Epydoc CSS Stylesheet
*
* This stylesheet can be used to customize the appearance of epydoc's
* HTML output.
*
*/
/* Default Colors & Styles
* - Set the default foreground & background color with 'body'; and
* link colors with 'a:link' and 'a:visited'.
* - Use bold for decision list terms.
* - The heading styles defined here are used for headings *within*
* docstring descriptions. All headings used by epydoc itself use
* either class='epydoc' or class='toc' (CSS styles for both
* defined below).
*/
body { background: $body_bg; color: $body_fg; }
p { margin-top: 0.5em; margin-bottom: 0.5em; }
a:link { color: $body_link; }
a:visited { color: $body_visited_link; }
dt { font-weight: bold; }
h1 { font-size: +140%; font-style: italic;
font-weight: bold; }
h2 { font-size: +125%; font-style: italic;
font-weight: bold; }
h3 { font-size: +110%; font-style: italic;
font-weight: normal; }
code { font-size: 100%; }
/* N.B.: class, not pseudoclass */
a.link { font-family: monospace; }
/* Page Header & Footer
* - The standard page header consists of a navigation bar (with
* pointers to standard pages such as 'home' and 'trees'); a
* breadcrumbs list, which can be used to navigate to containing
* classes or modules; options links, to show/hide private
* variables and to show/hide frames; and a page title (using
* <h1>). The page title may be followed by a link to the
* corresponding source code (using 'span.codelink').
* - The footer consists of a navigation bar, a timestamp, and a
* pointer to epydoc's homepage.
*/
h1.epydoc { margin: 0; font-size: +140%; font-weight: bold; }
h2.epydoc { font-size: +130%; font-weight: bold; }
h3.epydoc { font-size: +115%; font-weight: bold;
margin-top: 0.2em; }
td h3.epydoc { font-size: +115%; font-weight: bold;
margin-bottom: 0; }
table.navbar { background: $navbar_bg; color: $navbar_fg;
border: $navbar_border; }
table.navbar table { color: $navbar_fg; }
th.navbar-select { background: $navbar_select_bg;
color: $navbar_select_fg; }
table.navbar a { text-decoration: none; }
table.navbar a:link { color: $navbar_link; }
table.navbar a:visited { color: $navbar_visited_link; }
span.breadcrumbs { font-size: 85%; font-weight: bold; }
span.options { font-size: 70%; }
span.codelink { font-size: 85%; }
td.footer { font-size: 85%; }
/* Table Headers
* - Each summary table and details section begins with a 'header'
* row. This row contains a section title (marked by
* 'span.table-header') as well as a show/hide private link
* (marked by 'span.options', defined above).
* - Summary tables that contain user-defined groups mark those
* groups using 'group header' rows.
*/
td.table-header { background: $table_hdr_bg; color: $table_hdr_fg;
border: $table_border; }
td.table-header table { color: $table_hdr_fg; }
td.table-header table a:link { color: $table_hdr_link; }
td.table-header table a:visited { color: $table_hdr_visited_link; }
span.table-header { font-size: 120%; font-weight: bold; }
th.group-header { background: $group_hdr_bg; color: $group_hdr_fg;
text-align: left; font-style: italic;
font-size: 115%;
border: $table_border; }
/* Summary Tables (functions, variables, etc)
* - Each object is described by a single row of the table with
* two cells. The left cell gives the object's type, and is
* marked with 'code.summary-type'. The right cell gives the
* object's name and a summary description.
* - CSS styles for the table's header and group headers are
* defined above, under 'Table Headers'
*/
table.summary { border-collapse: collapse;
background: $table_bg; color: $table_fg;
border: $table_border;
margin-bottom: 0.5em; }
td.summary { border: $table_border; }
code.summary-type { font-size: 85%; }
table.summary a:link { color: $table_link; }
table.summary a:visited { color: $table_visited_link; }
/* Details Tables (functions, variables, etc)
* - Each object is described in its own div.
* - A single-row summary table w/ table-header is used as
* a header for each details section (CSS style for table-header
* is defined above, under 'Table Headers').
*/
table.details { border-collapse: collapse;
background: $table_bg; color: $table_fg;
border: $table_border;
margin: .2em 0 0 0; }
table.details table { color: $table_fg; }
table.details a:link { color: $table_link; }
table.details a:visited { color: $table_visited_link; }
/* Fields */
dl.fields { margin-left: 2em; margin-top: 1em;
margin-bottom: 1em; }
dl.fields dd ul { margin-left: 0em; padding-left: 0em; }
dl.fields dd ul li ul { margin-left: 2em; padding-left: 0em; }
div.fields { margin-left: 2em; }
div.fields p { margin-bottom: 0.5em; }
/* Index tables (identifier index, term index, etc)
* - link-index is used for indices containing lists of links
* (namely, the identifier index & term index).
* - index-where is used in link indices for the text indicating
* the container/source for each link.
* - metadata-index is used for indices containing metadata
* extracted from fields (namely, the bug index & todo index).
*/
table.link-index { border-collapse: collapse;
background: $table_bg; color: $table_fg;
border: $table_border; }
td.link-index { border-width: 0px; }
table.link-index a:link { color: $table_link; }
table.link-index a:visited { color: $table_visited_link; }
span.index-where { font-size: 70%; }
table.metadata-index { border-collapse: collapse;
background: $table_bg; color: $table_fg;
border: $table_border;
margin: .2em 0 0 0; }
td.metadata-index { border-width: 1px; border-style: solid; }
table.metadata-index a:link { color: $table_link; }
table.metadata-index a:visited { color: $table_visited_link; }
/* Function signatures
* - sig* is used for the signature in the details section.
* - .summary-sig* is used for the signature in the summary
* table, and when listing property accessor functions.
* */
.sig-name { color: $sig_name; }
.sig-arg { color: $sig_arg; }
.sig-default { color: $sig_default; }
.summary-sig { font-family: monospace; }
.summary-sig-name { color: $summary_sig_name; font-weight: bold; }
table.summary a.summary-sig-name:link
{ color: $summary_sig_name; font-weight: bold; }
table.summary a.summary-sig-name:visited
{ color: $summary_sig_name; font-weight: bold; }
.summary-sig-arg { color: $summary_sig_arg; }
.summary-sig-default { color: $summary_sig_default; }
/* Subclass list
*/
ul.subclass-list { display: inline; }
ul.subclass-list li { display: inline; }
/* To render variables, classes etc. like functions */
table.summary .summary-name { color: $summary_sig_name; font-weight: bold;
font-family: monospace; }
table.summary
a.summary-name:link { color: $summary_sig_name; font-weight: bold;
font-family: monospace; }
table.summary
a.summary-name:visited { color: $summary_sig_name; font-weight: bold;
font-family: monospace; }
/* Variable values
* - In the 'variable details' sections, each varaible's value is
* listed in a 'pre.variable' box. The width of this box is
* restricted to 80 chars; if the value's repr is longer than
* this it will be wrapped, using a backslash marked with
* class 'variable-linewrap'. If the value's repr is longer
* than 3 lines, the rest will be ellided; and an ellipsis
* marker ('...' marked with 'variable-ellipsis') will be used.
* - If the value is a string, its quote marks will be marked
* with 'variable-quote'.
* - If the variable is a regexp, it is syntax-highlighted using
* the re* CSS classes.
*/
pre.variable { padding: .5em; margin: 0;
background: $variable_bg; color: $variable_fg;
border: $variable_border; }
.variable-linewrap { color: $variable_linewrap; font-weight: bold; }
.variable-ellipsis { color: $variable_ellipsis; font-weight: bold; }
.variable-quote { color: $variable_quote; font-weight: bold; }
.variable-group { color: $variable_group; font-weight: bold; }
.variable-op { color: $variable_op; font-weight: bold; }
.variable-string { color: $variable_string; }
.variable-unknown { color: $variable_unknown; font-weight: bold; }
.re { color: $re; }
.re-char { color: $re_char; }
.re-op { color: $re_op; }
.re-group { color: $re_group; }
.re-ref { color: $re_ref; }
/* Base tree
* - Used by class pages to display the base class hierarchy.
*/
pre.base-tree { font-size: 80%; margin: 0; }
/* Frames-based table of contents headers
* - Consists of two frames: one for selecting modules; and
* the other listing the | |
import logging
import time
import io
import pickle
import multiprocessing
import bagua_core as B
from bagua.service import AutotuneService
from . import env
from .env import (
get_master_addr,
get_world_size,
get_rank,
get_local_rank,
get_node_rank,
get_default_bucket_size,
get_bagua_service_port,
get_autotune_server_wait_time,
find_free_network_port,
)
from enum import IntEnum
from .utils import flatten, unflatten
import torch
import torch.distributed as dist
from bagua.service.autotune_service import AutotuneClient
from functools import lru_cache
from datetime import timedelta
from typing import Optional, List
import torch.distributed.distributed_c10d as c10d
from torch.distributed import ProcessGroup as TorchProcessGroup
import gorilla
import weakref
# fmt: off
# Public API of this module; names not listed here are internal helpers.
__all__ = [
    "ReduceOp", "new_group", "from_torch_group", "init_process_group",
    "is_initialized", "send", "recv", "broadcast", "broadcast_object",
    "reduce", "reduce_inplace", "allreduce", "allreduce_inplace",
    "allgather", "allgather_inplace", "gather", "gather_inplace",
    "scatter", "scatter_inplace", "reduce_scatter", "reduce_scatter_inplace",
    "alltoall", "alltoall_inplace", "barrier", "BaguaProcessGroup"
]
# Process group's global rank to local rank mapping
_pg_group_ranks = {}
# Process group's name to BaguaProcessGroup
_pg_map = {}
# Default process group state
_default_pg = None
# Default store
_default_store = None
# Process group count for default naming
_group_count = 0
# Torch process group to bagua process group; weak keys so this cache does
# not keep torch process groups alive after they are dropped elsewhere.
_torch_to_bagua_pg_map = weakref.WeakKeyDictionary({})
# must be consistent with Aluminum ReductionOperator: https://github.com/BaguaSys/Aluminum/blob/master/include/aluminum/base.hpp
class ReduceOp(IntEnum):
    """An enum-like class for available reduction operations: ``SUM``, ``PRODUCT``, ``MIN``, ``MAX``, ``BAND``,
    ``BOR``, ``BXOR`` and ``AVG``."""
    # Numeric values mirror Aluminum's ReductionOperator (see the comment
    # above this class); values 4-6 are skipped here, presumably because the
    # corresponding Aluminum operators are unsupported — confirm against
    # Aluminum's base.hpp before adding members.
    SUM = 0
    PRODUCT = 1
    MIN = 2
    MAX = 3
    BOR = 7
    BAND = 8
    BXOR = 9
    AVG = 10
@gorilla.patches(TorchProcessGroup, filter=lambda name, obj: "bagua" in name)
class BaguaProcessGroupPatch:
    """Monkey-patches ``torch.distributed.ProcessGroup`` with the ``bagua_*``
    members below (the gorilla filter only injects names containing "bagua")."""
    def bagua_patch(self, stream: Optional[torch.cuda.Stream] = None):
        """Create and cache the Bagua twin of this torch group; returns ``self``."""
        global _torch_to_bagua_pg_map
        if self not in _torch_to_bagua_pg_map:
            _torch_to_bagua_pg_map[self] = from_torch_group(self, stream)
        return self
    @property
    def bagua_pg(self):
        # Cached Bagua group; bagua_patch(...) must have been called first.
        assert self in _torch_to_bagua_pg_map, "cannot find associated Bagua process group in cache, BaguaProcessGroupPatch.bagua_patch(...) needs to be run first to initialize Bagua process group in cache."
        return _torch_to_bagua_pg_map[self]
    def bagua_get_global_communicator(self):
        """Global communicator of the associated Bagua process group."""
        return get_communicator(self.bagua_pg.group_name, "global")
    def bagua_get_inter_node_communicator(self):
        """Inter-node communicator of the associated Bagua process group."""
        return get_communicator(self.bagua_pg.group_name, "inter")
    def bagua_get_intra_node_communicator(self):
        """Intra-node communicator of the associated Bagua process group."""
        return get_communicator(self.bagua_pg.group_name, "intra")
# Apply the gorilla patches declared on BaguaProcessGroupPatch, injecting the
# bagua_* helpers into torch.distributed.ProcessGroup at import time.
_base = gorilla._get_base(BaguaProcessGroupPatch)
_decorator_data = gorilla.get_decorator_data(_base)
for patch in _decorator_data.patches:
    gorilla.apply(patch)
class BaguaProcessGroup:
    """Definition of Bagua process group: the member ranks, the CUDA stream
    used for its NCCL operations, and its registry name."""

    def __init__(self, ranks, stream, group_name):
        self.ranks = ranks
        self.stream = stream
        self.group_name = group_name
        logging.debug(f"Initialize Bagua process group of ranks {self.ranks}")

    def _get_intra_ranks(self):
        # Member ranks placed on the same node as the current process.
        mappings = _get_rank_mappings()
        return [r for r in self.ranks if mappings[r][0] == get_node_rank()]

    def _get_inter_ranks(self):
        # One member rank per node: those sharing the first rank's local rank.
        mappings = _get_rank_mappings()
        first_local_rank = mappings[self.ranks[0]][1]
        return [r for r in self.ranks if mappings[r][1] == first_local_rank]

    def get_global_communicator(self) -> B.BaguaSingleCommunicatorPy:
        """Returns the global communicator of current process group."""
        return get_communicator(self.group_name, "global")

    def get_inter_node_communicator(self) -> B.BaguaSingleCommunicatorPy:
        """Returns the inter-node communicator of current process group."""
        return get_communicator(self.group_name, "inter")

    def get_intra_node_communicator(self) -> B.BaguaSingleCommunicatorPy:
        """Returns the intra-node communicator of current process group."""
        return get_communicator(self.group_name, "intra")
@lru_cache(maxsize=None)
def _get_rank_mappings():
    """Gather, once per process, every rank's (node_rank, local_rank) pair."""
    placement = torch.cuda.LongTensor(get_world_size(), 2)
    placement[get_rank()][0] = get_node_rank()
    placement[get_rank()][1] = get_local_rank()
    # Every rank fills in its own row, then all rows are exchanged.
    allgather_inplace(placement)
    return {
        i: (placement[i][0].item(), placement[i][1].item())
        for i in range(get_world_size())
    }
def _check_default_pg():
    """
    Helper that checks if the default process group has been initialized, with
    assertion.

    Raises:
        AssertionError: If :func:`init_process_group` has not been called yet.
    """
    assert _default_pg is not None, "Default process group is not initialized"
def is_initialized():
    """
    Checking if the default process group has been initialized.

    Returns:
        ``True`` once :func:`init_process_group` has created the default
        process group, ``False`` otherwise.
    """
    return _default_pg is not None
def _get_default_group():
    """Return the default process group created by :func:`init_process_group`,
    raising ``RuntimeError`` when initialization has not happened yet."""
    if is_initialized():
        return _default_pg
    raise RuntimeError(
        "Default process group has not been initialized, "
        "please make sure to call init_process_group."
    )
def _bagua_backend_comm(comm: Optional[B.BaguaSingleCommunicatorPy] = None):
    """Map *comm* to the value handed to the Bagua backend: ``None`` when the
    current rank is not a member of the communicator, otherwise *comm* itself."""
    return None if _rank_not_in_comm(comm) else comm
def new_group(
    ranks: Optional[List[int]] = None, stream: Optional[torch.cuda.Stream] = None
) -> BaguaProcessGroup:
    """
    Creates a new process group.
    This function requires that all processes in the default group (i.e. all
    processes that are part of the distributed job) enter this function, even
    if they are not going to be members of the group. Additionally, groups
    should be created in the same order in all processes.
    Each process group will create three communicators on request, a global communicator,
    a inter-node communicator and a intra-node communicator. Users can access them through
    ``group.get_global_communicator()``, ``group.get_inter_node_communicator()``
    and ``group.get_intra_node_communicator()`` respectively.
    Args:
        ranks: List of ranks of group members. If ``None``, will be
            set to all ranks. Default is ``None``.
        stream: A CUDA stream used to execute NCCL operations. If ``None``,
            CUDA stream of the default group will be used. See
            `CUDA semantics <https://pytorch.org/docs/stable/notes/cuda.html?highlight=stream>`_
            for details.
    Returns:
        A handle of process group that can be given to collective calls.
    Raises:
        ValueError: If any requested rank is negative or >= world size.
    .. note::
        The global communicator is used for global communications involving all ranks in the process group.
        The inter-node communicator and the intra-node communicator is used for hierarchical communications
        in this process group.
    .. note::
        For a specific communicator ``comm``, ``comm.rank()`` returns the rank of current process and
        ``comm.nranks()`` returns the size of the communicator.
    """
    global _group_count
    global _pg_group_ranks
    global _pg_map
    _group_count += 1
    if ranks is None:
        ranks = list(range(get_world_size()))
    else:
        # sanity check for the input ranks
        for rank in ranks:
            if rank < 0 or rank >= get_world_size():
                # BUG FIX: the placeholders were never interpolated before —
                # the values were passed as extra ValueError args instead.
                raise ValueError(
                    "Invalid rank {}, should be non-negative and less than world size {}.".format(
                        rank, get_world_size()
                    )
                )
        ranks = sorted(ranks)
    if stream is None:
        # Inherit the CUDA stream of the default group.
        _check_default_pg()
        stream = _get_default_group().stream
    group_name = str(_group_count)
    pg = BaguaProcessGroup(ranks, stream, group_name)
    # Create the global rank to group rank mapping
    _pg_group_ranks[pg] = {
        global_rank: group_rank for group_rank, global_rank in enumerate(ranks)
    }
    _pg_map[group_name] = pg
    return pg
# Monotonic counter handed to each torch group converted by
# from_torch_group(); used to namespace the store keys for rank exchange.
__torch_group_id = 0
def from_torch_group(group, stream: Optional[torch.cuda.Stream] = None) -> BaguaProcessGroup:
    """
    Convert a Pytorch process group to its equivalent Bagua process group.
    Args:
        group: A handle of the Pytorch process group.
        stream: A CUDA stream used to execute NCCL operations. If ``None``,
            CUDA stream of the default group will be used. See :func:`new_group`
            for more information.
    Returns:
        A handle of the Bagua process group.
    """
    global __torch_group_id
    # Snapshot a unique id for this conversion; it namespaces the store keys
    # below so concurrent conversions do not collide.
    torch_group_id = __torch_group_id
    __torch_group_id += 1
    ranks = None
    if group in c10d._pg_group_ranks:
        # Fast path: torch already tracks the global ranks of this group.
        ranks = list(c10d._pg_group_ranks[group].keys())
    elif _default_store:
        # Exchange global ranks via the shared store: each member publishes
        # its own global rank under its group-local rank, then reads all.
        def rank_key(rank):
            return "global rank of {}.{}".format(torch_group_id, rank)
        _default_store.set(rank_key(group.rank()), env.get_rank())
        ranks = [int(_default_store.get(rank_key(i))) for i in range(group.size())]
    else:
        # No metadata available; assume the group covers ranks 0..size-1.
        ranks = list(range(group.size()))
    return new_group(ranks, stream)
@lru_cache(maxsize=None)
def get_communicator(group_name: str, comm_name: str):
    """Create (or fetch the cached) communicator for a registered group.

    Args:
        group_name: Name of a group previously stored in ``_pg_map``.
        comm_name: One of ``"global"``, ``"inter"`` or ``"intra"``.

    Returns:
        A ``B.BaguaSingleCommunicatorPy``, or ``CommMember.NON_COMM_MEMBER``
        when the calling rank does not belong to the requested communicator.

    Raises:
        ValueError: If ``comm_name`` is not one of the three accepted values.
    """
    global _pg_map
    pg = _pg_map[group_name]
    if comm_name == "global":
        ranks = pg.ranks
    elif comm_name == "inter":
        ranks = pg._get_inter_ranks()
    elif comm_name == "intra":
        ranks = pg._get_intra_ranks()
    else:
        raise ValueError("comm_name should be one of ['global', 'inter', 'intra']")
    comm_key = "{}_{}_{}".format(group_name, comm_name, ",".join(map(str, ranks)))
    # Note: the NCCL unique-id broadcast runs before the membership check,
    # so every caller participates, including non-members.
    nccl_unique_id = broadcast_nccl_unique_id(comm_key, root=ranks[0])
    if get_rank() not in ranks:
        return CommMember.NON_COMM_MEMBER
    # Rank within this communicator is the position in the (sorted) rank list.
    rank = ranks.index(get_rank())
    nranks = len(ranks)
    comm = B.BaguaSingleCommunicatorPy(
        rank=rank,
        nranks=nranks,
        device_id=get_local_rank(),
        stream_ptr=pg.stream.cuda_stream,
        nccl_unique_id_str=nccl_unique_id,
    )
    logging.debug(
        "init bagua communicator %s-%s ok, global rank: %s rank: %s",
        group_name,
        comm_name,
        get_rank(),
        comm.rank(),
    )
    comm.cuda_stream = pg.stream
    return comm
def _rank_not_in_comm(comm: Optional[B.BaguaSingleCommunicatorPy] = None):
    """Return ``True`` when *comm* is the non-member sentinel; ``None`` is
    treated as the default communicator, of which every rank is a member."""
    return comm is not None and comm == CommMember.NON_COMM_MEMBER
def _rank_not_in_group(group: Optional[BaguaProcessGroup] = None):
    """Return ``True`` when the current process is not a member of *group*;
    ``None`` means the default group, of which every rank is a member."""
    return group is not None and _rank_not_in_comm(group.get_global_communicator())
@lru_cache(maxsize=None)
def get_backend(model_name: str):
    """Return the per-model (cached) Bagua communication backend bound to the
    local CUDA device."""
    comm_backend = B.BaguaCommBackendPy(100, device_id=get_local_rank())
    comm_backend.model_name = model_name
    return comm_backend
def run_flask_app(port):
    """Run the autotune HTTP service (blocking) on ``0.0.0.0:port``.

    Intended to run in a child process; see :func:`start_autotune_server`.
    """
    # Imported lazily so flask/gevent are only required in the server process.
    from flask import Flask
    from gevent.pywsgi import WSGIServer
    import os
    # Presumably set to silence werkzeug's reloader/startup behavior in the
    # child process — TODO confirm.
    os.environ["WERKZEUG_RUN_MAIN"] = "true"
    autotune_service = AutotuneService(
        world_size=get_world_size(),
        autotune_level=env.get_autotune_level(),
        max_samples=env.get_autotune_max_samples(),
        sampling_confidence_time_s=env.get_autotune_sampling_confidence_time_s(),
        warmup_time_s=env.get_autotune_warmup_time_s(),
        is_output_autotune_log=env.get_is_output_autotune_log(),
        default_bucket_size=get_default_bucket_size(),
    )
    app = Flask(__name__)
    app = autotune_service.setup_app(app)
    http_server = WSGIServer(
        listener=("0.0.0.0", port),
        application=app,
        log=None,
    )
    # Blocks until the process is terminated.
    http_server.serve_forever()
# Handle of the background autotune server process (set by start_autotune_server).
_autotune_server = None
# Port of the autotune service, read by get_hyperparameters_service_client;
# assigned elsewhere in this module (not shown here) — confirm init order.
_autotune_service_port = None
def start_autotune_server(service_port: int):
    """Launch the autotune HTTP service in a daemonized background process."""
    global _autotune_server
    _autotune_server = multiprocessing.Process(
        target=run_flask_app, args=(service_port,), daemon=True
    )
    _autotune_server.start()
@lru_cache(maxsize=None)
def get_hyperparameters_service_client():
    """Return the (cached) client of the autotune hyperparameter service.

    NOTE(review): relies on the module global ``_autotune_service_port``
    being assigned before the first call; the assignment is not in this
    chunk — confirm initialization order.
    """
    global _autotune_service_port
    hyperparameters_service_client = AutotuneClient(
        get_master_addr(), _autotune_service_port
    )
    return hyperparameters_service_client
def _find_free_bagua_service_port(store) -> int:
    """Agree on the Bagua service port across ranks.

    An explicitly configured port wins; otherwise rank 0 picks a free port
    and publishes it through *store* for the other ranks to read.
    """
    configured = get_bagua_service_port()
    if configured > 0:
        return configured
    if get_rank() != 0:
        return int(store.get("bagua_service_port"))
    port = find_free_network_port()
    store.set("bagua_service_port", str(port))
    return port
def init_process_group(store: Optional[torch.distributed.Store] = None):
"""Initializes the PyTorch builtin distributed process group, and this will
also initialize the distributed package, should be executed before all the
APIs of Bagua.
Args:
store: Key/value store accessible to all workers, used to exchange
connection/address information. If ``None``, a TCP-based store will be created.
Default: ``None``.
Examples::
>>> import torch
>>> import bagua.torch_api as bagua
>>>
>>> torch.cuda.set_device(bagua.get_local_rank()) # THIS LINE IS IMPORTANT. See the notes below.
>>> bagua.init_process_group()
>>>
>>> model = torch.nn.Sequential(
... torch.nn.Linear(D_in, H),
... torch.nn.ReLU(),
... torch.nn.Linear(H, D_out),
... )
>>> optimizer = torch.optim.SGD(
... model.parameters(),
... lr=0.01,
... momentum=0.9
... )
>>> model = | |
# repo: mbmetcalfe/Abbot (dataset provenance tag)
#!/usr/local/bin/python3.6
import discord
from discord.ext import commands
from discord.ext.commands.bot import _get_variable
from config import Config, ConfigDefaults
from permissions import Permissions, PermissionsDefaults
from utils import load_file, write_file, sane_round_int
import exceptions
import inspect
import asyncio
import traceback
import aiohttp
import logging
import os
import random
import inflect
import time
import datetime
import re
import sys
from functools import wraps
from textwrap import dedent
from constants import VERSION as BOTVERSION
from constants import DISCORD_MSG_CHAR_LIMIT, AUDIO_CACHE_PATH
import praw
import db
import event
class Response:
    """Value object describing how the bot should deliver a command's reply.

    Attributes:
        content: Text to send, or the embed payload when ``embed`` is True
            (see ``safe_send_message``).
        reply: Presumably marks the message as a reply to the invoker —
            confirm at the call sites that consume Response.
        embed: Whether ``content`` is an embed rather than plain text.
        delete_after: Seconds before the sent message is deleted; 0 keeps it.
        reactions: Optional iterable of emoji to add to the sent message.
    """
    def __init__(self, content, reply=False, embed=False, delete_after=0, reactions=None):
        self.content = content
        self.reply = reply
        self.embed = embed
        self.delete_after = delete_after
        self.reactions = reactions

    def __repr__(self):
        # Debug-friendly representation; added for log readability.
        return ("Response(content={!r}, reply={!r}, embed={!r}, "
                "delete_after={!r}, reactions={!r})".format(
                    self.content, self.reply, self.embed,
                    self.delete_after, self.reactions))
class Abbot(discord.Client):
    def __init__(self, config_file=ConfigDefaults.options_file, perms_file=PermissionsDefaults.perms_file):
        """Load config, permissions and blacklist, then set up HTTP session,
        background presence task and (optionally) the database."""
        self.config = Config(config_file)
        # The owner is granted every permission.
        self.permissions = Permissions(perms_file, grant_all=[self.config.owner_id])
        self.blacklist = set(load_file(self.config.blacklist_file))
        self.exit_signal = None
        self.init_ok = False
        self.cached_client_id = None
        super().__init__()
        self.aiosession = aiohttp.ClientSession(loop=self.loop)
        self.http.user_agent += ' Abbot/%s' % BOTVERSION
        # NOTE(review): `logger` and `_auto_presence_task` are not defined in
        # the visible part of this file — confirm they exist at module/class
        # level. Also, on_ready reads `auto_statuses` while this reads
        # `auto_status`; confirm both config attributes exist.
        if self.config.auto_status > 0:
            logger.info("AutoStatus set. Creating task.")
            self.loop.create_task(self._auto_presence_task())
        if self.config.database_name:
            self.database = db.AbbotDatabase(self.config.database_name)
# TODO: Add some sort of `denied` argument for a message to send when someone else tries to use it
    def owner_only(func):
        """Decorator restricting a command coroutine to the bot owner.

        NOTE(review): `_get_variable` comes from discord.ext.commands.bot
        internals and inspects caller stack frames for a local named
        'message' — a fragile private API; verify on library upgrades.
        """
        @wraps(func)
        async def wrapper(self, *args, **kwargs):
            # Only allow the owner to use these commands
            orig_msg = _get_variable('message')
            # No message in scope (e.g. internal call) also passes the check.
            if not orig_msg or orig_msg.author.id == self.config.owner_id:
                return await func(self, *args, **kwargs)
            else:
                raise exceptions.PermissionsError("only the owner can use this command", expire_in=30)
        return wrapper
def safe_print(self, content, *, end='\n', flush=True):
sys.stdout.buffer.write((content + end).encode('utf-8', 'replace'))
if flush: sys.stdout.flush()
def get_reddit_post(self, subreddit):
#TODO: Have him get a random one once/day.
#TODO: Add the URL and author in the message.
#TODO: decide on hot vs top vs new
maxPosts = 100
reddit = praw.Reddit(user_agent='Abbot',
client_id=self.config.reddit_client_id,
client_secret=self.config.reddit_client_secret)
#posts = reddit.subreddit(subreddit).new(limit=maxPosts)
posts = reddit.subreddit(subreddit).hot(limit=maxPosts)
postNumber = random.randint(0, maxPosts)
for i, post in enumerate(posts):
if i == postNumber:
return post
return None
async def send_typing(self, destination):
try:
return await super().send_typing(destination)
except discord.Forbidden:
if self.config.debug_mode:
logger.debug("Could not send typing to %s, no permssion" % destination)
@staticmethod
def _fixg(x, dp=2):
return ('{:.%sf}' % dp).format(x).rstrip('0').rstrip('.')
    def _get_owner(self, voice=False):
        """Find the owner's member object across all servers.

        With ``voice=True``, only return the owner if currently connected to
        a voice channel (implicitly returns None otherwise).
        """
        if voice:
            for server in self.servers:
                for channel in server.channels:
                    for m in channel.voice_members:
                        if m.id == self.config.owner_id:
                            return m
        else:
            return discord.utils.find(lambda m: m.id == self.config.owner_id, self.get_all_members())
async def _autojoin_channels(self, channels):
joined_servers = []
for channel in channels:
if channel.server in joined_servers:
logger.info("Already joined a channel in %s, skipping" % channel.server.name)
continue
if channel and channel.type == discord.ChannelType.voice:
logger.info("Attempting to autojoin %s in %s" % (channel.name, channel.server.name))
chperms = channel.permissions_for(channel.server.me)
if not chperms.connect:
logger.info("Cannot join channel \"%s\", no permission." % channel.name)
continue
elif not chperms.speak:
logger.info("Will not join channel \"%s\", no permission to speak." % channel.name)
continue
try:
player = await self.get_player(channel, create=True)
if player.is_stopped:
player.play()
if self.config.auto_playlist:
await self.on_player_finished_playing(player)
joined_servers.append(channel.server)
except Exception as ex:
if self.config.debug_mode:
traceback.print_exc()
logger.error("Failed to join %s: %s" % channel.name, ex)
elif channel:
logger.info("Not joining %s on %s, that's a text channel." % (channel.name, channel.server.name))
else:
logger.error("Invalid channel thing: " + channel)
    async def _wait_delete_msg(self, message, after):
        """Delete *message* after sleeping *after* seconds (best-effort)."""
        await asyncio.sleep(after)
        await self.safe_delete_message(message)
    def _cleanup(self):
        """Best-effort shutdown: log out, then cancel and drain all pending
        asyncio tasks. Errors are deliberately swallowed — this runs while
        the loop is already being torn down."""
        try:
            self.loop.run_until_complete(self.logout())
        except: # Can be ignored
            pass
        pending = asyncio.Task.all_tasks()
        gathered = asyncio.gather(*pending)
        try:
            gathered.cancel()
            self.loop.run_until_complete(gathered)
            # Retrieve the exception so it is not logged as unhandled.
            gathered.exception()
        except: # Can be ignored
            pass
    async def safe_send_message(self, dest, content, *, tts=False, expire_in=0, also_delete=None, quiet=False, embed=False, reactions=None):
        """Send a message, tolerating permission/visibility errors.

        Args:
            dest: Channel or user to send to.
            content: Message text, or the embed object when ``embed`` is True.
            tts: Send as text-to-speech.
            expire_in: If truthy, delete the sent message after this many seconds.
            also_delete: Another message to schedule for deletion with the
                same delay (deleted immediately when ``expire_in`` is 0).
            quiet: Suppress warning logs on failure.
            embed: Treat ``content`` as an embed.
            reactions: Iterable of emoji to add to the sent message.

        Returns:
            The sent message, or None if sending failed.
        """
        msg = None
        try:
            if embed:
                msg = await self.send_message(dest, embed=content, tts=tts)
            else:
                msg = await self.send_message(dest, content, tts=tts)
            if msg and reactions:
                for reaction in reactions:
                    await self.safe_add_reaction(msg, reaction)
            if msg and expire_in:
                asyncio.ensure_future(self._wait_delete_msg(msg, expire_in))
            if also_delete and isinstance(also_delete, discord.Message):
                asyncio.ensure_future(self._wait_delete_msg(also_delete, expire_in))
        except discord.Forbidden:
            if not quiet:
                logger.warning("Cannot send message to %s, no permission." % dest.name)
        except discord.NotFound:
            if not quiet:
                logger.warning("Cannot send message to %s, invalid channel?" % dest.name)
        return msg
    async def safe_add_reaction(self, message, emoji):
        """Try to react to a message with a specific emoji.

        Returns:
            The reaction object, or None if the reaction could not be added.
        """
        reaction = None
        try:
            reaction = await self.add_reaction(message, emoji)
        except discord.Forbidden:
            logger.warning("Cannot react to message in %s, no permission" % message.channel)
        except discord.NotFound:
            logger.warning("Cannot react to message in %s, invalid channel?" % message.channel)
        except Exception as e:
            # Catch-all so a bad emoji never breaks the caller's send path.
            logger.error("Could not react to message id {0} with {1}".format(message.id, emoji))
            logger.debug("Unexpected error: {0}\n---------------------------------------------".format(str(e)))
        return reaction
    async def safe_send_embed(self, dest, content, *, tts=False, expire_in=0, also_delete=None, quiet=False):
        """Send an embed, tolerating permission/visibility errors.

        NOTE(review): near-duplicate of safe_send_message(embed=True) minus
        reactions support — consider delegating to it.

        Returns:
            The sent message, or None if sending failed.
        """
        msg = None
        try:
            msg = await self.send_message(dest, embed=content, tts=tts)
            if msg and expire_in:
                asyncio.ensure_future(self._wait_delete_msg(msg, expire_in))
            if also_delete and isinstance(also_delete, discord.Message):
                asyncio.ensure_future(self._wait_delete_msg(also_delete, expire_in))
        except discord.Forbidden:
            if not quiet:
                logger.warning("Cannot send message to %s, no permission" % dest.name)
        except discord.NotFound:
            if not quiet:
                logger.warning("Cannot send message to %s, invalid channel?" % dest.name)
        return msg
    async def safe_delete_message(self, message, *, quiet=False):
        """Delete *message*, logging (unless ``quiet``) instead of raising
        when it is forbidden or already gone."""
        try:
            return await self.delete_message(message)
        except discord.Forbidden:
            if not quiet:
                logger.warning("Cannot delete message \"%s\", no permission" % message.clean_content)
        except discord.NotFound:
            if not quiet:
                logger.warning("Cannot delete message \"%s\", message not found" % message.clean_content)
    # noinspection PyMethodOverriding
    def run(self):
        """Run the bot until it stops.

        Always cleans up and closes the event loop; any recorded exit signal
        (see on_error) is re-raised afterwards so the launcher can react.

        Raises:
            exceptions.HelpfulError: On bad login credentials.
        """
        try:
            self.loop.run_until_complete(self.start(*self.config.auth))
        except discord.errors.LoginFailure:
            # Add if token, else
            raise exceptions.HelpfulError(
                "Bot cannot login, bad credentials.",
                "Fix your Email or Password or Token in the options file. "
                "Remember that each field should be on their own line.")
        finally:
            try:
                self._cleanup()
            except Exception as e:
                logger.error("Error in cleanup: %s" % e)
            self.loop.close()
            if self.exit_signal:
                raise self.exit_signal
    async def logout(self):
        """Disconnect all voice clients before logging out of Discord."""
        await self.disconnect_all_voice_clients()
        return await super().logout()
    async def on_error(self, event, *args, **kwargs):
        """Global event error hook.

        HelpfulError logs and shuts the bot down; Signal subclasses are
        recorded in ``exit_signal`` (re-raised by run()) before logging out;
        anything else is printed and the bot keeps running.
        """
        ex_type, ex, stack = sys.exc_info()
        if ex_type == exceptions.HelpfulError:
            logger.error("Exception in %s" % event)
            logger.error(ex.message)
            await asyncio.sleep(2) # don't ask
            await self.logout()
        elif issubclass(ex_type, exceptions.Signal):
            self.exit_signal = ex_type
            await self.logout()
        else:
            traceback.print_exc()
    async def on_resumed(self):
        """Called when the gateway session resumes after a disconnect."""
        logger.debug("Resumed...")
        # for vc in self.the_voice_clients.values():
        # vc.main_ws = self.ws
    async def on_ready(self):
        """Post-login setup: validate owner config, log servers/bindings and
        options, autojoin configured voice channels, set initial presence."""
        logger.info('Connected! Abbot v%s.' % BOTVERSION)
        # The bot cannot be its own owner — usually a credentials mix-up.
        if self.config.owner_id == self.user.id:
            raise exceptions.HelpfulError(
                "Your OwnerID is incorrect or you've used the wrong credentials.",
                "The bot needs its own account to function. "
                "The OwnerID is the id of the owner, not the bot. "
                "Figure out which one is which and use the correct information.")
        self.init_ok = True
        logger.info("Bot: %s/%s#%s" % (self.user.id, self.user.name, self.user.discriminator))
        owner = self._get_owner(voice=True) or self._get_owner()
        if owner and self.servers:
            logger.info("Owner: %s/%s#%s\n" % (owner.id, owner.name, owner.discriminator))
            logger.info('Server List:')
            [logger.info(' - ' + s.name) for s in self.servers]
        elif self.servers:
            logger.info("Owner could not be found on any server (id: %s)\n" % self.config.owner_id)
            logger.info('Server List:')
            [logger.info(' - ' + s.name) for s in self.servers]
        else:
            logger.info("Owner unknown, bot is not on any servers.")
        if self.user.bot:
            logger.info("\nTo make the bot join a server, paste this link in your browser.")
            logger.info("Note: You should be logged into your main account and have \n"
                "manage server permissions on the server you want the bot to join.\n")
            logger.info(" " + await self.generate_invite_link())
        if self.config.bound_channels:
            chlist = set(self.get_channel(i) for i in self.config.bound_channels if i)
            chlist.discard(None)
            # Voice channels cannot carry text commands; drop them from the
            # binding set (and from the config's set).
            invalids = set()
            invalids.update(c for c in chlist if c.type == discord.ChannelType.voice)
            chlist.difference_update(invalids)
            self.config.bound_channels.difference_update(invalids)
            logger.info("Bound to text channels:")
            [logger.info(' - %s/%s' % (ch.server.name.strip(), ch.name.strip())) for ch in chlist if ch]
            if invalids and self.config.debug_mode:
                logger.info("\nNot binding to voice channels:")
                [logger.info(' - %s/%s' % (ch.server.name.strip(), ch.name.strip())) for ch in invalids if ch]
        else:
            logger.info("Not bound to any text channels")
        logger.info("Options:")
        logger.info(" Command prefix: " + self.config.command_prefix)
        logger.info(" Delete Messages: " + ['Disabled', 'Enabled'][self.config.delete_messages])
        if self.config.delete_messages:
            logger.info(" Delete Invoking: " + ['Disabled', 'Enabled'][self.config.delete_invoking])
        logger.info(" Debug Mode: " + ['Disabled', 'Enabled'][self.config.debug_mode])
        # maybe option to leave the ownerid blank and generate a random command for the owner to use
        # wait_for_message is pretty neato
        if self.config.autojoin_channels:
            await self._autojoin_channels(self.config.autojoin_channels)
        # NOTE(review): __init__ reads `auto_status` but this reads
        # `auto_statuses` — confirm both config attributes exist.
        if self.config.auto_statuses:
            await self.update_presence("{0} | {1}help".format(
                random.sample(self.config.auto_statuses, 1)[0],
                self.config.command_prefix))
        # t-t-th-th-that's all folks!
# -----------
# Commands
# -----------
async def cmd_help(self, channel, author, command=None):
"""
Prints a help message.
If a command is specified, it prints a help message for that command.
Otherwise, it lists the available commands.
Usage:
{command_prefix}help [command]
"""
if command:
cmd = getattr(self, 'cmd_' + command, None)
if cmd:
return Response(
"```\n{}```".format(
dedent(cmd.__doc__.replace('{command_prefix}', self.config.command_prefix)),
command_prefix=self.config.command_prefix
),
delete_after=60 if channel != 'Direct Message' else 0
)
else:
return Response("No such command.", delete_after=10 if channel != 'Direct Message' else 0)
else:
helpmsg = "```"
commandCount = 0
for att in dir(self):
if att.startswith('cmd_') and att != 'cmd_help':
command_name = att.replace('cmd_', self.config.command_prefix).lower()
if (commandCount % 3) == 0:
helpmsg += "\n"
commandCount += 1
helpmsg += "{0:20}".format(command_name)
helpmsg += "```\nFor help on a specific command, type | |
255, 255, 255],
10477: [255, 255, 0, 0, 255, 255, 255, 255],
10478: [15, 15, 240, 240, 255, 255, 255, 255],
10479: [255, 255, 240, 240, 255, 255, 255, 255],
10480: [0, 0, 15, 15, 15, 15, 255, 255],
10481: [240, 240, 15, 15, 15, 15, 255, 255],
10482: [0, 0, 255, 255, 15, 15, 255, 255],
10483: [240, 240, 255, 255, 15, 15, 255, 255],
10484: [0, 0, 15, 15, 255, 255, 255, 255],
10485: [240, 240, 15, 15, 255, 255, 255, 255],
10486: [0, 0, 255, 255, 255, 255, 255, 255],
10487: [240, 240, 255, 255, 255, 255, 255, 255],
10488: [15, 15, 15, 15, 15, 15, 255, 255],
10489: [255, 255, 15, 15, 15, 15, 255, 255],
10490: [15, 15, 255, 255, 15, 15, 255, 255],
10491: [255, 255, 255, 255, 15, 15, 255, 255],
10492: [15, 15, 15, 15, 255, 255, 255, 255],
10493: [255, 255, 15, 15, 255, 255, 255, 255],
10494: [15, 15, 255, 255, 255, 255, 255, 255],
10495: [255, 255, 255, 255, 255, 255, 255, 255],
10515: [28, 28, 62, 28, 8, 0, 62, 0],
11037: [0, 0, 0, 24, 24, 0, 0, 0],
11044: [60, 126, 255, 255, 255, 255, 126, 60],
11093: [60, 126, 231, 195, 195, 231, 126, 60],
11096: [0, 60, 126, 102, 102, 126, 60, 0],
11822: [60, 102, 102, 48, 24, 0, 24, 0],
57472: [0, 0, 0, 0, 0, 0, 0, 0],
57473: [240, 240, 240, 0, 0, 0, 0, 0],
57474: [0, 0, 240, 240, 240, 240, 0, 0],
57475: [240, 240, 240, 240, 240, 240, 0, 0],
57476: [0, 0, 0, 0, 0, 240, 240, 240],
57477: [240, 240, 240, 0, 0, 240, 240, 240],
57478: [0, 0, 240, 240, 240, 240, 240, 240],
57479: [240, 240, 240, 240, 240, 240, 240, 240],
57480: [15, 15, 15, 0, 0, 0, 0, 0],
57481: [255, 255, 255, 0, 0, 0, 0, 0],
57482: [15, 15, 255, 240, 240, 240, 0, 0],
57483: [255, 255, 255, 240, 240, 240, 0, 0],
57484: [15, 15, 15, 0, 0, 240, 240, 240],
57485: [255, 255, 255, 0, 0, 240, 240, 240],
57486: [15, 15, 255, 240, 240, 240, 240, 240],
57487: [255, 255, 255, 240, 240, 240, 240, 240],
57488: [0, 0, 15, 15, 15, 15, 0, 0],
57489: [240, 240, 255, 15, 15, 15, 0, 0],
57490: [0, 0, 255, 255, 255, 255, 0, 0],
57491: [240, 240, 255, 255, 255, 255, 0, 0],
57492: [0, 0, 15, 15, 15, 255, 240, 240],
57493: [240, 240, 255, 15, 15, 255, 240, 240],
57494: [0, 0, 255, 255, 255, 255, 240, 240],
57495: [240, 240, 255, 255, 255, 255, 240, 240],
57496: [15, 15, 15, 15, 15, 15, 0, 0],
57497: [255, 255, 255, 15, 15, 15, 0, 0],
57498: [15, 15, 255, 255, 255, 255, 0, 0],
57499: [255, 255, 255, 255, 255, 255, 0, 0],
57500: [15, 15, 15, 15, 15, 255, 240, 240],
57501: [255, 255, 255, 15, 15, 255, 240, 240],
57502: [15, 15, 255, 255, 255, 255, 240, 240],
57503: [255, 255, 255, 255, 255, 255, 240, 240],
57504: [0, 0, 0, 0, 0, 15, 15, 15],
57505: [240, 240, 240, 0, 0, 15, 15, 15],
57506: [0, 0, 240, 240, 240, 255, 15, 15],
57507: [240, 240, 240, 240, 240, 255, 15, 15],
57508: [0, 0, 0, 0, 0, 255, 255, 255],
57509: [240, 240, 240, 0, 0, 255, 255, 255],
57510: [0, 0, 240, 240, 240, 255, 255, 255],
57511: [240, 240, 240, 240, 240, 255, 255, 255],
57512: [15, 15, 15, 0, 0, 15, 15, 15],
57513: [255, 255, 255, 0, 0, 15, 15, 15],
57514: [15, 15, 255, 240, 240, 255, 15, 15],
57515: [255, 255, 255, 240, 240, 255, 15, 15],
57516: [15, 15, 15, 0, 0, 255, 255, 255],
57517: [255, 255, 255, 0, 0, 255, 255, 255],
57518: [15, 15, 255, 240, 240, 255, 255, 255],
57519: [255, 255, 255, 240, 240, 255, 255, 255],
57520: [0, 0, 15, 15, 15, 15, 15, 15],
57521: [240, 240, 255, 15, 15, 15, 15, 15],
57522: [0, 0, 255, 255, 255, 255, 15, 15],
57523: [240, 240, 255, 255, 255, 255, 15, 15],
57524: [0, 0, 15, 15, 15, 255, 255, 255],
57525: [240, 240, 255, 15, 15, 255, 255, 255],
57526: [0, 0, 255, 255, 255, 255, 255, 255],
57527: [240, 240, 255, 255, 255, 255, 255, 255],
57528: [15, 15, 15, 15, 15, 15, 15, 15],
57529: [255, 255, 255, 15, 15, 15, 15, 15],
57530: [15, 15, 255, 255, 255, 255, 15, 15],
57531: [255, 255, 255, 255, 255, 255, 15, 15],
57532: [15, 15, 15, 15, 15, 255, 255, 255],
57533: [255, 255, 255, 15, 15, 255, 255, 255],
57534: [15, 15, 255, 255, 255, 255, 255, 255],
57535: [255, 255, 255, 255, 255, 255, 255, 255],
57536: [0, 0, 0, 0, 0, 0, 0, 0],
57537: [224, 224, 0, 0, 0, 0, 0, 0],
57538: [0, 0, 0, 224, 224, 0, 0, 0],
57539: [224, 224, 0, 224, 224, 0, 0, 0],
57540: [0, 0, 0, 0, 0, 224, 224, 0],
57541: [224, 224, 0, 0, 0, 0, 224, 224],
57542: [0, 0, 0, 224, 224, 0, 224, 224],
57543: [224, 224, 0, 224, 224, 0, 224, 224],
57544: [14, 14, 0, 0, 0, 0, 0, 0],
57545: [238, 238, 0, 0, 0, 0, 0, 0],
57546: [14, 14, 0, 224, 224, 0, 0, 0],
57547: [238, 238, 0, 224, 224, 0, 0, 0],
57548: [14, 14, 0, 0, 0, 224, 224, 0],
57549: [238, 238, 0, 0, 0, 0, 224, 224],
57550: [14, 14, 0, 224, 224, 0, 224, 224],
57551: [238, 238, 0, 224, 224, 0, 224, 224],
57552: [0, 0, 0, 14, 14, 0, 0, 0],
57553: [224, 224, 0, 14, 14, 0, 0, 0],
57554: [0, 0, 0, 238, 238, 0, 0, 0],
57555: [224, 224, 0, 238, 238, 0, 0, 0],
57556: [0, 0, 0, 14, 14, 224, 224, 0],
57557: [224, 224, 0, 14, 14, 0, 224, 224],
57558: [0, 0, 0, 238, 238, 0, 224, 224],
57559: [224, 224, 0, 238, 238, 0, 224, 224],
57560: [14, 14, 0, 14, 14, 0, 0, 0],
57561: [238, 238, 0, 14, 14, 0, 0, 0],
57562: [14, 14, 0, 238, 238, 0, 0, 0],
57563: [238, 238, 0, 238, 238, 0, 0, 0],
57564: [14, 14, 0, 14, 14, 224, 224, 0],
57565: [238, 238, 0, 14, 14, 0, 224, 224],
57566: [14, 14, 0, 238, 238, 0, 224, 224],
57567: [238, 238, 0, 238, 238, 0, 224, 224],
57568: [0, 0, 0, 0, 0, 0, 14, 14],
57569: [224, 224, 0, 0, 0, 0, 14, 14],
57570: [0, 0, 0, 224, 224, 0, 14, 14],
57571: [224, 224, 0, 224, 224, 0, 14, 14],
57572: [0, 0, 0, 0, 0, 224, 238, 14],
57573: [224, 224, 0, 0, 0, 0, 238, 238],
57574: [0, 0, 0, 224, 224, 0, 238, 238],
57575: [224, 224, 0, 224, 224, 0, 238, 238],
57576: [14, 14, 0, 0, 0, 0, 14, 14],
57577: [238, 238, 0, 0, 0, 0, 14, 14],
57578: [14, 14, 0, 224, 224, 0, 14, 14],
57579: [238, 238, 0, 224, 224, 0, 14, 14],
57580: [14, 14, 0, 0, 0, 224, 238, 14],
57581: [238, 238, 0, 0, 0, 0, 238, 238],
57582: [14, 14, 0, 224, 224, 0, 238, 238],
57583: [238, 238, 0, 224, 224, 0, 238, 238],
57584: [0, 0, 0, 14, 14, 14, 14, 14],
57585: [224, 224, 0, 14, 14, 14, 14, 14],
57586: [0, 0, 0, 238, 238, 14, 14, 14],
57587: [224, 224, 0, 238, 238, 14, 14, 14],
57588: [0, 0, 0, 14, 14, 238, 238, | |
<filename>modes/Particles/zoe.py
#!/usr/local/bin/python
#
# $Id: //projects/zoe/zoe.py#1 $ $Date$
"""
A simple OpenGL rendering engine.
"""
__program__ = 'zoe'
__version__ = '1.0a'
__url__ = 'http://www.alcyone.com/pyos/zoe/'
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright (C) 2000-2002 <NAME>'
__license__ = 'LGPL'
import math
import sys
import time
import types
from OpenGL.GL import * # strange, but canonical PyOpenGL import
from OpenGL.GLU import *
from OpenGL.GLUT import *
# Some mathematical constants for convenience.
pi = math.pi
twoPi = 2*pi  # full turn in radians
piOverTwo = pi/2  # quarter turn (90 degrees)
piOverOneEighty = pi/180  # one degree expressed in radians
radiansToDegrees = 1/piOverOneEighty  # multiply radians by this to get degrees
degreesToRadians = piOverOneEighty  # multiply degrees by this to get radians
sqrtTwo = math.sqrt(2)  # default zoom step used by Camera.zoomIn/zoomOut
# Object ######################################################################
class Object:
    """The fundamental object."""

    def __init__(self):
        # Object is abstract; refuse to be instantiated directly.
        if self.__class__ is Object:
            raise NotImplementedError

    def display(self):
        """Display the object.  Subclasses must override this."""
        raise NotImplementedError

    def update(self):
        """Update the object's internal state, if relevant.

        The default implementation does nothing."""
        pass

    def commit(self):
        """Commit any pending changes.  Only called for objects in an
        engine which has the committing attribute set; a no-op here."""
        pass
class AxesObject(Object):
    """Shows a set of axes, with stippling in the negative direction."""

    # Colors of the x, y, and z axis lines respectively.
    xColor = 1.0, 0.0, 0.0
    yColor = 0.0, 1.0, 0.0
    zColor = 0.0, 0.0, 1.0
    # Bit pattern used to stipple the negative half of each axis.
    stipple = 0x0f0f

    def __init__(self, expanse=20):
        # expanse: half-length of each axis, in world units.
        Object.__init__(self)
        self.expanse = expanse

    def display(self):
        # Register the stipple pattern once; stippling is then enabled
        # only while drawing the negative half of each axis.
        glLineStipple(1, self.stipple)
        glDisable(GL_LINE_STIPPLE)
        # Positive x axis (solid).
        glBegin(GL_LINES)
        glColor3d(*self.xColor)
        glVertex3d(0, 0, 0)
        glVertex3d(self.expanse, 0, 0)
        glEnd()
        # Negative x axis (stippled).
        glEnable(GL_LINE_STIPPLE)
        glBegin(GL_LINES)
        glVertex3d(0, 0, 0)
        glVertex3d(-self.expanse, 0, 0)
        glEnd()
        glDisable(GL_LINE_STIPPLE)
        # Positive y axis (solid).
        glBegin(GL_LINES)
        glColor3d(*self.yColor)
        glVertex3d(0, 0, 0)
        glVertex3d(0, self.expanse, 0)
        glEnd()
        # Negative y axis (stippled).
        glEnable(GL_LINE_STIPPLE)
        glBegin(GL_LINES)
        glVertex3d(0, 0, 0)
        glVertex3d(0, -self.expanse, 0)
        glEnd()
        glDisable(GL_LINE_STIPPLE)
        # Positive z axis (solid).
        glBegin(GL_LINES)
        glColor3d(*self.zColor)
        glVertex3d(0, 0, 0)
        glVertex3d(0, 0, self.expanse)
        glEnd()
        # Negative z axis (stippled).
        glEnable(GL_LINE_STIPPLE)
        glBegin(GL_LINES)
        glVertex3d(0, 0, 0)
        glVertex3d(0, 0, -self.expanse)
        glEnd()
        glDisable(GL_LINE_STIPPLE)
class GridObject(Object):
    """Shows a grid on the x-y plane."""

    gridColor = 0.25, 0.25, 0.25

    def __init__(self, resolution=1, expanse=20, skipZero=1):
        """resolution: spacing between grid lines; expanse: half-size of
        the grid; skipZero: when true, omit the lines through the origin
        (so they don't overdraw a set of axes)."""
        Object.__init__(self)
        self.resolution = resolution
        self.expanse = expanse
        self.skipZero = skipZero

    def display(self):
        glColor3d(*self.gridColor)
        glBegin(GL_LINES)
        coord = -self.expanse
        while coord <= +self.expanse:
            if not (coord == 0 and self.skipZero):
                # One grid line parallel to y, one parallel to x.
                glVertex3d(coord, -self.expanse, 0)
                glVertex3d(coord, +self.expanse, 0)
                glVertex3d(-self.expanse, coord, 0)
                glVertex3d(+self.expanse, coord, 0)
            coord += self.resolution
        glEnd()
class ReplayingObject(Object):
    """An object which can be given a series of PostScript-like
    commands, finished with the 'done' method, and then can replay
    them back."""

    def __init__(self):
        # paths holds finished entries: a color triple (tuple) or a
        # list of vertex points; display distinguishes them by type.
        self.paths = []
        # current is the path being built, or None when no path is open.
        self.current = None

    def setColor(self, triple):
        """Record a color change (an RGB triple), closing any open path."""
        self.endPath()
        self.paths.append(tuple(triple))

    def startPath(self):
        """Begin a new path, closing the previous one if still open."""
        if self.current is not None:
            self.endPath()
        self.current = []

    def vertex(self, point):
        """Append a point to the currently open path."""
        assert self.current is not None
        self.current.append(point)

    def endPath(self):
        """Finish the open path, if any, and store it for replay."""
        if self.current is not None:
            self.paths.append(self.current)
            self.current = None

    def closePath(self):
        """Close the open path by repeating its first point, then end it."""
        assert self.current is not None
        self.current.append(self.current[0])
        self.endPath()

    def done(self):
        """Finish recording, flushing any still-open path."""
        self.endPath()

    def display(self):
        # Replay: tuples are color changes; lists are drawn as a point,
        # a line, or a line strip depending on how many vertices they hold.
        for path in self.paths:
            # NOTE(review): types.TupleType is Python 2 only; on Python 3
            # this needs isinstance(path, tuple) — confirm target version.
            if type(path) is types.TupleType:
                glColor3d(*path)
            else:
                if len(path) == 1:
                    glBegin(GL_POINTS)
                elif len(path) == 2:
                    glBegin(GL_LINES)
                else:
                    glBegin(GL_LINE_STRIP)
                for point in path:
                    glVertex3d(*point)
                glEnd()
class Plotter(ReplayingObject):
    """A plotter which, given a function taking two arguments, will
    play out its behavior drawing a surface."""

    def __init__(self, func, startT=-10.0, deltaT=0.2, endT=+10.0):
        """Sample func over [startT, endT] x [startT, endT] at steps of
        deltaT, recording one family of paths along each axis so the
        replayed paths form a wireframe surface.

        The original implementation duplicated the sweep loop verbatim
        for each axis; both sweeps now share _sweep."""
        ReplayingObject.__init__(self)
        # Paths of constant x (sweeping y), then constant y (sweeping x).
        self._sweep(func, startT, deltaT, endT, 1)
        self._sweep(func, startT, deltaT, endT, 0)

    def _sweep(self, func, startT, deltaT, endT, xOuter):
        """Record one family of paths.  The outer loop fixes x when
        xOuter is true, otherwise it fixes y.  Points where func raises
        ZeroDivisionError or OverflowError are silently skipped, which
        breaks the path at singularities."""
        outer = startT
        while outer <= endT:
            self.startPath()
            inner = startT
            while inner <= endT:
                if xOuter:
                    x, y = outer, inner
                else:
                    x, y = inner, outer
                try:
                    z = func(x, y)
                except (ZeroDivisionError, OverflowError):
                    inner += deltaT
                    continue
                self.vertex((x, y, z))
                inner += deltaT
            self.endPath()
            outer += deltaT
class StatusObject(Object):
    """A status object is one that can render itself as text on the
    main screen."""

    # Color used for the rendered text.
    textColor = 1.0, 1.0, 1.0

    def __init__(self, engine):
        Object.__init__(self)
        self.engine = engine

    def displayText(self, x, y, style, message):
        """Draw message at window coordinates (x, y) using the given
        GLUT bitmap font style."""
        # Temporarily swap in a pixel-aligned orthographic projection so
        # (x, y) are screen coordinates rather than world coordinates.
        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        glOrtho(0, self.engine.width, 0, self.engine.height, -1, 1)
        glColor3d(*self.textColor)
        glRasterPos2i(x, y)
        for char in message:
            glutBitmapCharacter(style, ord(char))
        # Restore the caller's projection and return to modelview mode.
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
class FrameRateCounter(StatusObject):
    """A frame rate counter, which displays the current frame number
    and the current frame rate."""

    def __init__(self, engine, x=10, y=10, style=GLUT_BITMAP_HELVETICA_12):
        """x, y: screen position of the readout; style: GLUT bitmap font."""
        StatusObject.__init__(self, engine)
        self.x = x
        self.y = y
        self.style = style

    def display(self):
        # Read the counters maintained by the engine each frame.
        text = "frame %d rate %.2f" % (self.engine.frame, self.engine.frameRate)
        self.displayText(self.x, self.y, self.style, text)
# Transform ###################################################################
class Transform:
    """An encapsulation of a transformation."""

    def __init__(self):
        pass

    def apply(self):
        """Apply the transformation.  The base class applies nothing;
        subclasses issue the relevant GL calls."""
        pass
class TranslateTransform(Transform):
    """A translation transformation."""

    def __init__(self, vec):
        # vec: (x, y, z) translation offsets.
        self.vec = vec

    def apply(self):
        dx, dy, dz = self.vec[0], self.vec[1], self.vec[2]
        glTranslated(dx, dy, dz)
class RotateTransform(Transform):
    """A rotation transformation."""

    def __init__(self, angle, ray=(0, 0, 1)):
        # angle is in radians; ray is the axis of rotation.
        self.angle = angle
        self.ray = ray

    def apply(self):
        # glRotated expects degrees, so convert on the way in.
        degrees = self.angle * radiansToDegrees
        glRotated(degrees, self.ray[0], self.ray[1], self.ray[2])
class ScaleTransform(Transform):
    """A scale transformation."""

    def __init__(self, vecOrScalar):
        """Accept either a single number (uniform scale on all three
        axes) or a 3-sequence of per-axis scale factors."""
        # Accepting int as well as float fixes a crash for integer
        # scalars, which the old `types.FloatType` test treated as
        # vectors (and which does not exist on Python 3 at all).
        if isinstance(vecOrScalar, (int, float)):
            self.vec = (vecOrScalar,) * 3
        else:
            self.vec = vecOrScalar

    def apply(self):
        glScaled(self.vec[0], self.vec[1], self.vec[2])
# Group #######################################################################
class Group(Object):
    """A group is an object which holds a collection of other objects.
    It displays, updates, and commits all its contained objects in
    sequence."""

    def __init__(self, objects=None):
        Object.__init__(self)
        # Build a fresh list per instance rather than sharing a default.
        if objects is None:
            objects = []
        self.objects = objects

    def append(self, object):
        self.objects.append(object)

    def extend(self, objects):
        self.objects.extend(objects)

    def remove(self, object):
        self.objects.remove(object)

    def before(self):
        """Hook run before the contained objects are displayed."""
        pass

    def after(self):
        """Hook run after the contained objects are displayed."""
        pass

    def display(self):
        self.before()
        for member in self.objects:
            member.display()
        self.after()

    def update(self):
        for member in self.objects:
            member.update()

    def commit(self):
        for member in self.objects:
            member.commit()
class TransformGroup(Group):
    """A group that implements a series of transforms."""

    def __init__(self, transforms, objects=None):
        Group.__init__(self, objects)
        self.transforms = transforms

    def before(self):
        # Save the current matrix, then apply each transform in order.
        glPushMatrix()
        for xform in self.transforms:
            xform.apply()

    def after(self):
        # Undo the transforms applied in before().
        glPopMatrix()
class RotatingGroup(TransformGroup):
    """A group that slowly rotates."""

    def __init__(self, angularSpeed, ray=(0, 0, 1), objects=None):
        # angularSpeed: radians advanced per update; ray: rotation axis.
        self.transform = RotateTransform(0.0, ray)
        TransformGroup.__init__(self, [self.transform], objects)
        self.angularSpeed = angularSpeed

    def update(self):
        # Advance the angle, wrapping once a full turn is exceeded.
        # NOTE(review): this override does not call Group.update, so
        # contained objects are never updated — confirm that is intended.
        self.transform.angle += self.angularSpeed
        if self.transform.angle >= twoPi:
            self.transform.angle -= twoPi
# Particle, System ############################################################
class Particle(Object):
    """A particle is an object with a position, and an optional
    trail."""

    # Number of previous positions to retain; 0 disables the trail.
    trailLength = 0
    # Colors used for the particle point and its trail line.
    particleColor = 1.0, 1.0, 1.0
    trailColor = 0.5, 0.5, 0.5

    def __init__(self, pos=(0, 0, 0)):
        Object.__init__(self)
        self.pos = pos
        self.trail = []

    def display(self):
        # Draw the trail (if enabled) as a line strip ending at the
        # current position, then the particle itself as a point.
        if self.trailLength:
            glColor3d(*self.trailColor)
            glBegin(GL_LINE_STRIP)
            for point in self.trail:
                glVertex3d(*point)
            glVertex3d(*self.pos)
            glEnd()
        glColor3d(*self.particleColor)
        glBegin(GL_POINTS)
        glVertex3d(*self.pos)
        glEnd()

    def update(self):
        # Record the current position and clip the trail to its maximum
        # length, discarding the oldest points.
        if self.trailLength:
            self.trail.append(self.pos)
            if len(self.trail) > self.trailLength:
                self.trail = self.trail[-self.trailLength:]

    def ok(self):
        """Does this particle need to be reclaimed?  Only applicable for
        particles in systems."""
        return 1
class NewtonianParticle(Particle):
    """A Newtonian particle has a position and a velocity, and every
    turn updates its position according to the velocity (which actually
    acts as a change in position)."""

    def __init__(self, pos=(0, 0, 0), vel=(0, 0, 0)):
        Particle.__init__(self, pos)
        self.vel = vel

    def update(self):
        Particle.update(self)
        # Euler step: advance the position by the current velocity.
        px, py, pz = self.pos
        vx, vy, vz = self.vel
        self.pos = (px + vx, py + vy, pz + vz)

    def impulse(self, deltavee):
        """Apply an impulse to the particle, with the change in velocity."""
        vx, vy, vz = self.vel
        self.vel = (vx + deltavee[0], vy + deltavee[1], vz + deltavee[2])
class System(Group):
    """A system is a group that maintains a list of objects with a
    maximum number, all of the same type.  Each object is expected to
    have an 'ok' method that returns whether or not it should be
    reclaimed."""

    def __init__(self, max):
        Group.__init__(self)
        self.max = max  # maximum number of live particles

    def new(self):
        """Construct a new particle; subclasses must override."""
        raise NotImplementedError

    def reset(self, index):
        """Replace the particle at the given index with a fresh one."""
        self.objects[index] = self.new()

    def update(self):
        Group.update(self)
        # Replace any particle whose ok() reports it has expired.
        for idx, particle in enumerate(self.objects):
            if not particle.ok():
                self.reset(idx)
        # Grow the population by one particle per update until full.
        if len(self.objects) < self.max:
            self.objects.append(None)
            self.reset(len(self.objects) - 1)
# Camera ######################################################################
class Camera:
    """A camera, which applies the viewing transformations in order to
    get the desired view."""

    defaultZoom = 0.2

    def __init__(self, engine):
        # Register this camera as the engine's active camera.
        self.engine = engine
        engine.camera = self
        self.zoom = self.defaultZoom

    def zoomIn(self, factor=sqrtTwo):
        """Zoom in by the given factor."""
        self.zoom = self.zoom * factor

    def zoomOut(self, factor=sqrtTwo):
        """Zoom out by the given factor."""
        self.zoom = self.zoom / factor

    def view(self):
        """Apply the relevant viewing transformations; a no-op here."""
        pass

    def refresh(self):
        """Refresh the position of the camera; a no-op here."""
        pass
class OverheadCamera(Camera):
    """An overhead camera views the x-y plane."""

    def __init__(self, engine, left=0, right=None, bottom=0, top=None):
        """The viewing rectangle defaults to the engine's full extent."""
        Camera.__init__(self, engine)
        if right is None:
            right = engine.width
        if top is None:
            top = engine.height
        self.left, self.right = left, right
        self.bottom, self.top = bottom, top

    def view(self):
        # Orthographic projection of the viewing rectangle, then zoom.
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        glOrtho(self.left, self.right, self.bottom, self.top, -1, 1)
        glScalef(self.zoom, self.zoom, self.zoom)
class BasicCamera(Camera):
"""A basic camera views a center point from an eye position, with
a reference up vector that points to the top of the screen."""
def __init__(self, engine, eye, center=(0, 0, 0), up=(0, 0, 1)):
Camera.__init__(self, engine)
self.center = center
self.eye = eye
self.up = up
def view(self):
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluLookAt(self.center[0], self.center[1], self.center[2],
self.eye[0], self.eye[1], self.eye[2],
self.up[0], self.up[1], | |
Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> import numpy as np
>>> import pandas as pd
>>> import random
>>> import matplotlib.pyplot as plt
>>> t_1= pd.read_excel("C:/Users/BrettData/Desktop/capst/fertiliser-use.xlsx", sheet_name=0, header=3, names=None, index_col=None, keep_default_na=False)
>>> a=t_1.sample(5)
>>> t1=t_1.iloc[43:59,1:5]
>>> t_2= pd.read_excel("C:/Users/BrettData/Desktop/capst/fertiliser-use.xlsx", sheet_name=1, header=3, names=None, index_col=None, keep_default_na=False)
>>> b=t_2.sample(5)
>>> t2=t_2.iloc[43:59,1:5]
>>> a1=np.where(t1-t2)
>>> a2=np.where(a-b)
>>> a1
(array([ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2,
2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4,
4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8,
8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10,
10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12,
12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14,
14, 15, 15, 15, 15, 15, 15, 15, 15], dtype=int64), array([0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5,
6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3,
4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1,
2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5,
6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7], dtype=int64))
>>> a2
(array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 9, 9], dtype=int64), array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1,
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2,
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3,
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5,
6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15], dtype=int64))
>>> t_1= pd.read_excel("C:/Users/BrettData/Desktop/capst/fertiliser-use.xlsx", sheet_name=2, header=2, names=None, index_col=None, keep_default_na=False)
>>> t_1= pd.read_excel("C:/Users/BrettData/Desktop/capst/fertiliser-use.xlsx", sheet_name=0, header=3, names=None, index_col=None, keep_default_na=False)
>>> t_3= pd.read_excel("C:/Users/BrettData/Desktop/capst/fertiliser-use.xlsx", sheet_name=2, header=2, names=None, index_col=None, keep_default_na=False)
>>> c=t_3.sample(5)
>>> tt3=t_3.iloc[43:59,1:9]
>>> t3=tt3.drop(columns='Unnamed: 3')
>>> t_4= pd.read_excel("C:/Users/BrettData/Desktop/capst/fertiliser-use.xlsx", sheet_name=3, header=2, names=None, index_col=None, keep_default_na=False)
>>> d=t_4.sample(5)
>>> tt4=t_4.iloc[17:35,1:9]
>>> t4=tt4.drop(columns='Unnamed: 5')
>>> b1=np.where(t3-t4)
>>> b2=np.where(c-d)
>>> b1
(array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18,
18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19,
19, 19, 19, 19, 19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20,
20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25,
25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29,
29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 30, 30, 30, 30, 30,
30, 30, 30, 30, 30, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31,
31, 31, 31, 31, 31, 31, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
32, 32, 32], dtype=int64), array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2,
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5,
6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3,
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, | |
<gh_stars>10-100
#!/usr/bin/python
# -*- coding: utf-8 -*-
from abra.config import DEFAULT_ALPHA, logger
from abra.mixin import InitRepr
from statsmodels.stats.api import DescrStatsW, CompareMeans
from statsmodels.distributions.empirical_distribution import ECDF
from statsmodels.stats.power import tt_ind_solve_power, zt_ind_solve_power
from statsmodels.stats.proportion import proportions_ztest, binom_test
from scipy.stats import norm
from scipy import optimize
from pandas import DataFrame
import numpy as np
CORRECTIONS = {'b': 'bonferroni', 's': 'sidak', 'bh': 'fdr_bh'}
def bonferroni(alpha_orig, p_values):
    """
    Bonferroni correction.
    en.wikipedia.org/wiki/Bonferroni_correction

    Parameters
    ----------
    alpha_orig : float
        alpha value before correction
    p_values: list[float]
        p values resulting from all the tests

    Returns
    -------
    alpha_corrected: float
        new critical value (i.e. the corrected alpha)
    """
    n_tests = len(p_values)
    return alpha_orig / n_tests
def sidak(alpha_orig, p_values):
    """
    Sidak correction.
    en.wikipedia.org/wiki/%C5%A0id%C3%A1k_correction

    Parameters
    ----------
    alpha_orig : float
        alpha value before correction
    p_values: list[float]
        p values resulting from all the tests

    Returns
    -------
    alpha_corrected: float
        new critical value (i.e. the corrected alpha)
    """
    n_tests = len(p_values)
    return 1. - (1. - alpha_orig) ** (1. / n_tests)
def fdr_bh(fdr, p_values):
    """
    Benjamini-Hochberg false-discovery rate adjustment procedure.
    pdfs.semanticscholar.org/af6e/9cd1652b40e219b45402313ec6f4b5b3d96b.pdf

    Parameters
    ----------
    fdr : float
        False Discovery Rate (q*), proportion of significant results that are
        actually false positives
    p_values: list[float]
        p values resulting from all the tests

    Returns
    -------
    alpha_corrected: float
        new critical value (i.e. the corrected alpha)
    """
    n_tests = len(p_values)

    def threshold(rank):
        # BH critical value for the p value at the given (1-based) rank.
        return rank * fdr / n_tests

    ranked = sorted(p_values)
    passing = [rank for rank, p in enumerate(ranked, 1) if p <= threshold(rank)]
    # When nothing passes, fall back to rank 1 (most conservative cutoff).
    if passing:
        max_rank = max(passing)
    else:
        max_rank = 1
    return threshold(max_rank)
def estimate_experiment_sample_sizes(
    delta,
    statistic='z',
    alpha=.05,
    power=.8,
    *args, **kwargs
):
    """
    Calculate the sample size required for each treatment in order to observe
    a difference of `delta` between control and variation groups, for a given
    setting of `alpha` and `power`.

    Parameters
    ----------
    delta : float
        The absolute difference in means between control and variation groups
    statistic : string
        Either:
        - 'z' or 't' if interpreting effect size as scaled difference of means
        - 'rates_ratio' if interpreting effect size as the ratio of means
    alpha : float [0, 1)
        The assumed Type I error of the test
    power : float [0, 1)
        The desired statistical power of the test
    *args, **kwargs
        Forwarded to the model-specific estimator (e.g. std_control /
        std_variation for 't'/'z', control_rate etc. for 'rates_ratio')

    Returns
    -------
    sample_sizes : list[int]
        The estimated sample sizes for the control and variation treatments

    Raises
    ------
    ValueError
        If `statistic` is not one of 't', 'z', or 'rates_ratio'.
    """
    mean_difference_statistics = ('t', 'z')
    if statistic == 'rates_ratio':
        return ratio_sample_size(alpha, power, delta, *args, **kwargs)
    if statistic in mean_difference_statistics:
        # std_control and/or std_variation arrive via *args / **kwargs.
        return cohens_d_sample_size(delta, alpha, power, statistic, *args, **kwargs)
    raise ValueError("Unknown statistic")
def cohens_d(delta, std_control, std_variation=None):
    """Return Cohen's d effect size for a mean difference `delta`,
    pooling the two group standard deviations.  When std_variation is
    falsy (omitted or zero) the control standard deviation is reused,
    i.e. variances are assumed homogenous."""
    if not std_variation:
        std_variation = std_control
    pooled_variance = (std_control ** 2 + std_variation ** 2) / 2.
    return delta / np.sqrt(pooled_variance)
def cohens_d_sample_size(
    delta,
    alpha,
    power,
    statistic,
    std_control,
    std_variation=None,
    sample_size_ratio=1.
):
    """
    Calculate sample size required to observe a significantly reliable
    difference between groups a and b.  Assumes Cohen's d definition of
    effect size and an enrollment ratio of 1.0 between groups by default.

    Parameters
    ----------
    delta : float
        The absolute difference in means between control and variation groups
    alpha : float [0, 1)
        The assumed Type I error of the test
    power : float [0, 1)
        The desired statistical power of the test
    statistic : str
        't' or 'z', selecting the statsmodels power solver
    std_control : float
        An estimate of the expected sample standard deviation of the
        control group
    std_variation : float
        An estimate of the expected sample standard deviation of the
        variation group.  If not provided, we assume homogenous variances.
    sample_size_ratio : float
        Ratio of variation to control sample sizes (default 1.0)

    Returns
    -------
    sample_sizes : list[int]
        The estimated sample sizes for the control and variation treatments

    Raises
    ------
    ValueError
        If `statistic` is not 't' or 'z'.

    References
    ----------
    Cohen, J. (1988). Statistical power analysis for the behavioral sciences
    (2nd ed.). Hillsdale, NJ: Lawrence Earlbaum Associates.
    """
    # Explicit dispatch table replaces the previous eval() of a
    # constructed function name — same solvers, no dynamic evaluation.
    power_solvers = {
        't': tt_ind_solve_power,
        'z': zt_ind_solve_power,
    }
    effect_size = cohens_d(delta, std_control, std_variation)
    try:
        solve_power = power_solvers[statistic]
    except KeyError:
        raise ValueError(
            "Unknown statistic, must be either {!r}".format(tuple(power_solvers))
        )
    N1 = int(
        solve_power(
            effect_size,
            alpha=alpha,
            power=power,
            ratio=sample_size_ratio
        )
    )
    N2 = int(N1 * sample_size_ratio)
    return [N1, N2]
def ratio_sample_size(
    alpha,
    power,
    delta,
    control_rate,
    control_exposure_time=1.,
    null_ratio=1.,
    sample_size_ratio=1.,
    exposure_time_ratio=1.
):
    """
    Calculate sample sizes required to observe a significantly reliable
    ratio of Poisson rates between variation and control groups, following
    the power calculation outlined in Gu et al, 2008.

    Parameters
    ----------
    alpha : float [0, 1)
        The assumed Type I error of the test
    power : float [0, 1)
        The desired statistical power of the test
    delta : float
        Absolute difference between the variation and control rates
    control_rate : float
        The Poisson rate of the control group
    control_exposure_time : float
        The number of time units of the control exposure.  Default is 1.0
    null_ratio : float
        The ratio of variation to control rates under the null hypothesis.
        Default is 1.
    sample_size_ratio : float
        The ratio of sample sizes of the variation to the control groups.
        Default is 1, thus assuming equal sample sizes.
    exposure_time_ratio : float
        The ratio of the variation exposure time to the control.  Default
        is 1.0, thus assuming equal exposure times.

    Returns
    -------
    list[int]
        Sample sizes for the control and variation groups.

    References
    ----------
    Gu et al. 2008. 'Testing the Ratio of Two Poisson Rates.'
    Biometrical Journal, 50, 2, 283-298.
    """
    # Express the absolute rate difference as the alternative-hypothesis
    # ratio of variation to control rates.
    alternative_ratio = float(control_rate + delta) / control_rate
    variation_exposure_time = exposure_time_ratio * control_exposure_time
    z_alpha = norm.ppf(1 - alpha)
    z_power = norm.ppf(power)

    def objective(sizes):
        # Drive the proposed exposure-weighted ratio toward the
        # alternative ratio, relative to the null.
        n_control, n_variation = sizes[0], sizes[1]
        ratio_proposed = (n_variation * variation_exposure_time) \
            / (n_control * control_exposure_time)
        return np.abs(null_ratio - (alternative_ratio / ratio_proposed))

    def size_ratio_constraint(sizes):
        """General sample size ratio constraint."""
        return (float(sizes[1]) / sizes[0]) - sample_size_ratio

    def control_size_constraint(sizes):
        """Control sample size constraint, outlined in Gu et al, 2008,
        Equation 10."""
        n_control, n_variation = sizes[0], sizes[1]
        d = (control_exposure_time * n_control) / (variation_exposure_time * n_variation)
        A = 2 * (1. - np.sqrt(null_ratio / alternative_ratio))
        C = np.sqrt((null_ratio + d) / alternative_ratio)
        D = np.sqrt((alternative_ratio + d) / alternative_ratio)
        required = (((z_alpha * C + z_power * D) / A) ** 2. - (3. / 8)) \
            / (control_exposure_time * control_rate)
        return n_control - required

    results = optimize.minimize(
        objective,
        (10, 10),
        bounds=((1, None), (1, None)),
        constraints=[
            {'type': 'eq', 'fun': size_ratio_constraint},
            {'type': 'eq', 'fun': control_size_constraint},
        ],
        method='SLSQP',
        tol=1e-10
    )
    # Round sample sizes up to whole subjects.
    return [int(np.ceil(n)) for n in results.x]
class MultipleComparisonCorrection(InitRepr):
"""
Perform multiple comparison adjustment of alpha based on a sequence of
p_values that result from two or more hypothesis tests inference procedures.
param p_values : list[float]
A list of p_values resulting from two or more hypothesis tests.
method : str
One of the following correction methods:
'bonferroni', 'b' : one-step Bonferroni correction
'sidak', 's' : one-step Sidak correction
'fdr_bh', 'bh; : Benjamini/Hochberg (non-negative)
alpha : float in (0, 1)
the desired probability of Type I error
reject_nul: list[bool]
For each probablity, whether or not to reject the null hypothsis given
the updated values for alpha.
"""
__ATTRS__ = ['ntests', 'method', 'alpha_orig', 'alpha_corrected']
def __init__(self, p_values, | |
= "", sPassword = "", cbSize = 0, eFileType = 0):
tdTestGuestCtrlBase.__init__(self);
self.oCreds = tdCtxCreds(sUser, sPassword, sDomain = "");
self.sFile = sFile;
self.cbSize = cbSize;
self.eFileType = eFileType;
class tdTestFileIO(tdTestGuestCtrlBase):
    """
    Test case exercising the IGuestFile object.

    Carries the guest credentials and the path of the guest file to operate on.
    """
    def __init__(self, sFile = "", sUser = "", sPassword = ""):
        tdTestGuestCtrlBase.__init__(self);
        # Path of the guest file this test operates on.
        self.sFile  = sFile;
        # Guest credentials for opening the session (no domain used).
        self.oCreds = tdCtxCreds(sUser, sPassword, sDomain = "");
class tdTestFileQuerySize(tdTestGuestCtrlBase):
    """
    Test case for the file size query API call (fileQuerySize).
    """
    def __init__(self, sFile = "", sUser = "", sPassword = ""):
        tdTestGuestCtrlBase.__init__(self);
        # Guest file whose size will be queried.
        self.sFile  = sFile;
        # Guest credentials for opening the session (no domain used).
        self.oCreds = tdCtxCreds(sUser, sPassword, sDomain = "");
class tdTestFileReadWrite(tdTestGuestCtrlBase):
    """
    Test case for reading from and writing to guest files.

    Open mode, disposition and sharing mode are given as short string codes
    and translated to the corresponding vboxcon enum values on demand.
    """
    def __init__(self, sFile = "", sUser = "", sPassword = "",
                 sOpenMode = "r", sDisposition = "",
                 sSharingMode = "",
                 lCreationMode = 0, cbOffset = 0, cbToReadWrite = 0,
                 aBuf = None):
        tdTestGuestCtrlBase.__init__(self);
        self.oCreds        = tdCtxCreds(sUser, sPassword, sDomain = "");
        self.sFile         = sFile;          # Guest file path.
        self.sOpenMode     = sOpenMode;      # 'r', 'w', 'r+' or 'w+'.
        self.sDisposition  = sDisposition;   # Two-letter open-action code.
        self.sSharingMode  = sSharingMode;   # Currently unused, see getSharingMode.
        self.lCreationMode = lCreationMode;  # File creation mode bits.
        self.cbOffset      = cbOffset;       # Byte offset to seek to first.
        self.cbToReadWrite = cbToReadWrite;  # Number of bytes to transfer.
        self.aBuf          = aBuf;           # Data buffer for write tests.
    def getOpenAction(self):
        """ Converts string disposition to open action enum. """
        dActionNames = {
            'oe': 'FileOpenAction_OpenExisting',
            'oc': 'FileOpenAction_OpenOrCreate',
            'ce': 'FileOpenAction_CreateNew',
            'ca': 'FileOpenAction_CreateOrReplace',
            'ot': 'FileOpenAction_OpenExistingTruncated',
            'oa': 'FileOpenAction_AppendOrCreate',
        };
        # Resolve the attribute lazily so only the matched constant is touched.
        if self.sDisposition in dActionNames:
            return getattr(vboxcon, dActionNames[self.sDisposition]);
        raise base.GenError(self.sDisposition);
    def getAccessMode(self):
        """ Converts open mode to access mode enum. """
        dModeNames = {
            'r':  'FileOpenMode_ReadOnly',
            'w':  'FileOpenMode_WriteOnly',
            'w+': 'FileOpenMode_ReadWrite',
            'r+': 'FileOpenMode_ReadWrite',
        };
        if self.sOpenMode in dModeNames:
            return getattr(vboxcon, dModeNames[self.sOpenMode]);
        raise base.GenError(self.sOpenMode);
    def getSharingMode(self):
        """ Converts the sharing mode. """
        # Only FileSharingMode_All is used by these tests, whatever
        # self.sSharingMode says.
        return vboxcon.FileSharingMode_All;
class tdTestSession(tdTestGuestCtrlBase):
    """
    Test case for guest session handling.
    """
    def __init__(self, sUser = "", sPassword = "", sDomain = "",
                 sSessionName = ""):
        tdTestGuestCtrlBase.__init__(self);
        # Name under which the guest session is opened.
        self.sSessionName = sSessionName;
        self.oCreds = tdCtxCreds(sUser, sPassword, sDomain);
    def getSessionCount(self, oVBoxMgr):
        """
        Helper for returning the number of currently
        opened guest sessions of a VM.
        """
        oGuest = self.oTest.oGuest;
        if oGuest is None:
            return 0;
        return len(oVBoxMgr.getArray(oGuest, 'sessions'));
class tdTestSessionEx(tdTestGuestCtrlBase):
    """
    Test the guest session.

    Drives a sequence of tdSessionStepBase steps inside one freshly created
    guest control session and makes sure the session is closed afterwards.
    """
    def __init__(self, aoSteps = None, enmUser = None):
        tdTestGuestCtrlBase.__init__(self);
        assert enmUser is None; # For later.
        self.enmUser = enmUser;
        # List of tdSessionStepBase objects executed in order.
        self.aoSteps = aoSteps if aoSteps is not None else [];
    def execute(self, oTstDrv, oVmSession, oTxsSession, oTestVm, sMsgPrefix):
        """
        Executes the test.

        Creates a guest session, runs all steps, then closes the session.
        Returns True on success, False on failure.
        """
        #
        # Create a session.
        #
        assert self.enmUser is None; # For later.
        self.oCreds = tdCtxCreds(oTestVm = oTestVm);
        self.setEnvironment(oVmSession, oTxsSession, oTestVm);
        reporter.log2('%s: %s steps' % (sMsgPrefix, len(self.aoSteps),));
        fRc, oCurSession = self.createSession(sMsgPrefix);
        if fRc is True:
            #
            # Execute the tests.
            #
            try:
                fRc = self.executeSteps(oTstDrv, oCurSession, sMsgPrefix);
            except:
                reporter.errorXcpt('%s: Unexpected exception executing test steps' % (sMsgPrefix,));
                fRc = False;
            # Close the session even when a step failed or raised.
            fRc2 = self.closeSession();
            if fRc2 is False:
                reporter.error('%s: Session could not be closed' % (sMsgPrefix,));
                fRc = False;
        else:
            reporter.error('%s: Session creation failed' % (sMsgPrefix,));
            fRc = False;
        return fRc;
    def executeSteps(self, oTstDrv, oGstCtrlSession, sMsgPrefix):
        """
        Executes just the steps.
        Returns True on success, False on test failure.
        """
        fRc = True;
        for (i, oStep) in enumerate(self.aoSteps):
            fRc2 = oStep.execute(oTstDrv, oGstCtrlSession, sMsgPrefix + ', step #%d' % i);
            if fRc2 is True:
                pass;
            elif fRc2 is None:
                # None is the "skip the rest" sentinel (see tdSessionStepBase).
                reporter.log('skipping remaining %d steps' % (len(self.aoSteps) - i - 1,));
                break;
            else:
                # Step failed; keep going so later steps are still exercised.
                fRc = False;
        return fRc;
    @staticmethod
    def executeListTestSessions(aoTests, oTstDrv, oVmSession, oTxsSession, oTestVm, sMsgPrefix):
        """
        Works thru a list of tdTestSessionEx object.

        Returns a (fRc, oTxsSession) status tuple as expected by the caller.
        """
        fRc = True;
        for (i, oCurTest) in enumerate(aoTests):
            try:
                fRc2 = oCurTest.execute(oTstDrv, oVmSession, oTxsSession, oTestVm, '%s, test %#d' % (sMsgPrefix, i,));
                if fRc2 is not True:
                    fRc = False;
            except:
                reporter.errorXcpt('Unexpected exception executing test #%d' % (i,));
                fRc = False;
        return (fRc, oTxsSession);
class tdSessionStepBase(object):
    """
    Abstract base class for guest control session test steps.
    """
    def execute(self, oTstDrv, oGstCtrlSession, sMsgPrefix):
        """
        Executes the test step.
        Returns True on success.
        Returns False on failure (must be reported as error).
        Returns None if to skip the remaining steps.
        """
        _ = oTstDrv;
        _ = oGstCtrlSession;
        # Subclasses must override; hitting this is itself a test error.
        reporter.error('%s: Missing execute implementation: %s' % (sMsgPrefix, self,));
        return False;
class tdStepRequireMinimumApiVer(tdSessionStepBase):
    """
    Special test step which will cause executeSteps to skip the remaining step
    if the VBox API is too old:
    """
    def __init__(self, fpMinApiVer):
        # Minimum VBox API version (float) required by the following steps.
        self.fpMinApiVer = fpMinApiVer;
    def execute(self, oTstDrv, oGstCtrlSession, sMsgPrefix):
        """ Returns None if API version is too old, otherwise True. """
        _ = oGstCtrlSession;
        _ = sMsgPrefix;
        if oTstDrv.fpApiVer < self.fpMinApiVer:
            return None; # Special return value. Don't use elsewhere.
        return True;
#
# Scheduling Environment Changes with the Guest Control Session.
#
class tdStepSessionSetEnv(tdSessionStepBase):
    """
    Guest session environment: schedule putenv
    """
    def __init__(self, sVar, sValue, hrcExpected = 0):
        # Environment variable name to set.
        self.sVar = sVar;
        # Value to assign to the variable.
        self.sValue = sValue;
        # Expected COM status code; 0 means the call is expected to succeed.
        self.hrcExpected = hrcExpected;
    def execute(self, oTstDrv, oGstCtrlSession, sMsgPrefix):
        """
        Executes the step.
        Returns True on success, False on test failure.
        """
        reporter.log2('tdStepSessionSetEnv: sVar=%s sValue=%s hrcExpected=%#x' % (self.sVar, self.sValue, self.hrcExpected,));
        try:
            # The scheduling API was renamed in VBox API 5.0.
            if oTstDrv.fpApiVer >= 5.0:
                oGstCtrlSession.environmentScheduleSet(self.sVar, self.sValue);
            else:
                oGstCtrlSession.environmentSet(self.sVar, self.sValue);
        except vbox.ComException, oXcpt:  # NOTE: Python 2 'except E, e' syntax.
            # Is this an expected failure?
            if vbox.ComError.equal(oXcpt, self.hrcExpected):
                return True;
            reporter.errorXcpt('%s: Expected hrc=%#x (%s) got %#x (%s) instead (setenv %s=%s)'
                               % (sMsgPrefix, self.hrcExpected, vbox.ComError.toString(self.hrcExpected),
                                  vbox.ComError.getXcptResult(oXcpt),
                                  vbox.ComError.toString(vbox.ComError.getXcptResult(oXcpt)),
                                  self.sVar, self.sValue,));
            return False;
        except:
            reporter.errorXcpt('%s: Unexpected exception in tdStepSessionSetEnv::execute (%s=%s)'
                               % (sMsgPrefix, self.sVar, self.sValue,));
            return False;
        # Should we succeed?  Reaching here means the call did not raise.
        if self.hrcExpected != 0:
            reporter.error('%s: Expected hrcExpected=%#x, got S_OK (putenv %s=%s)'
                           % (sMsgPrefix, self.hrcExpected, self.sVar, self.sValue,));
            return False;
        return True;
class tdStepSessionUnsetEnv(tdSessionStepBase):
    """
    Guest session environment: schedule unset.
    """
    def __init__(self, sVar, hrcExpected = 0):
        # Environment variable name to unset.
        self.sVar = sVar;
        # Expected COM status code; 0 means the call is expected to succeed.
        self.hrcExpected = hrcExpected;
    def execute(self, oTstDrv, oGstCtrlSession, sMsgPrefix):
        """
        Executes the step.
        Returns True on success, False on test failure.
        """
        reporter.log2('tdStepSessionUnsetEnv: sVar=%s hrcExpected=%#x' % (self.sVar, self.hrcExpected,));
        try:
            # The scheduling API was renamed in VBox API 5.0.
            if oTstDrv.fpApiVer >= 5.0:
                oGstCtrlSession.environmentScheduleUnset(self.sVar);
            else:
                oGstCtrlSession.environmentUnset(self.sVar);
        except vbox.ComException, oXcpt:  # NOTE: Python 2 'except E, e' syntax.
            # Is this an expected failure?
            if vbox.ComError.equal(oXcpt, self.hrcExpected):
                return True;
            reporter.errorXcpt('%s: Expected hrc=%#x (%s) got %#x (%s) instead (unsetenv %s)'
                               % (sMsgPrefix, self.hrcExpected, vbox.ComError.toString(self.hrcExpected),
                                  vbox.ComError.getXcptResult(oXcpt),
                                  vbox.ComError.toString(vbox.ComError.getXcptResult(oXcpt)),
                                  self.sVar,));
            return False;
        except:
            reporter.errorXcpt('%s: Unexpected exception in tdStepSessionUnsetEnv::execute (%s)'
                               % (sMsgPrefix, self.sVar,));
            return False;
        # Should we succeed?  Reaching here means the call did not raise.
        if self.hrcExpected != 0:
            reporter.error('%s: Expected hrcExpected=%#x, got S_OK (unsetenv %s)'
                           % (sMsgPrefix, self.hrcExpected, self.sVar,));
            return False;
        return True;
class tdStepSessionBulkEnv(tdSessionStepBase):
    """
    Guest session environment: Bulk environment changes.
    """
    def __init__(self, asEnv = None, hrcExpected = 0):
        # List of 'VAR=value' strings to write as the whole change set.
        self.asEnv = asEnv if asEnv is not None else [];
        # Expected COM status code; 0 means the write is expected to succeed.
        self.hrcExpected = hrcExpected;
    def execute(self, oTstDrv, oGstCtrlSession, sMsgPrefix):
        """
        Executes the step.
        Returns True on success, False on test failure.
        """
        reporter.log2('tdStepSessionBulkEnv: asEnv=%s hrcExpected=%#x' % (self.asEnv, self.hrcExpected,));
        try:
            # Property was renamed 'environment' -> 'environmentChanges' in 5.0.
            if oTstDrv.fpApiVer >= 5.0:
                oTstDrv.oVBoxMgr.setArray(oGstCtrlSession, 'environmentChanges', self.asEnv);
            else:
                oTstDrv.oVBoxMgr.setArray(oGstCtrlSession, 'environment', self.asEnv);
        except vbox.ComException, oXcpt:  # NOTE: Python 2 'except E, e' syntax.
            # Is this an expected failure?
            if vbox.ComError.equal(oXcpt, self.hrcExpected):
                return True;
            reporter.errorXcpt('%s: Expected hrc=%#x (%s) got %#x (%s) instead (asEnv=%s)'
                               % (sMsgPrefix, self.hrcExpected, vbox.ComError.toString(self.hrcExpected),
                                  vbox.ComError.getXcptResult(oXcpt),
                                  vbox.ComError.toString(vbox.ComError.getXcptResult(oXcpt)),
                                  self.asEnv,));
            return False;
        except:
            reporter.errorXcpt('%s: Unexpected exception writing the environmentChanges property (asEnv=%s).'
                               % (sMsgPrefix, self.asEnv));
            return False;
        return True;
class tdStepSessionClearEnv(tdStepSessionBulkEnv):
    """
    Guest session environment: clears the scheduled environment changes by
    scheduling an empty bulk change set.
    """
    def __init__(self):
        # An empty asEnv list wipes all previously scheduled changes.
        tdStepSessionBulkEnv.__init__(self);
class tdStepSessionCheckEnv(tdSessionStepBase):
    """
    Check the currently scheduled environment changes of a guest control session
    against an expected list of 'VAR=value' entries.
    """
    def __init__(self, asEnv = None):
        # Exact set of entries the session is expected to have scheduled.
        self.asEnv = asEnv if asEnv is not None else [];
    def execute(self, oTstDrv, oGstCtrlSession, sMsgPrefix):
        """
        Executes the step.
        Returns True on success, False on test failure.
        """
        reporter.log2('tdStepSessionCheckEnv: asEnv=%s' % (self.asEnv,));
        #
        # Fetch the scheduled changes (the property name depends on API version).
        #
        try:
            if oTstDrv.fpApiVer >= 5.0:
                asCurEnv = oTstDrv.oVBoxMgr.getArray(oGstCtrlSession, 'environmentChanges');
            else:
                asCurEnv = oTstDrv.oVBoxMgr.getArray(oGstCtrlSession, 'environment');
        except:
            reporter.errorXcpt('%s: Unexpected exception reading the environmentChanges property.' % (sMsgPrefix,));
            return False;
        #
        # Every expected entry must be removable from a copy of the current
        # environment; whatever remains afterwards is unexpected.
        #
        fRc = True;
        asLeftOver = list(asCurEnv); # just in case asCurEnv is immutable
        for sExpected in self.asEnv:
            try:
                asLeftOver.remove(sExpected);
            except:
                reporter.error('%s: Expected "%s" to be in the resulting environment' % (sMsgPrefix, sExpected,));
                fRc = False;
        for sUnexpected in asLeftOver:
            reporter.error('%s: Unexpected "%s" in the resulting environment' % (sMsgPrefix, sUnexpected,));
            fRc = False;
        if fRc is not True:
            reporter.log2('%s: Current environment: %s' % (sMsgPrefix, asCurEnv));
        return fRc;
#
# File system object statistics (i.e. stat()).
#
class tdStepStat(tdSessionStepBase):
"""
Stats a file | |
<gh_stars>1-10
"""
CanvasSync by <NAME>
February 2017
---------------------------------------------
user_prompter.py, module
A collection of functions used to prompt the user for settings.
"""
# TODO
# - Comments
# - Make a Y/N function to reduce code redundancy
# Future
from __future__ import print_function
# Inbuilt modules
import glob
import os
# Check for UNIX or Windows platform
try:
import readline
unix = True
except ImportError:
unix = False
# If python 2.7, use raw_input(), otherwise use input()
from six.moves import input
# CanvasSync module import
from CanvasSync.utilities import helpers
from CanvasSync.utilities.ANSI import ANSI
def show_main_screen(settings_file_exists):
    """
    Prompt the user for initial choice of action. Does not allow Synchronization before settings file has been set
    """
    valid_choices = (0, 1, 2, 3, 4)
    # (prefix, label, color) triples producing the menu exactly as before.
    menu_items = ((u"\n\t1) ", u"Synchronize my Canvas", u"blue"),
                  (u"\t2) ", u"Set new settings", u"white"),
                  (u"\t3) ", u"Show current settings", u"white"),
                  (u"\t4) ", u"Show help", u"white"),
                  (u"\n\t0) ", u"Quit", u"yellow"))
    choice = -1
    to_do = "quit"
    while choice not in valid_choices:
        helpers.clear_console()
        # Load version string
        import CanvasSync
        version = CanvasSync.__version__
        title = u"CanvasSync, "
        pretty_string = u"-" * (len(title) + len(version))
        print(ANSI.format(u"%s\n%s%s\n%s" % (pretty_string, title, version, pretty_string), u"file"))
        print(ANSI.format(u"Automatically synchronize modules, assignments & files located on a Canvas web server.",
                          u"announcer"))
        print(ANSI.format(u"\nWhat would you like to do?", u"underline"))
        for prefix, label, color in menu_items:
            print(prefix + ANSI.format(label, color))
        try:
            choice = int(input(u"\nChoose number: "))
        except ValueError:
            continue
        if choice not in valid_choices:
            continue
        if choice == 1 and not settings_file_exists:
            # Cannot sync before settings exist; redirect to setup instead.
            to_do = u"set_settings"
        else:
            to_do = {0: u"quit", 1: u"sync", 2: u"set_settings",
                     3: u"show_settings", 4: u"show_help"}[choice]
    return to_do
def ask_for_sync_path():
    """
    Prompt the user for a path to a folder that will be used to synchronize the Canvas page into
    The path should point into a directory along with a sub-folder name of a folder not already existing.
    This folder will be created using the os module.
    """
    # Enable auto-completion of path and cursor movement using the readline and glob modules
    def path_completer(text, state):
        # readline completer: invoked repeatedly with increasing 'state'
        # until an exception/falsy return ends the completion cycle.
        if u"~" in text:
            text = text.replace(u"~", os.path.expanduser(u"~"))
        paths = glob.glob(u"%s*" % text)
        # The trailing False acts as an end-of-matches sentinel.
        # NOTE(review): appears to rely on readline swallowing the resulting
        # exception when paths[state] is False -- confirm before restructuring.
        paths.append(False)
        return os.path.abspath(paths[state]) + u'/'
    if unix:
        # Tab-completion is only wired up when readline imported successfully.
        readline.set_completer_delims(u' \t\n;')
        readline.parse_and_bind(u"tab: complete")
        readline.set_completer(path_completer)
    found = False
    # Keep asking until a valid path has been entered by the user
    while not found:
        sync_path = input(u"\nEnter a relative or absolute path to sync to (~/Desktop/Canvas etc.):\n$ ")
        # Expand tilde if present in the sync_path
        if u"~" in sync_path:
            sync_path = sync_path.replace(u"~", os.path.expanduser(u"~"))
        sync_path = os.path.abspath(sync_path)
        # Only the parent directory must already exist; the leaf folder
        # itself is created later by the sync logic.
        if not os.path.exists(os.path.split(sync_path)[0]):
            print(u"\n[ERROR] Base path '%s' does not exist." % os.path.split(sync_path)[0])
        else:
            found = True
    if unix:
        # Disable path auto-completer
        readline.parse_and_bind(u'set disable-completion on')
    return sync_path
def ask_for_domain():
    """
    Prompt the user for a Canvas domain.
    To ensure that the API calls are made on an encrypted SSL connection the initial 'https://' is pre-specified.
    To ensure that the user input is 1) a valid URL and 2) a URL representing a Canvas web server request is used
    to fetch a resources on the Canvas page. If the GET requests fails the URL was not valid. If the server returns
    a 404 unauthenticated error the domain is very likely to be a Canvas server, if anything else is returned the
    URL points to a correct URL that is not a Canvas server.
    """
    # Loop until helpers.validate_domain accepts the entered domain.
    while True:
        domain = u"https://" + input(u"\nEnter the Canvas domain of your institution:\n$ https://")
        if helpers.validate_domain(domain):
            return domain
def ask_for_token(domain):
    """
    Prompt the user for an authentication token.
    The token must be generated on the Canvas web page when login in under the "Settings" menu.
    To ensure that the entered token is valid, a request GET call is made on a resource that requires authentication
    on the server. If the server responds with the resource the token is valid.
    """
    # Loop until helpers.validate_token accepts the entered token.
    while True:
        token = input(u"\nEnter authentication token (see 'Setup' section on https://github.com/perslev/CanvasSync for details):\n$ ")
        if helpers.validate_token(domain, token):
            return token
def ask_for_courses(settings, api):
    """
    Interactively ask the user which Canvas courses should be synchronized.

    Parameters
    ----------
    settings : Settings object exposing use_nicknames and print_settings()
    api      : API object exposing get_courses()

    Returns a list of the selected course names/codes.
    """
    courses = api.get_courses()
    # Skip entries without a course code (e.g. restricted/invited courses).
    courses = [course for course in courses if 'course_code' in course]
    if settings.use_nicknames:
        courses = [course[u"name"] for course in courses]
    else:
        courses = [course[u"course_code"].split(";")[-1] for course in courses]
    # One selection flag per course; everything selected by default.
    choices = [True] * len(courses)
    choice = -1
    while choice != 0:
        settings.print_settings(clear=True)
        print(ANSI.format(u"\n\nPlease choose which courses you would like CanvasSync to sync for you:\n", u"white"))
        print(ANSI.format(u"Sync this item\tNumber\tCourse Title", u"blue"))
        for index, course in enumerate(courses):
            print(u"%s\t\t[%s]\t%s" % (ANSI.format(str(choices[index]), u"green" if choices[index] else u"red"),
                                       index+1, courses[index]))
        print(u"\n\n\t\t[%s]\t%s" % (0, ANSI.format(u"Confirm selection (at least one course required)", "blue")))
        print(u"\t\t[%s]\t%s" % (-1, ANSI.format(u"Select all", u"green")))
        print(u"\t\t[%s]\t%s" % (-2, ANSI.format(u"Deselect all", u"red")))
        try:
            choice = int(input(u"\nChoose number: "))
            if choice < -2 or choice > len(courses):
                continue
        except ValueError:
            continue
        if choice == 0:
            # Confirmation requires at least one selected course.
            if sum(choices) == 0:
                choice = -1
                continue
            break
        elif choice == -1:
            choices = [True] * len(courses)
        elif choice == -2:
            choices = [False] * len(courses)
        else:
            # Toggle the selection flag of the chosen course.
            # (Debug print of the choices list removed.)
            choices[choice-1] = not choices[choice-1]
    return [course for course, selected in zip(courses, choices) if selected]
def ask_for_advanced_settings(settings):
    """
    Ask whether the user wants to review the advanced settings.
    Returns True to show them, False to accept the defaults.
    """
    while True:
        settings.print_settings(clear=True)
        print(ANSI.format(u"\n\nAll mandatory settings are set. Do you wish see advanced settings?",
                          u"announcer"))
        print(ANSI.format(u"\n[1]\tShow advanced settings (recommended)", u"bold"))
        print(ANSI.format(u"[2]\tUse default settings", u"bold"))
        try:
            choice = int(input(u"\nChoose number: "))
        except ValueError:
            continue
        if choice == 1:
            return True
        if choice == 2:
            return False
        # Anything else: redraw the menu and ask again.
def ask_for_module_settings(module_settings, settings):
    """
    Let the user toggle which Module item types CanvasSync should sync.
    Mutates and returns the module_settings dict of item-name -> bool.
    """
    while True:
        settings.print_advanced_settings(clear=True)
        print(ANSI.format(u"\n\nModule settings", u"announcer"))
        print(ANSI.format(u"In Canvas, 'Modules' may contain various items such as files, HTML pages of\n"
                          u"exercises or reading material as well as links to external web-pages.\n\n"
                          u"Below you may specify, if you would like CanvasSync to avoid syncing some of these items.\n"
                          u"OBS: If you chose 'False' to all items, Modules will be skipped all together.", u"white"))
        print(ANSI.format(u"\nSync this item\tNumber\t\tItem", u"blue"))
        item_names = list(module_settings.keys())
        for number, item_name in enumerate(item_names, start=1):
            enabled = module_settings[item_name]
            print(u"%s\t\t[%s]\t\t%s" % (ANSI.format(str(enabled), u"green"
                                                     if enabled else u"red"),
                                         number, item_name))
        print(u"\n\t\t[%s]\t\t%s" % (0, ANSI.format(u"Confirm selection", u"blue")))
        try:
            choice = int(input(u"\nChoose number: "))
        except ValueError:
            continue
        if choice == 0:
            break
        if 1 <= choice <= len(item_names):
            # Flip the chosen item's flag (same toggle expression as before).
            key = item_names[choice-1]
            module_settings[key] = module_settings[key] is not True
    return module_settings
def ask_for_assignment_sync(settings):
    """
    Ask whether CanvasSync should synchronize assignments.
    Returns True to sync them, False otherwise.
    """
    while True:
        settings.print_advanced_settings(clear=True)
        print(ANSI.format(u"\n\nAssignments settings", u"announcer"))
        print(ANSI.format(u"Would you like CanvasSync to synchronize assignments?\n\n"
                          u"The assignment description will be downloaded as a HTML to be viewed offline\n"
                          u"and files hosted on the Canvas server that are described in the assignment\n"
                          u"description section will be downloaded to the same folder.\n", u"white"))
        print(ANSI.format(u"1) Sync assignments (default)", u"bold"))
        print(ANSI.format(u"2) Do not sync assignments", u"bold"))
        try:
            choice = int(input(u"\nChoose number: "))
        except ValueError:
            continue
        if choice == 1:
            return True
        if choice == 2:
            return False
        # Anything else: redraw the menu and ask again.
def ask_for_download_linked(settings):
    """
    Ask whether files linked in assignment descriptions but hosted outside
    Canvas should also be downloaded. Returns True to enable, False otherwise.
    """
    while True:
        settings.print_advanced_settings(clear=True)
        print(ANSI.format(u"\n\nAssignments settings", u"announcer"))
        print(ANSI.format(u"You have chosen to synchronise assignments. URLs detected in the\n"
                          u"description field that point to files on Canvas will be downloaded\n"
                          u"to the assignment folder.\n\n"
                          u"CanvasSync may also attempt to download linked files that are NOT\n"
                          u"hosted on the Canvas server itself. CanvasSync is looking for URLs that\n"
                          u"end in a filename to avoid downloading other linked material such as\n"
                          u"web-sites. However, be aware that errors could occur.\n"
                          u"\nDo you wish to enable this feature?\n", u"white"))
        print(ANSI.format(u"1) Enable linked file downloading (default)", u"bold"))
        print(ANSI.format(u"2) Disable linked file downloading", u"bold"))
        try:
            choice = int(input(u"\nChoose number: "))
        except ValueError:
            continue
        if choice == 1:
            return True
        if choice == 2:
            return False
        # Anything else: redraw the menu and ask again.
def ask_for_avoid_duplicates(settings):
choice = -1
while choice not in (1, 2):
settings.print_advanced_settings(clear=True)
print(ANSI.format(u"\n\nVarious files settings", u"announcer"))
print(ANSI.format(u"In addition to synchronizing modules and assignments,\n"
u"CanvasSync will sync files located under the 'Files'\n"
u"section in Canvas into a 'Various Files' folder.\n"
u"Often some of the files stored under 'Files' is mentioned in\n"
u"modules and assignments and may thus already exist in another\n"
u"folder after running CanvasSync.\n\n"
u"Do you want CanvasSync to avoid duplicates by only downloading\n"
u"files into | |
<reponame>shJimmyw/MacAndCheese
import discord
import DiscordCredentials
import asyncio
import requests
import json
import string
import youtube_dl
import time
import random
import logging
from player import vidPlayer
from discord.ext.commands import Bot
# Route discord.py's internal logging to a file for debugging.
logger = logging.getLogger('discord')
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
# NOTE(review): 'client' is a bare discord.Client that is never started/logged
# in anywhere visible here; the bot itself is 'MacAndCheese' below.
client = discord.Client()
# The bot instance; all commands are registered with the '!' prefix.
MacAndCheese = Bot(command_prefix="!")
# Music/video player helper bound to this bot.
vid = vidPlayer(MacAndCheese)
#Bot event handling
"""
@MacAndCheese.event
@asyncio.coroutine
def on_voice_state_update(before, after):
Detects when a user's voice state changes.
If the user is in a different voice channel than before, a message is sent telling other users which channel the user has joined.
If the user is no longer in any voice channel, a message is sent telling other users which channel the user has left
before_channel = before.voice.voice_channel
after_channel = after.voice.voice_channel
if after_channel != None and before_channel != after_channel:
yield from MacAndCheese.send_message(after.server, after.mention + " has joined the voice channel: " + after_channel.name)
elif after_channel == None:
yield from MacAndCheese.send_message(before.server, before.mention + " has left the voice channel: " + before_channel.name)
"""
@MacAndCheese.event
@asyncio.coroutine
def on_member_join(member):
    """Upon a new user joining the server, a welcome message is sent to the
    other users via the server's default channel.

    Fix: member.server is a Server object, so concatenating it into the
    message string raised a TypeError; use its .name attribute instead.
    """
    yield from MacAndCheese.send_message(member.server,
                                         "Welcome " + member.mention + " to " + member.server.name)
#Bot commands
@MacAndCheese.command()
@asyncio.coroutine
def commands(*args):
    """!commands
    This command asks the bot to display a list of commands and their expected functionality.
    """
    # One entry per command; joining the pieces yields the exact same
    # help message the original string concatenation produced.
    help_entries = [
        "!commands -- Displays all commands\n",
        "!toptenbans -- Displays current 10 most banned champions\n",
        "!champbuild -- Displays most winning final build for the given champion\n",
        "!champstart -- Displays most winning starting items for the given champion\n",
        "!matchup -- Displays the win percentage and KDA for a given champion and enemy champion\n",
        "!add -- Takes a youtube url and adds it to the bot playlist\n",
        "!play -- Plays all videos currently in the playlist\n",
        "!youtube -- Plays a youtube video given a youtube url\n",
        "!nowplaying -- Displays title of currently playing youtube video\n",
        "!getVolume -- Get current volume of video\n",
        "!volume -- Sets the volume of the player to a percentage\n",
        "!playPause -- Pauses or Resumes the player\n",
        "!skip -- Skips to next video\n",
        "!disconnect -- Disconnects this bot from the voice channel\n",
        "!banhammer -- Bans a member from your server for a minute\n",
        "!clear -- Searches past messages and deletes those that contain the given keyword\n",
        "!dice -- Rolls a dice between 1 and 6 or a given int greater than 1",
    ]
    yield from MacAndCheese.say("".join(help_entries))
@MacAndCheese.command()
@asyncio.coroutine
def dice(range: int=None):
    """!dice
    Args:
        range (int): Upper Limit in range of numbers
    With no argument the bot rolls an integer between 1 and 6 inclusive.
    With an argument N > 1 it rolls between 1 and N inclusive; for N <= 1 the
    user is asked to provide a valid integer greater than 1.
    """
    # NOTE: parameter name shadows the builtin 'range'; kept for interface
    # compatibility (discord.py binds it by position/annotation).
    if range is None:
        upper_limit = 6
    elif range > 1:
        upper_limit = range
    else:
        yield from MacAndCheese.say("Pick an integer greater than 1")
        return
    output = random.randint(1, upper_limit)
    yield from MacAndCheese.say("Rolling a dice...\nDice landed on: " + str(output))
@MacAndCheese.command(pass_context=True)
@asyncio.coroutine
def banhammer(context, user: str=None):
    """!banhammer
    Args:
        user (str): Name of user intended to be banned
    Bans the named member for one minute, then unbans them. Requires the
    invoker to hold the ban_members permission and the bot to be able to ban.

    Fix: the original called asyncio.sleep(60) without 'yield from', which
    only created (and discarded) a coroutine object -- the member was unbanned
    immediately instead of after a minute.
    """
    if user is None:
        yield from MacAndCheese.say("Who do you want me to ban?")
        return
    server = context.message.server
    channel = context.message.channel
    # The invoker must themselves be allowed to ban members.
    if channel.permissions_for(context.message.author).ban_members == False:
        yield from MacAndCheese.say("You do not have permission to ban " + user + ". Please contact your administrator for details.")
        return
    member = server.get_member_named(user)
    if member is None:
        yield from MacAndCheese.say("No member named " + user)
        return
    try:
        # delete_message_days=0: keep the member's message history.
        yield from MacAndCheese.ban(member, 0)
        # Actually wait the minute before lifting the ban.
        yield from asyncio.sleep(60)
        yield from MacAndCheese.unban(server, member)
    except Exception:
        yield from MacAndCheese.say("I do not have permission to ban users")
@MacAndCheese.command(pass_context=True)
@asyncio.coroutine
def clear(context, keyword: str=None, numMessages: int=None):
    """!clear
    Args:
        keyword (str): Term in message that the user wants to delete
        numMessages (int): The number of messages to search from, defaults to 100 otherwise.
    Deletes messages containing the given keyword from the last numMessages
    messages; with no keyword, all messages in range are deleted.

    Fixes: the keyword argument was accepted but never used (everything was
    purged regardless), and the purge went through the separate, never-started
    'client' object instead of the bot itself.
    """
    channel = context.message.channel
    if channel.permissions_for(context.message.author).manage_messages == False:
        yield from MacAndCheese.say("You do not have permission to delete messages. Please contact your administrator for details.")
        return
    if numMessages is None:
        numMessages = 100
    # Only delete messages containing the keyword when one was supplied;
    # otherwise purge everything in range.
    check = None
    if keyword is not None:
        check = lambda message: keyword in message.content
    try:
        listOfDeleted = yield from MacAndCheese.purge_from(channel, limit=numMessages, check=check)
        yield from MacAndCheese.say("Deleted " + str(len(listOfDeleted)))
    except Exception:
        yield from MacAndCheese.say("I do not have permission to delete messages at this time")
#League of Legends related commands
@MacAndCheese.command()
@asyncio.coroutine
def toptenbans(*args):
    """!toptenbans
    Fetches the ten most banned champions from the champion.gg API and posts
    each with its ban rate and win percentage.
    """
    response = requests.get('http://api.champion.gg/stats/champs/mostBanned?api_key=' + DiscordCredentials.championgg_token + '&page=1&limit=10')
    parsedData = json.loads(response.text)
    if "error" in parsedData:
        yield from MacAndCheese.say("Database Error!")
        return
    # Build one section per champion; the joined result is identical to the
    # original incremental concatenation.
    sections = []
    for i in range(0, 10):
        champ = parsedData['data'][i]
        sections.append('---\n'
                        + str(i + 1) + ') ' + champ['name'] + '/' + champ['role']
                        + ' / Ban Rate: ' + str(champ['general']['banRate'])
                        + ' / Win Rate: ' + str(champ['general']['winPercent']) + '\n')
    yield from MacAndCheese.say('The top ten bans are:\n' + ''.join(sections))
@MacAndCheese.command()
@asyncio.coroutine
def champbuild(champion: str=None):
    """!champbuild
    args:
        champion (str): The name of the champion to lookup
    Looks up the most winning finished build for the champion on champion.gg,
    resolves each item id against the Riot static-data API, and posts the
    item names plus the build's win percentage.

    Fix: iterate over the actual item list instead of a hard-coded range(0, 6),
    which raised an IndexError whenever the API returned fewer than six items.
    """
    if champion is None:
        yield from MacAndCheese.say("What champion am I looking up?")
        return
    # Normalize capitalization so e.g. "lee sin" resolves on champion.gg.
    champion = string.capwords(champion)
    data = requests.get('http://api.champion.gg/champion/' + champion + '/items/finished/mostWins?api_key=' + DiscordCredentials.championgg_token)
    parsedData = json.loads(data.text)
    if "error" in parsedData:
        yield from MacAndCheese.say("Champion does not exist!")
        return
    yield from MacAndCheese.say("Most winning build for: " + champion)
    itemSet = ""
    for itemId in parsedData[0]['items']:
        itemData = requests.get('https://global.api.pvp.net/api/lol/static-data/na/v1.2/item/' + str(itemId) +
                                '?api_key=' + DiscordCredentials.riot_token)
        parsedItem = json.loads(itemData.text)
        itemSet += parsedItem['name'] + "\n" + parsedItem['plaintext'] + "\n---\n"
    yield from MacAndCheese.say(itemSet + "\n" + str(parsedData[0]['winPercent']) + " Win Percentage")
@MacAndCheese.command()
@asyncio.coroutine
def champstart(champion: str=None):
    """!champstart
    args:
        champion (str): The name of the champion to lookup
    Looks up the most winning starting items for the champion on champion.gg,
    resolves each item id against the Riot static-data API, and posts the
    item names plus the win percentage.

    Fixes: normalize the champion name with string.capwords (consistent with
    !champbuild, so lower-case input resolves), and drop the copy-pasted
    "Most winning build for" message that duplicated the starters headline.
    """
    if champion is None:
        yield from MacAndCheese.say("What champion am I looking up?")
        return
    champion = string.capwords(champion)
    data = requests.get('http://api.champion.gg/champion/' + champion + '/items/starters/mostWins?api_key=' + DiscordCredentials.championgg_token)
    parsedData = json.loads(data.text)
    if "error" in parsedData:
        yield from MacAndCheese.say("Champion does not exist!")
        return
    yield from MacAndCheese.say("Most winning starting items for: " + champion)
    itemSet = ""
    for itemId in parsedData[0]['items']:
        itemData = requests.get('https://global.api.pvp.net/api/lol/static-data/na/v1.2/item/' + str(itemId) +
                                '?api_key=' + DiscordCredentials.riot_token)
        parsedItem = json.loads(itemData.text)
        itemSet += parsedItem['name'] + "\n---\n"
    yield from MacAndCheese.say(itemSet + "\n" + str(parsedData[0]['winPercent']) + " Win Percentage")
@MacAndCheese.command()
@asyncio.coroutine
def matchup(player: str=None, opponent: str=None):
    """!matchup
    args:
        player (str): The name of the champion the user is playing
        opponent (str): The enemy champion the user is comparing the first one with
    Queries champion.gg for head-to-head statistics between the two champions
    and reports the first champion's KDA (statScore) and win rate versus the
    second. Sends an error message when no matchup data is recorded.
    """
    if player == None or opponent == None:
        yield from MacAndCheese.say("I need the names of two champions. Try again")
        return
    # champion.gg keys champions by capitalized name (e.g. "Lee Sin").
    player = string.capwords(player)
    opponent = string.capwords(opponent)
    data = requests.get('http://api.champion.gg/champion/' + player + '/matchup/' + opponent +'?api_key=' + DiscordCredentials.championgg_token)
    parsedData = json.loads(data.text)
    if "error" in parsedData:
        yield from MacAndCheese.say("No matchup found!")
    else:
        yield from MacAndCheese.say(player + " has a KDA of " + str(parsedData[0]['statScore']) + " and a win rate of " + str(parsedData[0]['winRate']) +
                                    "% versus "+ opponent)
@MacAndCheese.command()
@asyncio.coroutine
def add(url: str=None):
    """!add
    args:
        url (str): The url of the video to play
    Appends the given url to the bot's playback queue, complaining when no
    url was supplied.
    """
    if url is not None:
        yield from vid.playlist(url)
    else:
        yield from MacAndCheese.say("I need a url of a youtube video")
#Youtube Commands
@MacAndCheese.command(pass_context=True)
@asyncio.coroutine
def youtube(context, url: str=None):
"""!youtube
args:
url (str): The url of the video to play
Plays the video at the url to the members of the voice channel provided.
"""
if url == | |
# Copyright 2019-2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test Asyncio AWS Batch
This test suite uses a large suite of moto mocks for the AWS batch
infrastructure. These infrastructure mocks are derived from the moto test
suite for testing the batch client. The test infrastructure should be used
according to the moto license (Apache-2.0).
.. seealso::
- https://github.com/spulec/moto/pull/1197/files
- https://github.com/spulec/moto/blob/master/tests/test_batch/test_batch.py
"""
import asyncio
import inspect
import time
from contextlib import asynccontextmanager
from unittest.mock import MagicMock
import botocore.exceptions
import pytest
from pytest_aiomoto.aiomoto_fixtures import AioAwsBatchClients
from pytest_aiomoto.aiomoto_fixtures import AioAwsBatchInfrastructure
from pytest_aiomoto.aiomoto_fixtures import aio_batch_infrastructure
from aio_aws import aio_aws_batch
from aio_aws.aio_aws_batch import AWSBatchConfig
from aio_aws.aio_aws_batch import RetryError
from aio_aws.aio_aws_batch import aio_batch_cancel_jobs
from aio_aws.aio_aws_batch import aio_batch_describe_jobs
from aio_aws.aio_aws_batch import aio_batch_get_logs
from aio_aws.aio_aws_batch import aio_batch_job_cancel
from aio_aws.aio_aws_batch import aio_batch_job_logs
from aio_aws.aio_aws_batch import aio_batch_job_manager
from aio_aws.aio_aws_batch import aio_batch_job_submit
from aio_aws.aio_aws_batch import aio_batch_job_terminate
from aio_aws.aio_aws_batch import aio_batch_job_waiter
from aio_aws.aio_aws_batch import aio_batch_run_jobs
from aio_aws.aio_aws_batch import aio_batch_submit_jobs
from aio_aws.aio_aws_batch import aio_batch_terminate_jobs
from aio_aws.aio_aws_batch import aio_batch_update_jobs
from aio_aws.aio_aws_batch import batch_cancel_jobs
from aio_aws.aio_aws_batch import batch_get_logs
from aio_aws.aio_aws_batch import batch_monitor_jobs
from aio_aws.aio_aws_batch import batch_submit_jobs
from aio_aws.aio_aws_batch import batch_terminate_jobs
from aio_aws.aio_aws_batch import batch_update_jobs
from aio_aws.aws_batch_models import AWSBatchJob
from aio_aws.aws_batch_models import AWSBatchJobStates
from aio_aws.utils import datetime_to_unix_milliseconds
from aio_aws.utils import response_success
from aio_aws.utils import utc_now
def test_async_aws_batch():
    """Sanity check: the aio_aws_batch import resolves to a module object."""
    module_under_test = aio_aws_batch
    assert inspect.ismodule(module_under_test)
@pytest.fixture
async def aio_aws_batch_infrastructure(
    aio_aws_batch_clients: AioAwsBatchClients,
    compute_env_name: str,
    job_queue_name: str,
    job_definition_name: str,
) -> AioAwsBatchInfrastructure:
    """Provision the moto-mocked AWS Batch stack (compute environment, job
    queue, and job definition) for the clients' region and return its
    resource handles."""
    aws_region = aio_aws_batch_clients.region
    aws_resources = await aio_batch_infrastructure(
        aio_aws_batch_clients,
        aws_region,
        compute_env_name,
        job_queue_name,
        job_definition_name,
    )
    return aws_resources
@pytest.fixture
def batch_config(
    aio_aws_session, aio_aws_batch_server, aio_aws_logs_server, test_aio_jobs_db
) -> AWSBatchConfig:
    """An AWSBatchConfig wired to the mocked batch/logs server endpoints.

    The subclass overrides the client factories so every batch and logs
    client talks to the local mock servers; the short pause/jitter values
    keep the polling loops fast in tests.
    """
    class TestBatchConfig(AWSBatchConfig):
        session = aio_aws_session
        @asynccontextmanager
        async def create_batch_client(self):
            # Route batch API calls to the mock batch server endpoint.
            async with aio_aws_session.create_client(
                "batch", endpoint_url=aio_aws_batch_server
            ) as client:
                yield client
        @asynccontextmanager
        async def create_logs_client(self):
            # Route CloudWatch Logs calls to the mock logs server endpoint.
            async with aio_aws_session.create_client(
                "logs", endpoint_url=aio_aws_logs_server
            ) as client:
                yield client
    config = TestBatchConfig(
        aio_batch_db=test_aio_jobs_db,
        start_pause=0.2,
        min_pause=0.2,
        max_pause=0.6,
        min_jitter=0.1,
        max_jitter=0.2,
    )
    # mocker.patch.object(config, "create_batch_client", return_value=create_batch_client)
    # mocker.patch.object(config, "create_logs_client", return_value=create_logs_client)
    yield config
@pytest.fixture
def aws_batch_sleep1_job(aio_aws_batch_infrastructure: AioAwsBatchInfrastructure):
    """A one-second sleep job bound to the mocked queue and job definition."""
    infra = aio_aws_batch_infrastructure
    sleep_command = ["/bin/sh", "-c", "echo Hello && sleep 1 && echo Bye"]
    return AWSBatchJob(
        job_name="sleep-1-job",
        job_definition=infra.job_definition_arn,
        job_queue=infra.job_queue_arn,
        command=sleep_command,
    )
@pytest.fixture
def aws_batch_sleep5_job(aio_aws_batch_infrastructure: AioAwsBatchInfrastructure):
    """A five-second sleep job bound to the mocked queue and job definition."""
    infra = aio_aws_batch_infrastructure
    sleep_command = ["/bin/sh", "-c", "echo Hello && sleep 5 && echo Bye"]
    return AWSBatchJob(
        job_name="sleep-5-job",
        job_definition=infra.job_definition_arn,
        job_queue=infra.job_queue_arn,
        command=sleep_command,
    )
@pytest.fixture
def aws_batch_fail_job(aio_aws_batch_infrastructure: AioAwsBatchInfrastructure):
    """A job whose command exits non-zero, bound to the mocked infrastructure."""
    infra = aio_aws_batch_infrastructure
    failing_command = ["/bin/sh", "-c", "echo Hello && exit 1"]
    return AWSBatchJob(
        job_name="fail-job",
        job_definition=infra.job_definition_arn,
        job_queue=infra.job_queue_arn,
        command=failing_command,
    )
@pytest.mark.asyncio
async def test_aws_batch_infrastructure(
    aio_aws_batch_infrastructure: AioAwsBatchInfrastructure,
):
    """Every resource handle on the provisioned infrastructure is populated."""
    infrastructure = aio_aws_batch_infrastructure
    assert infrastructure
    assert infrastructure.vpc_id
    assert infrastructure.subnet_id
    assert infrastructure.security_group_id
    assert infrastructure.iam_arn
    assert infrastructure.compute_env_name
    assert infrastructure.compute_env_arn
    assert infrastructure.job_queue_name
    assert infrastructure.job_queue_arn
    assert infrastructure.job_definition_name
    assert infrastructure.job_definition_arn
@pytest.mark.asyncio
async def test_aio_batch_job_definitions(
    aio_aws_batch_infrastructure: AioAwsBatchInfrastructure,
):
    """The mocked infrastructure registers exactly one job definition whose
    ARN embeds the region and the definition name."""
    infra = aio_aws_batch_infrastructure
    region = infra.aws_region
    definition_name = infra.job_definition_name
    assert infra
    assert infra.job_definition_arn
    assert f"arn:aws:batch:{region}" in infra.job_definition_arn
    assert definition_name in infra.job_definition_arn
    response = await infra.aio_aws_clients.batch.describe_job_definitions()
    assert response_success(response)
    definitions = response["jobDefinitions"]
    assert len(definitions) == 1
    definition = definitions[0]
    assert definition["jobDefinitionArn"] == infra.job_definition_arn
    assert definition["jobDefinitionName"] == infra.job_definition_name
@pytest.mark.asyncio
async def test_aio_batch_job_queues(
    aio_aws_batch_infrastructure: AioAwsBatchInfrastructure,
):
    """The mocked infrastructure registers exactly one job queue whose ARN
    embeds the region and the queue name."""
    infra = aio_aws_batch_infrastructure
    region = infra.aws_region
    queue_name = infra.job_queue_name
    assert infra
    assert infra.job_queue_arn
    assert f"arn:aws:batch:{region}" in infra.job_queue_arn
    assert queue_name in infra.job_queue_arn
    response = await infra.aio_aws_clients.batch.describe_job_queues()
    assert response_success(response)
    queues = response["jobQueues"]
    assert len(queues) == 1
    queue = queues[0]
    assert queue["jobQueueArn"] == infra.job_queue_arn
    assert queue["jobQueueName"] == infra.job_queue_name
@pytest.mark.asyncio
async def test_aio_batch_list_jobs(
    aio_aws_batch_infrastructure: AioAwsBatchInfrastructure,
):
    """Listing jobs in every known state on a fresh queue yields nothing."""
    infra = aio_aws_batch_infrastructure
    batch_client = infra.aio_aws_clients.batch
    queue = infra.job_queue_name
    for state in AWSBatchJob.STATES:
        response = await batch_client.list_jobs(jobQueue=queue, jobStatus=state)
        assert response_success(response)
        assert response["jobSummaryList"] == []
@pytest.mark.asyncio
async def test_async_batch_job_submit(aws_batch_sleep1_job, batch_config):
    """Submitting a job populates its id, try counters, submission
    timestamps, and SUBMITTED status; no description exists yet."""
    # moto/docker job submission timestamps seem to be too slow (why ?)
    utc_dt = utc_now()
    utc_ts = datetime_to_unix_milliseconds(utc_dt)
    # Make the submission timestamp land strictly after the reference time.
    time.sleep(1.0)
    job = aws_batch_sleep1_job
    await aio_batch_job_submit(job, config=batch_config)
    response = job.job_submission
    assert response_success(response)
    assert response.get("jobId")
    # The job-submission modifies the job object, it's authorized for side-effects
    assert job.job_id
    assert job.job_id == response.get("jobId")
    assert job.job_id in job.job_tries
    assert job.num_tries == 1
    assert job.num_tries <= job.max_tries
    assert job.submitted > utc_ts
    assert job.submitted_datetime > utc_dt
    assert job.status in AWSBatchJob.STATES
    assert AWSBatchJobStates[job.status] == AWSBatchJobStates.SUBMITTED
    assert job.job_description is None
@pytest.mark.asyncio
async def test_async_batch_job_submit_retry(
    aws_batch_sleep1_job,
    aio_aws_session,
    aio_aws_batch_server,
    aio_aws_logs_server,
    aio_aws_batch_client,
    batch_config,
    test_aio_jobs_db,
    mocker,
):
    """A submit_job call that keeps raising TooManyRequestsException ends in
    RetryError and leaves the job unsubmitted with the error recorded."""
    job = aws_batch_sleep1_job
    error_response = {"Error": {"Code": "TooManyRequestsException"}}
    exception = botocore.exceptions.ClientError(
        error_response=error_response,
        operation_name="submit_job",
    )
    # monkey patch MagicMock so awaiting any mock raises the ClientError
    # NOTE(review): this rebinds MagicMock.__await__ globally and never
    # restores it, so it may leak into other tests in the session — confirm.
    async def async_magic():
        raise exception
    MagicMock.__await__ = lambda x: async_magic().__await__()
    class MockBatchConfig(AWSBatchConfig):
        session = aio_aws_session
        @asynccontextmanager
        async def create_batch_client(self):
            # Hand back a mocked submit_job in place of the real client so
            # every await raises the throttling ClientError defined above.
            async with aio_aws_session.create_client(
                "batch", endpoint_url=aio_aws_batch_server
            ) as client:
                mock_client = mocker.patch.object(client, "submit_job")
                mock_client.__await__ = lambda x: async_magic().__await__()
                yield mock_client
        @asynccontextmanager
        async def create_logs_client(self):
            async with aio_aws_session.create_client(
                "logs", endpoint_url=aio_aws_logs_server
            ) as client:
                yield client
    mock_config = MockBatchConfig(
        aio_batch_db=test_aio_jobs_db,
        start_pause=0.2,
        min_pause=0.2,
        max_pause=0.6,
        min_jitter=0.1,
        max_jitter=0.2,
    )
    with pytest.raises(RetryError) as err:
        await aio_batch_job_submit(job, config=mock_config)
    # The failed submission leaves the job without an id or tries, and the
    # error response is stored on the job for inspection.
    assert job.job_id is None
    assert job.num_tries == 0
    assert job.job_submission == error_response
def test_batch_jobs_utils(aws_batch_sleep1_job, batch_config, mocker):
    """End-to-end pass through the synchronous wrappers: submit, update,
    monitor to completion, and fetch logs for two sleep-1 jobs."""
    # Test convenient synchronous functions that wrap async functions
    utc_dt = utc_now()
    utc_ts = datetime_to_unix_milliseconds(utc_dt)
    # Make the submission timestamps land strictly after the reference time.
    time.sleep(1.0)
    job1 = AWSBatchJob(**aws_batch_sleep1_job.db_data)
    job2 = AWSBatchJob(**aws_batch_sleep1_job.db_data)
    jobs = [job1, job2]
    # The wrappers build their own AWSBatchConfig; patch it to the test config.
    mock_config = mocker.patch("aio_aws.aio_aws_batch.AWSBatchConfig")
    mock_config.return_value = batch_config
    batch_submit_jobs(jobs=jobs)
    for job in jobs:
        assert AWSBatchJobStates[job.status] == AWSBatchJobStates.SUBMITTED
        assert job.submitted > utc_ts
        assert job.submitted_datetime > utc_dt
    batch_update_jobs(jobs=jobs)
    for job in jobs:
        assert AWSBatchJobStates[job.status] >= AWSBatchJobStates.SUBMITTED
    batch_monitor_jobs(jobs=jobs)
    for job in jobs:
        assert AWSBatchJobStates[job.status] == AWSBatchJobStates.SUCCEEDED
        # TODO: add these tests when moto supports millisecond timestamps
        # - https://github.com/spulec/moto/issues/4364
        # assert job.started > job.submitted > utc_ts
        # assert job.started_datetime > job.submitted_datetime > utc_dt
        # assert job.created
        # assert job.created_datetime
        assert job.started
        assert job.started_datetime
        assert job.stopped
        assert job.stopped_datetime
        assert job.stopped > job.started
        assert job.stopped_datetime > job.started_datetime
    batch_get_logs(jobs=jobs)
    for job in jobs:
        assert job.logs
def test_batch_jobs_cancel(
    aws_batch_sleep1_job, aws_batch_sleep5_job, batch_config, mocker
):
    """Jobs still blocked on a dependency can be cancelled through the
    synchronous wrapper and end up in the FAILED state."""
    # Test convenient synchronous functions that wrap async functions
    mock_config = mocker.patch("aio_aws.aio_aws_batch.AWSBatchConfig")
    mock_config.return_value = batch_config
    batch_config.start_pause = 1.0
    batch_config.min_pause = 0.4
    batch_config.max_pause = 0.8
    pre_job = AWSBatchJob(**aws_batch_sleep1_job.db_data)
    pre_job.job_name = "pre-job"
    batch_submit_jobs(jobs=[pre_job])
    assert AWSBatchJobStates[pre_job.status] == AWSBatchJobStates.SUBMITTED
    # Dependent jobs stay cancellable while they wait on pre_job.
    depends_on = [{"jobId": pre_job.job_id, "type": "SEQUENTIAL"}]
    job1 = AWSBatchJob(**aws_batch_sleep1_job.db_data)
    job1.job_name = "cancel-job-1"
    job1.depends_on = depends_on
    job2 = AWSBatchJob(**aws_batch_sleep1_job.db_data)
    job2.job_name = "cancel-job-2"
    job2.depends_on = depends_on
    cancel_jobs = [job1, job2]
    batch_submit_jobs(jobs=cancel_jobs)
    for job in cancel_jobs:
        assert AWSBatchJobStates[job.status] == AWSBatchJobStates.SUBMITTED
    batch_cancel_jobs(jobs=cancel_jobs)
    for job in cancel_jobs:
        assert AWSBatchJobStates[job.status] == AWSBatchJobStates.FAILED
def test_batch_jobs_terminate(aws_batch_sleep5_job, batch_config, mocker):
    """Terminating submitted sleep-5 jobs through the synchronous wrapper
    drives them to the FAILED state."""
    template = aws_batch_sleep5_job.db_data
    jobs = [AWSBatchJob(**template), AWSBatchJob(**template)]
    # The wrappers build their own AWSBatchConfig; patch it to the test config.
    patched_config = mocker.patch("aio_aws.aio_aws_batch.AWSBatchConfig")
    patched_config.return_value = batch_config
    batch_submit_jobs(jobs=jobs)
    for batch_job in jobs:
        assert AWSBatchJobStates[batch_job.status] == AWSBatchJobStates.SUBMITTED
    batch_terminate_jobs(jobs=jobs)
    for batch_job in jobs:
        assert AWSBatchJobStates[batch_job.status] == AWSBatchJobStates.FAILED
@pytest.mark.asyncio
async def test_async_batch_describe_jobs(aws_batch_sleep1_job, batch_config):
    """describe_jobs returns one description per submitted job id, each
    carrying the expected queue, definition, and a known state."""
    job1 = AWSBatchJob(**aws_batch_sleep1_job.db_data)
    job2 = AWSBatchJob(**aws_batch_sleep1_job.db_data)
    jobs = [job1, job2]
    await aio_batch_submit_jobs(jobs, config=batch_config)
    job_ids = [j.job_id for j in jobs]
    assert all(job_ids)
    response = await aio_batch_describe_jobs(job_ids=job_ids, config=batch_config)
    assert response_success(response)
    job_descriptions = response.get("jobs")
    assert len(job_descriptions) == 2
    for job_desc in job_descriptions:
        assert job_desc["jobQueue"] == job1.job_queue
        assert job_desc["jobDefinition"] == job1.job_definition
        assert job_desc["status"] in AWSBatchJob.STATES
@pytest.mark.asyncio
async def test_async_batch_update_jobs(aws_batch_sleep1_job, batch_config):
    """aio_batch_update_jobs refreshes each job's status in place."""
    job1 = AWSBatchJob(**aws_batch_sleep1_job.db_data)
    job2 = AWSBatchJob(**aws_batch_sleep1_job.db_data)
    jobs = [job1, job2]
    await aio_batch_submit_jobs(jobs, config=batch_config)
    job_ids = [j.job_id for j in jobs]
    assert all(job_ids)
    # Give the mock backend time to advance the jobs past SUBMITTED.
    await asyncio.sleep(3.0)
    await aio_batch_update_jobs(jobs=jobs, config=batch_config)
    # Since update jobs can get various status conditions, depending
    # on how advanced a job is in a lifecycle, this just tests that
    # the job status is set
    for job in jobs:
        assert job.status in AWSBatchJob.STATES
        assert AWSBatchJobStates[job.status] in AWSBatchJobStates
        assert AWSBatchJobStates[job.status] > AWSBatchJobStates.SUBMITTED
@pytest.mark.asyncio
async def test_async_batch_cancel_jobs(aws_batch_sleep1_job, batch_config):
    """Jobs still blocked on a dependency can be cancelled with
    aio_batch_cancel_jobs and end up in the FAILED state."""
    batch_config.start_pause = 1.0
    batch_config.min_pause = 0.4
    batch_config.max_pause = 0.8
    pre_job = AWSBatchJob(**aws_batch_sleep1_job.db_data)
    pre_job.job_name = "pre-job"
    await aio_batch_submit_jobs(jobs=[pre_job], config=batch_config)
    assert AWSBatchJobStates[pre_job.status] == AWSBatchJobStates.SUBMITTED
    # Dependent jobs stay cancellable while they wait on pre_job.
    depends_on = [{"jobId": pre_job.job_id, "type": "SEQUENTIAL"}]
    job1 = AWSBatchJob(**aws_batch_sleep1_job.db_data)
    job1.job_name = "cancel-job-1"
    job1.depends_on = depends_on
    job2 = AWSBatchJob(**aws_batch_sleep1_job.db_data)
    job2.job_name = "cancel-job-2"
    job2.depends_on = depends_on
    cancel_jobs = [job1, job2]
    await aio_batch_submit_jobs(jobs=cancel_jobs, config=batch_config)
    for job in cancel_jobs:
        assert AWSBatchJobStates[job.status] == AWSBatchJobStates.SUBMITTED
    await aio_batch_cancel_jobs(jobs=cancel_jobs, config=batch_config)
    for job in cancel_jobs:
        assert AWSBatchJobStates[job.status] == AWSBatchJobStates.FAILED
@pytest.mark.asyncio
async def test_async_batch_terminate_jobs(aws_batch_sleep5_job, batch_config):
    """Terminating submitted sleep-5 jobs drives them to the FAILED state."""
    job1 = AWSBatchJob(**aws_batch_sleep5_job.db_data)
    job2 = AWSBatchJob(**aws_batch_sleep5_job.db_data)
    jobs = [job1, job2]
    await aio_batch_submit_jobs(jobs, config=batch_config)
    for job in jobs:
        assert job.job_id
        assert AWSBatchJobStates[job.status] == AWSBatchJobStates.SUBMITTED
    await aio_batch_terminate_jobs(jobs=jobs, config=batch_config)
    # The terminate jobs function should wait for it to fail and set status FAILED
    for job in jobs:
        assert AWSBatchJobStates[job.status] == AWSBatchJobStates.FAILED
@pytest.mark.asyncio
async def test_async_batch_job_failed(aws_batch_fail_job, batch_config):
    """A job whose command exits non-zero is waited into the FAILED state."""
    failing_job = aws_batch_fail_job
    await aio_batch_job_submit(failing_job, config=batch_config)
    assert failing_job.job_id
    await aio_batch_job_waiter(job=failing_job, config=batch_config)
    # The job-waiter modifies the job object, it's authorized for side-effects
    assert failing_job.job_description
    assert failing_job.status in AWSBatchJob.STATES
    assert failing_job.status == "FAILED"
@pytest.mark.asyncio
async def test_async_batch_job_waiter(aws_batch_sleep1_job, batch_config):
job = aws_batch_sleep1_job
| |
source,
str((3, 6)): a.dis_code(shu.__code__, shu, (3, 6, 1)),
}
expected_new = old.copy()
expected_new["_version"] = 3
expected_new[str(sys.version_info[:2])] = (
a.dis_code(shu.__code__, shu, sys.version_info),
"",
)
assert expected_new != old
with pytest.raises(ppg.NothingChanged) as e:
a.run(None, None)
assert e.value.new_value == expected_new
del old["source"]
res = a._get_invariant(old, [])
assert res == expected_new
@pytest.mark.usefixtures("ppg1_compatibility_test")
class TestMultiFileInvariant:
    """Behavior of MultiFileInvariant under the ppg1 compatibility layer.

    Most cases are skipped: they exercised ppg1 implementation internals
    (get_invariant / NothingChanged) that no longer apply to ppg2.
    """
    def test_input_checking(self):
        """Invalid filename arguments are rejected with TypeError."""
        with pytest.raises(TypeError):
            ppg.MultiFileInvariant("out/A", lambda: write("out/A", "A"))
        with pytest.raises(TypeError):
            ppg.MultiFileInvariant(34, lambda: write("out/A", "A"))
        with pytest.raises(TypeError):
            alist = ["out/A", "out/B"]
            ppg.MultiFileInvariant((x for x in alist), lambda: write("out/A", "A"))
        # with pytest.raises(ValueError):
        # ppg2
        with pytest.raises(TypeError):
            ppg.MultiFileInvariant(["out/A", "out/A"], lambda: write("out/A", "A"))
        with pytest.raises(TypeError):
            ppg.MultiFileInvariant([], lambda: write("out/A", "A"))
    @pytest.mark.skip  # ppg1 implementation internals no longer relevant to ppg2
    def test_new_raises_unchanged(self):
        """A freshly created invariant signals NothingChanged on first run."""
        write("out/a", "hello")
        write("out/b", "world")
        jobA = ppg.MultiFileInvariant(["out/a", "out/b"])
        def inner():
            jobA.run(None, None)
        assertRaises(ppg.NothingChanged, inner)
    @pytest.mark.skip  # ppg1 implementation internals no longer relevant to ppg2
    def test_no_raise_on_no_change(self):
        """Re-checking unchanged files reports the same checksum state."""
        write("out/a", "hello")
        write("out/b", "world")
        jobA = ppg.MultiFileInvariant(["out/a", "out/b"])
        try:
            jobA.run(None, None)
            self.fail("should not be reached")
        except ppg.NothingChanged as e:
            cs = e.new_value
        try:
            jobA.get_invariant(cs, {jobA.job_id: cs})
            self.fail("should not be reached")
        except ppg.NothingChanged as e:
            cs2 = e.new_value
            assert cs2 == cs
    @pytest.mark.skip  # ppg1 implementation internals no longer relevant to ppg2
    def test_filetime_changed_contents_the_same(self):
        """Touching a file changes times but not sizes or checksums."""
        write("out/a", "hello")
        write("out/b", "world")
        jobA = ppg.MultiFileInvariant(["out/a", "out/b"])
        try:
            jobA.get_invariant(False, {})
            self.fail("should not be reached")
        except ppg.NothingChanged as e:
            cs = e.new_value
        subprocess.check_call(["touch", "--date=2004-02-29", "out/b"])
        try:
            jobA.get_invariant(cs, {jobA.job_id: cs})
            self.fail("should not be reached")
        except ppg.NothingChanged as e:
            cs2 = e.new_value
            assert not (cs2 == cs)
            assert not ([x[1] for x in cs2] == [x[1] for x in cs])  # times changed
            assert [x[2] for x in cs2] == [x[2] for x in cs]  # sizes did not
            assert [x[3] for x in cs2] == [x[3] for x in cs]
    @pytest.mark.skip  # ppg1 implementation internals no longer relevant to ppg2
    def test_changed_file(self):
        """Rewriting a file with different content changes size and checksum."""
        write("out/a", "hello")
        write("out/b", "world")
        jobA = ppg.MultiFileInvariant(["out/a", "out/b"])
        try:
            jobA.get_invariant(False, {})
            self.fail("should not be reached")
        except ppg.NothingChanged as e:
            cs = e.new_value
        write("out/b", "world!")
        cs2 = jobA.get_invariant(cs, {jobA.job_id: cs})
        assert not (cs2 == cs)
        assert [x[0] for x in cs2] == [x[0] for x in cs]  # file names the same
        # assert not ( [x[1] for x in cs2] == [x[1] for x in cs]) # don't test times, might not have changed
        assert not ([x[2] for x in cs2] == [x[2] for x in cs])  # sizes changed
        assert not ([x[3] for x in cs2] == [x[2] for x in cs])  # checksums changed
    @pytest.mark.skip  # ppg1 implementation internals no longer relevant to ppg2
    def test_changed_file_same_size(self):
        """A same-length rewrite is detected via the checksum alone."""
        write("out/a", "hello")
        write("out/b", "world")
        jobA = ppg.MultiFileInvariant(["out/a", "out/b"])
        try:
            jobA.get_invariant(False, {})
            self.fail("should not be reached")
        except ppg.NothingChanged as e:
            cs = e.new_value
        time.sleep(2)  # must be certain we have a changed filetime!
        write("out/b", "worlt")
        cs2 = jobA.get_invariant(cs, {jobA.job_id: cs})
        assert not (cs2 == cs)
        assert [x[0] for x in cs2] == [x[0] for x in cs]  # file names the same
        assert [x[2] for x in cs2] == [x[2] for x in cs]  # sizes the same
        assert not ([x[3] for x in cs2] == [x[2] for x in cs])  # checksums changed
    @pytest.mark.skip  # ppg1 implementation internals no longer relevant to ppg2
    def test_rehome_no_change(self):
        """Moving identical files to new paths is treated as unchanged."""
        write("out/a", "hello")
        write("out/b", "world")
        jobA = ppg.MultiFileInvariant(["out/a", "out/b"])
        try:
            jobA.get_invariant(False, {})
            self.fail("should not be reached")
        except ppg.NothingChanged as e:
            cs = e.new_value
        try:
            jobA.get_invariant(cs, {jobA.job_id: cs})
            self.fail("should not be reached")
        except ppg.NothingChanged as e:
            cs2 = e.new_value
            assert cs2 == cs
        os.makedirs("out2")
        write("out2/a", "hello")
        write("out2/b", "world")
        jobB = ppg.MultiFileInvariant(["out2/a", "out2/b"])
        def inner():
            jobB.get_invariant(False, {jobA.job_id: cs})
        assertRaises(ppg.NothingChanged, inner)
    @pytest.mark.skip  # ppg1 implementation internals no longer relevant to ppg2
    def test_rehome_and_change(self):
        """Rehomed files with changed content are detected as changed."""
        write("out/a", "hello")
        write("out/b", "world")
        jobA = ppg.MultiFileInvariant(["out/a", "out/b"])
        try:
            jobA.get_invariant(False, {})
            self.fail("should not be reached")
        except ppg.NothingChanged as e:
            cs = e.new_value
        try:
            jobA.get_invariant(cs, {jobA.job_id: cs})
            self.fail("should not be reached")
        except ppg.NothingChanged as e:
            cs2 = e.new_value
            assert cs2 == cs
        os.makedirs("out2")
        write("out2/a", "hello")
        write("out2/b", "worl!x")  # either change the length, or wait 2 seconds...
        jobB = ppg.MultiFileInvariant(["out2/a", "out2/b"])
        cs3 = jobB.get_invariant(False, {jobA.job_id: cs})
        assert not ([x[3] for x in cs2] == [x[2] for x in cs3])  # checksums changed
    def test_non_existant_file_raises(self):
        """A missing input file fails the graph at run time."""
        # ppg2 does not raise until run.
        mfi = ppg.MultiFileInvariant(["out/a"])
        ppg.FileGeneratingJob("out/B", lambda of: of.write("b")).depends_on(mfi)
        with pytest.raises(ppg.RuntimeError):
            ppg.run_pipegraph()
    @pytest.mark.skip  # ppg1 implementation internals no longer relevant to ppg2
    def test_rehome_and_additional_file(self):
        """Rehoming plus an extra tracked file counts as a change."""
        write("out/a", "hello")
        write("out/b", "world")
        jobA = ppg.MultiFileInvariant(["out/a", "out/b"])
        try:
            jobA.get_invariant(False, {})
            self.fail("should not be reached")
        except ppg.NothingChanged as e:
            cs = e.new_value
        try:
            jobA.get_invariant(cs, {jobA.job_id: cs})
            self.fail("should not be reached")
        except ppg.NothingChanged as e:
            cs2 = e.new_value
            assert cs2 == cs
        os.makedirs("out2")
        write("out2/a", "hello")
        write("out2/b", "world")
        write("out2/c", "worl!x")  # either change the length, or wait 2 seconds...
        jobB = ppg.MultiFileInvariant(["out2/a", "out2/b", "out2/c"])
        cs3 = jobB.get_invariant(False, {jobA.job_id: cs})
        assert not ([x[3] for x in cs2] == [x[2] for x in cs3])  # checksums changed
    @pytest.mark.skip  # ppg1 implementation internals no longer relevant to ppg2
    def test_rehome_and_missing_file(self):
        """Rehoming with a tracked file dropped counts as a change."""
        write("out/a", "hello")
        write("out/b", "world")
        jobA = ppg.MultiFileInvariant(["out/a", "out/b"])
        try:
            jobA.get_invariant(False, {})
            self.fail("should not be reached")
        except ppg.NothingChanged as e:
            cs = e.new_value
        try:
            jobA.get_invariant(cs, {jobA.job_id: cs})
            self.fail("should not be reached")
        except ppg.NothingChanged as e:
            cs2 = e.new_value
            assert cs2 == cs
        os.makedirs("out2")
        write("out2/a", "hello")
        jobB = ppg.MultiFileInvariant(["out2/a"])
        cs3 = jobB.get_invariant(False, {jobA.job_id: cs})
        assert not ([x[3] for x in cs2] == [x[2] for x in cs3])  # checksums changed
    def test_rehome_same_filenames_gives_up(self, ppg1_compatibility_test):
        """Rehoming identical content under colliding basenames does not
        trigger a rerun under ppg2 (ppg1 used to give up and rerun)."""
        from pathlib import Path
        write("out/counter", "0")
        Path("out/A").mkdir()
        Path("out/B").mkdir()
        Path("out/C").mkdir()
        Path("out/D").mkdir()
        write("out/A/A", "hello")
        write("out/B/A", "world")
        jobA = ppg.MultiFileInvariant(["out/A/A", "out/B/A"])
        def of():
            append("out/counter", "x")
            write("out/x", "ok")
        jobB = ppg.FileGeneratingJob("out/x", of)
        jobB.depends_on(jobA)
        ppg.run_pipegraph()
        assert read("out/counter") == "0x"
        ppg1_compatibility_test.new_pipegraph()
        shutil.move("out/A/A", "out/C/A")
        shutil.move("out/B/A", "out/D/A")
        jobA = ppg.MultiFileInvariant(["out/C/A", "out/D/A"])
        jobB = ppg.FileGeneratingJob("out/x", of)
        jobB.depends_on(jobA)
        ppg.run_pipegraph()
        # ppg2 now does *not* give up
        # assert read("out/counter") == "0xx"
        assert read("out/counter") == "0x"  # so no rerun
@pytest.mark.usefixtures("ppg1_compatibility_test")
class TestDependency:
    def test_simple_chain(self):
        """A small diamond of jobs hanging off one AttributeLoadingJob can be
        constructed without error (construction only; the graph is not run)."""
        o = Dummy()
        def load_a():
            return "shu"
        jobA = ppg.AttributeLoadingJob("a", o, "myattr", load_a)
        ofB = "out/B"
        def do_write_b():
            write(ofB, o.myattr)
        jobB = ppg.FileGeneratingJob(ofB, do_write_b).depends_on(jobA)
        ofC = "out/C"
        def do_write_C():
            write(ofC, o.myattr)
        ppg.FileGeneratingJob(ofC, do_write_C).depends_on(jobA)
        ofD = "out/D"
        def do_write_d():
            write(ofD, read(ofC) + read(ofB))
        ppg.FileGeneratingJob(ofD, do_write_d).depends_on([jobA, jobB])
    def test_failed_job_kills_those_after(self, ppg1_compatibility_test):
        """A failing job stops its downstream jobs; after replacing it with a
        working one, only the missing outputs are built and A is not rerun."""
        ofA = "out/A"
        def write_a():
            append(ofA, "hello")
        jobA = ppg.FileGeneratingJob(ofA, write_a)
        ofB = "out/B"
        def write_b():
            raise ValueError("shu")
        jobB = ppg.FileGeneratingJob(ofB, write_b)
        jobB.depends_on(jobA)
        ofC = "out/C"
        def write_c():
            write(ofC, "hello")
        jobC = ppg.FileGeneratingJob(ofC, write_c)
        jobC.depends_on(jobB)
        try:
            ppg.run_pipegraph()
            raise ValueError("should not be reached")
        except ppg.RuntimeError:
            pass
        assert os.path.exists(ofA)  # which was before the error
        assert not (os.path.exists(ofB))  # which was on the error
        assert not (os.path.exists(ofC))  # which was after the error
        # Second run: same graph, but B now succeeds.
        ppg1_compatibility_test.new_pipegraph()
        jobA = ppg.FileGeneratingJob(ofA, write_a)
        jobC = ppg.FileGeneratingJob(ofC, write_c)
        def write_b_ok():
            write(ofB, "BB")
        jobB = ppg.FileGeneratingJob(ofB, write_b_ok)
        jobB.depends_on(jobA)
        jobC.depends_on(jobB)
        ppg.run_pipegraph()
        assert os.path.exists(ofA)
        assert read(ofA) == "hello"  # run only once!
        assert os.path.exists(ofB)
        assert os.path.exists(ofC)
    def test_done_filejob_does_not_gum_up_execution(self):
        """A pre-existing output file does not block downstream execution."""
        ofA = "out/A"
        write(ofA, "1111")
        def write_a():
            append(ofA, "hello")
        jobA = ppg.FileGeneratingJob(ofA, write_a)
        jobA.ignore_code_changes()  # or it will inject a function dependency and run nevertheless...
        ofB = "out/B"
        def write_b():
            append(ofB, "hello")
        jobB = ppg.FileGeneratingJob(ofB, write_b)
        jobB.depends_on(jobA)
        ofC = "out/C"
        def write_c():
            write(ofC, "hello")
        jobC = ppg.FileGeneratingJob(ofC, write_c)
        jobC.depends_on(jobB)
        assert os.path.exists(ofA)
        ppg.run_pipegraph()
        assert os.path.exists(ofB)
        assert os.path.exists(ofC)
        # ppg2 runs the job at least once and captures the hash afterwards.
        # This might seem a disadvantage, but it's the only way to guarantee
        # the code actually produces the output. On the plus side, you can
        # swap in a FileInvariant in place without trouble (the
        # FileGeneratingJob produces the same 'output').
        # assert read(ofA) == "1111"
        assert read(ofA) == "hello"
def test_invariant_violation_redoes_deps_but_not_nondeps(
self, ppg1_compatibility_test
):
def get_job(name):
fn = "out/" + name
def do_write():
if os.path.exists(fn + ".sentinel"):
d = read(fn + ".sentinel")
else:
d = ""
append(fn + ".sentinel", name) # get's longer all the time...
write(fn, d + name) # get's deleted anyhow...
return ppg.FileGeneratingJob(fn, do_write)
jobA = get_job("A")
jobB = get_job("B")
jobC = get_job("C")
get_job("D")
jobC.depends_on(jobB)
jobB.depends_on(jobA)
dep = ppg.ParameterInvariant("myparam", ("hello",))
jobA.depends_on(dep)
ppg.run_pipegraph()
assert read("out/A") == "A"
assert read("out/B") == "B"
assert | |
'zipcode': 458000},
2714: {'name': '凤泉区', 'pid': 246, 'zipcode': 453700},
2715: {'name': '卫滨区', 'pid': 246, 'zipcode': 453700},
2716: {'name': '卫辉市', 'pid': 246, 'zipcode': 453700},
2717: {'name': '原阳县', 'pid': 246, 'zipcode': 453700},
2718: {'name': '封丘县', 'pid': 246, 'zipcode': 453700},
2719: {'name': '延津县', 'pid': 246, 'zipcode': 453700},
2720: {'name': '新乡县', 'pid': 246, 'zipcode': 453700},
2721: {'name': '牧野区', 'pid': 246, 'zipcode': 453700},
2722: {'name': '红旗区', 'pid': 246, 'zipcode': 453700},
2723: {'name': '获嘉县', 'pid': 246, 'zipcode': 453700},
2724: {'name': '辉县市', 'pid': 246, 'zipcode': 453700},
2725: {'name': '长垣县', 'pid': 246, 'zipcode': 453700},
2726: {'name': '中站区', 'pid': 247, 'zipcode': 454150},
2727: {'name': '修武县', 'pid': 247, 'zipcode': 454150},
2728: {'name': '博爱县', 'pid': 247, 'zipcode': 454150},
2729: {'name': '孟州市', 'pid': 247, 'zipcode': 454150},
2730: {'name': '山阳区', 'pid': 247, 'zipcode': 454150},
2731: {'name': '武陟县', 'pid': 247, 'zipcode': 454150},
2732: {'name': '沁阳市', 'pid': 247, 'zipcode': 454150},
2733: {'name': '温县', 'pid': 247, 'zipcode': 454150},
2734: {'name': '解放区', 'pid': 247, 'zipcode': 454150},
2735: {'name': '马村区', 'pid': 247, 'zipcode': 454150},
2736: {'name': '华龙区', 'pid': 248, 'zipcode': 457100},
2737: {'name': '南乐县', 'pid': 248, 'zipcode': 457100},
2738: {'name': '台前县', 'pid': 248, 'zipcode': 457100},
2739: {'name': '清丰县', 'pid': 248, 'zipcode': 457100},
2740: {'name': '濮阳县', 'pid': 248, 'zipcode': 457100},
2741: {'name': '范县', 'pid': 248, 'zipcode': 457100},
2742: {'name': '禹州市', 'pid': 249, 'zipcode': 461100},
2743: {'name': '襄城县', 'pid': 249, 'zipcode': 461100},
2744: {'name': '许昌县', 'pid': 249, 'zipcode': 461100},
2745: {'name': '鄢陵县', 'pid': 249, 'zipcode': 461100},
2746: {'name': '长葛市', 'pid': 249, 'zipcode': 461100},
2747: {'name': '魏都区', 'pid': 249, 'zipcode': 461100},
2748: {'name': '临颍县', 'pid': 250, 'zipcode': 462000},
2749: {'name': '召陵区', 'pid': 250, 'zipcode': 462000},
2750: {'name': '源汇区', 'pid': 250, 'zipcode': 462000},
2751: {'name': '舞阳县', 'pid': 250, 'zipcode': 462000},
2752: {'name': '郾城区', 'pid': 250, 'zipcode': 462000},
2753: {'name': '义马市', 'pid': 251, 'zipcode': 472000},
2754: {'name': '卢氏县', 'pid': 251, 'zipcode': 472000},
2755: {'name': '渑池县', 'pid': 251, 'zipcode': 472000},
2756: {'name': '湖滨区', 'pid': 251, 'zipcode': 472000},
2757: {'name': '灵宝市', 'pid': 251, 'zipcode': 472000},
2758: {'name': '陕州区', 'pid': 251, 'zipcode': 472000},
2759: {'name': '内乡县', 'pid': 252, 'zipcode': 473000},
2760: {'name': '南召县', 'pid': 252, 'zipcode': 473000},
2761: {'name': '卧龙区', 'pid': 252, 'zipcode': 473000},
2762: {'name': '唐河县', 'pid': 252, 'zipcode': 473000},
2763: {'name': '宛城区', 'pid': 252, 'zipcode': 473000},
2764: {'name': '新野县', 'pid': 252, 'zipcode': 473000},
2765: {'name': '方城县', 'pid': 252, 'zipcode': 473000},
2766: {'name': '桐柏县', 'pid': 252, 'zipcode': 473000},
2767: {'name': '淅川县', 'pid': 252, 'zipcode': 473000},
2768: {'name': '社旗县', 'pid': 252, 'zipcode': 473000},
2769: {'name': '西峡县', 'pid': 252, 'zipcode': 473000},
2770: {'name': '邓州市', 'pid': 252, 'zipcode': 473000},
2771: {'name': '镇平县', 'pid': 252, 'zipcode': 473000},
2772: {'name': '夏邑县', 'pid': 253, 'zipcode': 476000},
2773: {'name': '宁陵县', 'pid': 253, 'zipcode': 476000},
2774: {'name': '柘城县', 'pid': 253, 'zipcode': 476000},
2775: {'name': '民权县', 'pid': 253, 'zipcode': 476000},
2776: {'name': '永城市', 'pid': 253, 'zipcode': 476000},
2777: {'name': '睢县', 'pid': 253, 'zipcode': 476000},
2778: {'name': '睢阳区', 'pid': 253, 'zipcode': 476000},
2779: {'name': '粱园区', 'pid': 253, 'zipcode': 476000},
2780: {'name': '虞城县', 'pid': 253, 'zipcode': 476000},
2781: {'name': '光山县', 'pid': 254, 'zipcode': 464000},
2782: {'name': '商城县', 'pid': 254, 'zipcode': 464000},
2783: {'name': '固始县', 'pid': 254, 'zipcode': 464000},
2784: {'name': '平桥区', 'pid': 254, 'zipcode': 464000},
2785: {'name': '息县', 'pid': 254, 'zipcode': 464000},
2786: {'name': '新县', 'pid': 254, 'zipcode': 464000},
2787: {'name': '浉河区', 'pid': 254, 'zipcode': 464000},
2788: {'name': '淮滨县', 'pid': 254, 'zipcode': 464000},
2789: {'name': '潢川县', 'pid': 254, 'zipcode': 464000},
2790: {'name': '罗山县', 'pid': 254, 'zipcode': 464000},
2791: {'name': '商水县', 'pid': 255, 'zipcode': 466000},
2792: {'name': '太康县', 'pid': 255, 'zipcode': 466000},
2793: {'name': '川汇区', 'pid': 255, 'zipcode': 466000},
2794: {'name': '扶沟县', 'pid': 255, 'zipcode': 466000},
2795: {'name': '沈丘县', 'pid': 255, 'zipcode': 466000},
2796: {'name': '淮阳县', 'pid': 255, 'zipcode': 466000},
2797: {'name': '西华县', 'pid': 255, 'zipcode': 466000},
2798: {'name': '郸城县', 'pid': 255, 'zipcode': 466000},
2799: {'name': '项城市', 'pid': 255, 'zipcode': 466000},
2800: {'name': '鹿邑县', 'pid': 255, 'zipcode': 466000},
2801: {'name': '上蔡县', 'pid': 256, 'zipcode': 463000},
2802: {'name': '平舆县', 'pid': 256, 'zipcode': 463000},
2803: {'name': '新蔡县', 'pid': 256, 'zipcode': 463000},
2804: {'name': '正阳县', 'pid': 256, 'zipcode': 463000},
2805: {'name': '汝南县', 'pid': 256, 'zipcode': 463000},
2806: {'name': '泌阳县', 'pid': 256, 'zipcode': 463000},
2807: {'name': '确山县', 'pid': 256, 'zipcode': 463000},
2808: {'name': '西平县', 'pid': 256, 'zipcode': 463000},
2809: {'name': '遂平县', 'pid': 256, 'zipcode': 463000},
2810: {'name': '驿城区', 'pid': 256, 'zipcode': 463000},
2811: {'name': '济源市', 'pid': 257, 'zipcode': 454650},
2812: {'name': '东西湖区', 'pid': 258, 'zipcode': 430000},
2813: {'name': '新洲区', 'pid': 258, 'zipcode': 430000},
2814: {'name': '武昌区', 'pid': 258, 'zipcode': 430000},
2815: {'name': '汉南区', 'pid': 258, 'zipcode': 430000},
2816: {'name': '汉阳区', 'pid': 258, 'zipcode': 430000},
2817: {'name': '江夏区', 'pid': 258, 'zipcode': 430000},
2818: {'name': '江岸区', 'pid': 258, 'zipcode': 430000},
2819: {'name': '江汉区', 'pid': 258, 'zipcode': 430000},
2820: {'name': '洪山区', 'pid': 258, 'zipcode': 430000},
2821: {'name': '硚口区', 'pid': 258, 'zipcode': 430000},
2822: {'name': '蔡甸区', 'pid': 258, 'zipcode': 430000},
2823: {'name': '青山区', 'pid': 258, 'zipcode': 430000},
2824: {'name': '黄陂区', 'pid': 258, 'zipcode': 430000},
2825: {'name': '下陆区', 'pid': 259, 'zipcode': 435000},
2826: {'name': '大冶市', 'pid': 259, 'zipcode': 435000},
2827: {'name': '西塞山区', 'pid': 259, 'zipcode': 435000},
2828: {'name': '铁山区', 'pid': 259, 'zipcode': 435000},
2829: {'name': '阳新县', 'pid': 259, 'zipcode': 435000},
2830: {'name': '黄石港区', 'pid': 259, 'zipcode': 435000},
2831: {'name': '丹江口市', 'pid': 260, 'zipcode': 442000},
2832: {'name': '张湾区', 'pid': 260, 'zipcode': 442000},
2833: {'name': '房县', 'pid': 260, 'zipcode': 442000},
2834: {'name': '竹山县', 'pid': 260, 'zipcode': 442000},
2835: {'name': '竹溪县', 'pid': 260, 'zipcode': 442000},
2836: {'name': '茅箭区', 'pid': 260, 'zipcode': 442000},
2837: {'name': '郧阳区', 'pid': 260, 'zipcode': 442000},
2838: {'name': '郧西县', 'pid': 260, 'zipcode': 442000},
2839: {'name': '五峰土家族自治县', 'pid': 261, 'zipcode': 443400},
2840: {'name': '伍家岗区', 'pid': 261, 'zipcode': 443000},
2841: {'name': '兴山县', 'pid': 261, 'zipcode': 443000},
2842: {'name': '夷陵区', 'pid': 261, 'zipcode': 443000},
2843: {'name': '宜都市', 'pid': 261, 'zipcode': 443000},
2844: {'name': '当阳市', 'pid': 261, 'zipcode': 443000},
2845: {'name': '枝江市', 'pid': 261, 'zipcode': 443000},
2846: {'name': '点军区', 'pid': 261, 'zipcode': 443000},
2847: {'name': '秭归县', 'pid': 261, 'zipcode': 443000},
2848: {'name': '猇亭', 'pid': 261, 'zipcode': 443000},
2849: {'name': '西陵区', 'pid': 261, 'zipcode': 443000},
2850: {'name': '远安县', 'pid': 261, 'zipcode': 443000},
2851: {'name': '长阳土家族自治县', 'pid': 261, 'zipcode': 443500},
2852: {'name': '保康县', 'pid': 262, 'zipcode': 441100},
2853: {'name': '南漳县', 'pid': 262, 'zipcode': 441100},
2854: {'name': '宜城市', 'pid': 262, 'zipcode': 441100},
2855: {'name': '枣阳市', 'pid': 262, 'zipcode': 441100},
2856: {'name': '樊城区', 'pid': 262, 'zipcode': 441100},
2857: {'name': '老河口市', 'pid': 262, 'zipcode': 441100},
2858: {'name': '襄城区', 'pid': 262, 'zipcode': 441100},
2859: {'name': '襄州区', 'pid': 262, 'zipcode': 441100},
2860: {'name': '谷城县', 'pid': 262, 'zipcode': 441100},
2861: {'name': '华容区', 'pid': 263, 'zipcode': 436000},
2862: {'name': '梁子湖区', 'pid': 263, 'zipcode': 436000},
2863: {'name': '鄂城区', 'pid': 263, 'zipcode': 436000},
2864: {'name': '东宝区', 'pid': 264, 'zipcode': 448000},
2865: {'name': '京山县', 'pid': 264, 'zipcode': 448000},
2866: {'name': '掇刀区', 'pid': 264, 'zipcode': 448000},
2867: {'name': '沙洋县', 'pid': 264, 'zipcode': 448000},
2868: {'name': '钟祥市', 'pid': 264, 'zipcode': 448000},
2869: {'name': '云梦县', 'pid': 265, 'zipcode': 432000},
2870: {'name': '大悟县', 'pid': 265, 'zipcode': 432000},
2871: {'name': '孝南区', 'pid': 265, 'zipcode': 432000},
2872: {'name': '孝昌县', 'pid': 265, 'zipcode': 432000},
2873: {'name': '安陆市', 'pid': 265, 'zipcode': 432000},
2874: {'name': '应城市', 'pid': 265, 'zipcode': 432000},
2875: {'name': '汉川市', 'pid': 265, 'zipcode': 432000},
2876: {'name': '公安县', 'pid': 266, 'zipcode': 434020},
2877: {'name': '松滋市', 'pid': 266, 'zipcode': 434020},
2878: {'name': '江陵县', 'pid': 266, 'zipcode': 434020},
2879: {'name': '沙市区', 'pid': 266, 'zipcode': 434020},
2880: {'name': '洪湖市', 'pid': 266, 'zipcode': 434020},
2881: {'name': '监利县', 'pid': 266, 'zipcode': 434020},
2882: {'name': '石首市', 'pid': 266, 'zipcode': 434020},
2883: {'name': '荆州区', 'pid': 266, 'zipcode': 434020},
2884: {'name': '团风县', 'pid': 267, 'zipcode': 438000},
2885: {'name': '武穴市', 'pid': 267, 'zipcode': 438000},
2886: {'name': '浠水县', 'pid': 267, 'zipcode': 438000},
2887: {'name': '红安县', 'pid': 267, 'zipcode': 438000},
2888: {'name': '罗田县', 'pid': 267, 'zipcode': 438000},
2889: {'name': '英山县', 'pid': 267, 'zipcode': 438000},
2890: {'name': '蕲春县', 'pid': 267, 'zipcode': 438000},
2891: {'name': '麻城市', 'pid': 267, 'zipcode': 438000},
2892: {'name': '黄州区', 'pid': 267, 'zipcode': 438000},
2893: {'name': '黄梅县', 'pid': 267, 'zipcode': 438000},
2894: {'name': '咸安区', 'pid': 268, 'zipcode': 437000},
2895: {'name': '嘉鱼县', 'pid': 268, 'zipcode': 437000},
2896: {'name': '崇阳县', 'pid': 268, 'zipcode': 437000},
2897: {'name': '赤壁市', 'pid': 268, 'zipcode': 437000},
2898: {'name': '通城县', 'pid': 268, 'zipcode': 437000},
2899: {'name': '通山县', 'pid': 268, 'zipcode': 437000},
2900: {'name': '广水市', 'pid': 269, 'zipcode': 441300},
2901: {'name': '曾都区', 'pid': 269, 'zipcode': 441300},
2902: {'name': '利川市', 'pid': 270, 'zipcode': 445000},
2903: {'name': '咸丰县', 'pid': 270, 'zipcode': 445000},
2904: {'name': '宣恩县', 'pid': 270, 'zipcode': 445000},
2905: {'name': '巴东县', 'pid': 270, 'zipcode': 445000},
2906: {'name': '建始县', 'pid': 270, 'zipcode': 445000},
2907: {'name': '恩施市', 'pid': 270, 'zipcode': 445000},
2908: {'name': '来凤县', 'pid': 270, 'zipcode': 445000},
2909: {'name': '鹤峰县', 'pid': 270, 'zipcode': 445000},
2910: {'name': '仙桃市', 'pid': 271, 'zipcode': 433000},
2911: {'name': '潜江市', 'pid': 272, 'zipcode': 433100},
2912: {'name': '天门市', 'pid': 273, 'zipcode': 431700},
2913: {'name': '神农架林区', 'pid': 274, 'zipcode': 442400},
2914: {'name': '天心区', 'pid': 275, 'zipcode': 410000},
2915: {'name': '宁乡市', 'pid': 275, 'zipcode': 410000},
2916: {'name': '岳麓区', 'pid': 275, 'zipcode': 410000},
2917: {'name': '开福区', 'pid': 275, 'zipcode': 410000},
2918: {'name': '望城区', 'pid': 275, 'zipcode': 410000},
2919: {'name': '浏阳市', 'pid': 275, 'zipcode': 410000},
2920: {'name': '芙蓉区', 'pid': 275, 'zipcode': 410000},
2921: {'name': '长沙县', 'pid': 275, 'zipcode': 410000},
2922: {'name': '雨花区', 'pid': 275, 'zipcode': 410000},
2923: {'name': '天元区', 'pid': 276, 'zipcode': 412000},
2924: {'name': '攸县', 'pid': 276, 'zipcode': 412000},
2925: {'name': '株洲县', 'pid': 276, 'zipcode': 412000},
2926: {'name': '炎陵县', 'pid': 276, 'zipcode': 412000},
2927: | |
skip_save_training_statistics: (bool, default: `False`) disables
saving training statistics JSON file.
:param skip_save_model: (bool, default: `False`) disables
saving model weights and hyperparameters each time the model
improves. By default Ludwig saves model weights after each epoch
the validation metric improves, but if the model is really big
that can be time consuming. If you do not want to keep
the weights and just find out what performance a model can get
with a set of hyperparameters, use this parameter to skip it,
but the model will not be loadable later on and the returned model
will have the weights obtained at the end of training, instead of
the weights of the epoch with the best validation performance.
:param skip_save_progress: (bool, default: `False`) disables saving
progress each epoch. By default Ludwig saves weights and stats
after each epoch for enabling resuming of training, but if
the model is really big that can be time consuming and will use
twice as much space, use this parameter to skip it, but training
cannot be resumed later on.
:param skip_save_log: (bool, default: `False`) disables saving
TensorBoard logs. By default Ludwig saves logs for the TensorBoard,
but if it is not needed turning it off can slightly increase the
overall speed.
:param skip_save_processed_input: (bool, default: `False`) if input
dataset is provided it is preprocessed and cached by saving an HDF5
and JSON files to avoid running the preprocessing again. If this
parameter is `False`, the HDF5 and JSON file are not saved.
:param skip_save_unprocessed_output: (bool, default: `False`) by default
predictions and their probabilities are saved in both raw
unprocessed numpy files containing tensors and as postprocessed
CSV files (one for each output feature). If this parameter is True,
only the CSV ones are saved and the numpy ones are skipped.
:param skip_save_predictions: (bool, default: `False`) skips saving test
predictions CSV files
:param skip_save_eval_stats: (bool, default: `False`) skips saving test
statistics JSON file
:param skip_collect_predictions: (bool, default: `False`) skips
collecting post-processed predictions during eval.
:param skip_collect_overall_stats: (bool, default: `False`) skips
collecting overall stats during eval.
:param output_directory: (str, default: `'results'`) the directory that
will contain the training statistics, TensorBoard logs, the saved
model and the training progress files.
:param random_seed: (int: default: 42) random seed used for weights
initialization, splits and any other random function.
:param debug: (bool, default: `False`) if `True` turns on `tfdbg` with
`inf_or_nan` checks.
# Return
:return: (Tuple[dict, dict, tuple, str]) `(evaluation_statistics, training_statistics, preprocessed_data, output_directory)`
`evaluation_statistics` dictionary with evaluation performance
statistics on the test_set,
`training_statistics` is a dictionary of training statistics for
each output
feature containing loss and metrics values for each epoch,
`preprocessed_data` tuple containing preprocessed
`(training_set, validation_set, test_set)`, `output_directory`
filepath string to where results are stored.
"""
(
train_stats,
preprocessed_data,
output_directory
) = self.train(
dataset=dataset,
training_set=training_set,
validation_set=validation_set,
test_set=test_set,
training_set_metadata=training_set_metadata,
data_format=data_format,
experiment_name=experiment_name,
model_name=model_name,
model_load_path=model_load_path,
model_resume_path=model_resume_path,
skip_save_training_description=skip_save_training_description,
skip_save_training_statistics=skip_save_training_statistics,
skip_save_model=skip_save_model,
skip_save_progress=skip_save_progress,
skip_save_log=skip_save_log,
skip_save_processed_input=skip_save_processed_input,
skip_save_unprocessed_output=skip_save_unprocessed_output,
output_directory=output_directory,
random_seed=random_seed,
debug=debug,
)
(training_set,
validation_set,
test_set,
training_set_metadata) = preprocessed_data
eval_set = validation_set
if eval_split == TRAINING:
eval_set = training_set
elif eval_split == VALIDATION:
eval_set = validation_set
elif eval_split == TEST:
eval_set = test_set
else:
logger.warning(f"Eval split {eval_split} not supported. "
f"Using validation set instead")
if eval_set is not None:
if self.config[TRAINING]['eval_batch_size']:
batch_size = self.config[TRAINING]['eval_batch_size']
else:
batch_size = self.config[TRAINING]['batch_size']
# predict
try:
eval_stats, _, _ = self.evaluate(
eval_set,
data_format=data_format,
batch_size=batch_size,
output_directory=output_directory,
skip_save_unprocessed_output=skip_save_unprocessed_output,
skip_save_predictions=skip_save_predictions,
skip_save_eval_stats=skip_save_eval_stats,
collect_predictions=not skip_collect_predictions,
collect_overall_stats=not skip_collect_overall_stats,
return_type='dict',
debug=debug
)
except NotImplementedError:
logger.warning(
"Skipping evaluation as the necessary methods are not "
"supported. Full exception below:\n"
f"{traceback.format_exc()}"
)
eval_stats = None
else:
logger.warning(f"The evaluation set {eval_set} was not provided. "
f"Skipping evaluation")
eval_stats = None
return eval_stats, train_stats, preprocessed_data, output_directory
def collect_weights(
        self,
        tensor_names: List[str] = None,
        **kwargs
) -> list:
    """Fetch weight tensors with the given names from the trained model.

    # Inputs
    :param tensor_names: (list, default: `None`) names of the tensors
        whose weights should be collected.

    # Return
    :return: (list) the collected weight tensors.
    """
    self._check_initialization()
    return self.model.collect_weights(tensor_names)
def collect_activations(
        self,
        layer_names: List[str],
        dataset: Union[str, Dict[str, list], pd.DataFrame],
        data_format: str = None,
        split: str = FULL,
        batch_size: int = 128,
        debug: bool = False,
        **kwargs
) -> list:
    """Run input data through the pre-trained model and collect the values
    of the activation tensors for the requested layers.

    # Inputs
    :param layer_names: (list) names of the model layers whose activations
        are collected.
    :param dataset: (Union[str, Dict[str, list], pandas.DataFrame]) source
        containing the data to run through the model.
    :param data_format: (str, default: `None`) format used to interpret the
        data source. Inferred automatically when not specified. Valid
        formats are `'auto'`, `'csv'`, `'df'`, `'dict'`, `'excel'`, `'feather'`,
        `'fwf'`, `'hdf5'` (cache file produced during previous training),
        `'html'` (file containing a single HTML `<table>`), `'json'`, `'jsonl'`,
        `'parquet'`, `'pickle'` (pickled Pandas DataFrame), `'sas'`, `'spss'`,
        `'stata'`, `'tsv'`.
    :param split: (str, default: `'full'`) which split of the input dataset
        to use: `'full'`, `'training'`, `'validation'` or `'test'`.
    :param batch_size: (int, default: 128) batch size used while predicting.
    :param debug: (bool, default: `False`) if `True` turns on `tfdbg`
        with `inf_or_nan` checks.

    # Return
    :return: (list) list of collected activation tensors.
    """
    self._check_initialization()

    # Preprocess the raw dataset using the metadata captured at training time;
    # outputs are not needed for activation collection.
    logger.debug('Preprocessing')
    proc_dataset, _ = preprocess_for_prediction(
        self.config,
        dataset=dataset,
        training_set_metadata=self.training_set_metadata,
        data_format=data_format,
        split=split,
        include_outputs=False
    )

    logger.debug('Predicting')
    predictor_cm = self.backend.create_predictor(
        batch_size=batch_size,
        debug=debug
    )
    with predictor_cm as predictor:
        return predictor.batch_collect_activations(
            self.model,
            layer_names,
            proc_dataset,
        )
def preprocess(
self,
dataset: Union[str, dict, pd.DataFrame] = None,
training_set: Union[str, dict, pd.DataFrame] = None,
validation_set: Union[str, dict, pd.DataFrame] = None,
test_set: Union[str, dict, pd.DataFrame] = None,
training_set_metadata: Union[str, dict] = None,
data_format: str = None,
skip_save_processed_input: bool = True,
random_seed: int = default_random_seed,
debug: bool = False,
**kwargs
) -> Tuple[Dataset, Dataset, Dataset, dict]:
"""This function is used to preprocess data.
# Inputs
:param dataset: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing the entire dataset to be used in the experiment.
If it has a split column, it will be used for splitting
(0 for train, 1 for validation, 2 for test),
otherwise the dataset will be randomly split.
:param training_set: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing training data.
:param validation_set: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing validation data.
:param test_set: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing test data.
:param training_set_metadata: (Union[str, dict], default: `None`)
metadata JSON file or loaded metadata. Intermediate preprocessed
structure containing the mappings of the input
dataset created the first time an input file is used in the same
directory with the same name and a '.meta.json' extension.
:param data_format: (str, default: `None`) format to interpret data
sources. Will be inferred automatically if not specified. Valid
formats are `'auto'`, `'csv'`, `'df'`, `'dict'`, `'excel'`,
`'feather'`, `'fwf'`,
`'hdf5'` (cache file produced during previous training),
`'html'` (file containing a single HTML `<table>`),
`'json'`, `'jsonl'`, `'parquet'`,
`'pickle'` (pickled Pandas DataFrame),
`'sas'`, `'spss'`, `'stata'`, `'tsv'`.
:param skip_save_processed_input: (bool, default: `False`) if input
dataset is provided it is preprocessed and cached by saving an HDF5
and JSON files to avoid running the preprocessing again. If this
parameter is `False`, the HDF5 and JSON file are not saved.
:param output_directory: (str, default: `'results'`) the directory that
will contain the training statistics, TensorBoard logs, the saved
model and the training progress files.
:param random_seed: (int, default: `42`) a random seed that will be
used anywhere there is a call to a random number generator: data
splitting, parameter initialization and training set shuffling
:param debug: (bool, default: `False`) if `True` turns on `tfdbg` with
`inf_or_nan` checks.
# Return
:return: (Tuple[dict, Union[dict, pd.DataFrame], str]) tuple containing
`(training_statistics, preprocessed_data, output_directory)`.
`training_statistics` is a dictionary of training statistics
for each output feature containing loss and metrics values
for each epoch.
`preprocessed_data` is the tuple containing these three data sets
`(training_set, validation_set, test_set)`.
`output_directory` filepath to where training results are stored.
"""
# preprocess
preprocessed_data = preprocess_for_training(
self.config,
dataset=dataset,
training_set=training_set,
validation_set=validation_set,
test_set=test_set,
training_set_metadata=training_set_metadata,
data_format=data_format,
| |
"""Normalization helper functions"""
from __future__ import absolute_import, print_function, division
import numpy as np
from cddm.conf import CDTYPE, FDTYPE
from cddm.print_tools import print1,print2, enable_prints, disable_prints
from cddm._core_nb import _normalize_cdiff_1,_normalize_cdiff_3,\
_normalize_ccorr_0,_normalize_ccorr_1,_normalize_ccorr_2,_normalize_ccorr_2b,_normalize_ccorr_3,_normalize_ccorr_3b
from cddm._core_nb import weighted_sum, weight_from_g, weight_from_d, sigma_weighted, sigma_prime_weighted,\
weight_prime_from_g, weight_prime_from_d
import cddm._core_nb as _nb
import cddm.avg as _avg
#NORM_BASELINE = 0
#"""baseline normalization flag"""
NORM_NONE = 0
"""none normalization flag"""
NORM_BASELINE = 0
# NORM_COMPENSATED = 1
# """compensated normalization (cross-diff) flag"""
# NORM_SUBTRACTED = 2
# """background subtraction normalization flag"""
# NORM_WEIGHTED = 4
# """weighted normalization flag"""
NORM_STANDARD = 1
"""standard normalization flag"""
NORM_STRUCTURED= 2
"""structured normalization flag"""
NORM_WEIGHTED = NORM_STANDARD | NORM_STRUCTURED
"""weighted normalization flag"""
NORM_SUBTRACTED = 4
"""background subtraction flag"""
NORM_COMPENSATED =8 # NORM_SUBTRACTED | 8
"""compensated normalization flag"""
import sys
_thismodule = sys.modules[__name__]
def norm_from_string(value):
    """Convert a normalization description string to an integer flag value.

    Parameters
    ----------
    value : str
        A string or combination of strings (joined with ``|``) that describe
        the normalization. Any of the normalization modes 'baseline',
        'subtracted' or 'compensated' may be mixed with the calculation modes
        'structured', 'standard' or 'weighted', e.g. "subtracted|structured".

    Returns
    -------
    norm : int
        Normalization flags.

    Examples
    --------
    >>> norm_from_string("structured")
    2
    """
    norm = 0
    for token in value.split("|"):
        flag_name = "NORM_" + token.strip().upper()
        try:
            # Flag constants live at module level; resolve them by name.
            norm |= getattr(_thismodule, flag_name)
        except AttributeError:
            raise ValueError("Invalid norm string '{}'".format(token))
    # Neither the STANDARD nor the STRUCTURED bit set: default to STANDARD.
    if norm % 4 == 0:
        norm |= NORM_STANDARD
    return norm
def norm_flags(structured = False, subtracted = False, weighted = False, compensated = False):
    """Build the integer normalization flags from boolean options.

    Parameters
    ----------
    structured : bool
        Whether to set the STRUCTURED normalization flag.
    subtracted : bool
        Whether to set the SUBTRACTED normalization flag.
    weighted : bool
        Whether to set the WEIGHTED normalization flags.
    compensated : bool
        Whether to set the COMPENSATED normalization flag.

    Returns
    -------
    norm : int
        Normalization flags.

    Examples
    --------
    >>> norm_flags(structured = True)
    2
    >>> norm_flags(compensated = True)
    9
    """
    norm = NORM_NONE
    # STANDARD is implied whenever neither structured nor weighted is requested.
    if structured == False and weighted == False:
        norm = norm | NORM_STANDARD
    # OR in each explicitly requested flag.
    for enabled, flag in ((structured, NORM_STRUCTURED),
                          (compensated, NORM_COMPENSATED),
                          (subtracted, NORM_SUBTRACTED),
                          (weighted, NORM_WEIGHTED)):
        if enabled == True:
            norm = norm | flag
    return norm
def scale_factor(variance, mask = None):
    """Computes the normalization scaling factor from the variance data.

    You can divide the computed correlation data with this factor to normalize
    data between (0,1) for correlation mode, or (0,2) for difference mode.

    Parameters
    ----------
    variance : (ndarray, ndarray) or ndarray
        A variance data (as returned from :func:`.stats`): either a pair of
        per-channel variances (cross-analysis) or a single variance array.
    mask : ndarray, optional
        A boolean mask array; if computation was performed on masked data,
        this applies the mask to the variance data.

    Returns
    -------
    scale : ndarray
        A scaling factor for normalization.
    """
    if variance is None:
        raise ValueError("You must provide variance data for normalization")
    try:
        # cross-analysis: variance is a pair of per-channel variances
        v1, v2 = variance
    except (TypeError, ValueError):
        # Not unpackable into two items -> single (auto-analysis) variance.
        # FIX: the original bare `except:` also swallowed unrelated errors
        # (including KeyboardInterrupt and shape mismatches in the averaging
        # below); only the unpacking failure is caught now.
        scale = np.asarray(variance)
    else:
        scale = (np.asarray(v1) + np.asarray(v2)) / 2
    return scale if mask is None else scale[mask]
def noise_level(window, intensity):
    """Compute the camera noise level spectrum in FFT space.

    This can be used to build noise and delta parameters for data error
    estimations.

    Parameters
    ----------
    window : ndarray
        Window function used in the analysis (use np.ones if no window is
        applied); it also determines the frame size.
    intensity : float
        Mean image intensity.

    Returns
    -------
    noise : float
        Expected image noise level.

    Examples
    --------
    >>> window = np.ones(shape = (512,512))
    >>> noise_level(window, intensity = 200)
    52428800.0

    If you multiplied the image with a constant, e.g. 10 do

    >>> noise_level(window*10, intensity = 200)
    5242880000.0

    Noise level is 100 times larger in this case and is not the same as

    >>> noise_level(window, intensity = 2000)
    524288000.0
    """
    # Noise scales with the total window power times the mean intensity.
    window_power = np.abs(window) ** 2
    return window_power.sum() * intensity
def noise_mean(corr, scale_factor = 1., mode = "corr"):
    """Computes the scaled mean noise from the correlation data estimator.

    This is the delta parameter for weighted normalization.

    Parameters
    ----------
    corr : ndarray
        Correlation function (or difference function) model; the last axis
        is the time axis.
    scale_factor : ndarray
        Scaling factor as returned by :func:`.core.scale_factor`. If not
        provided, corr data must be computed with scale = True option.
    mode : str
        Representation mode of the data, either 'corr' (default) or 'diff'.

    Returns
    -------
    delta : ndarray
        Scaled delta value, clipped to [0, 1].
    """
    scale_arr = np.asarray(scale_factor)
    # Normalize the estimator; broadcasting over the trailing (time) axis.
    g = np.divide(corr, scale_arr[..., None])
    if mode == "corr":
        # Noise is whatever the zero-lag correlation falls short of 1.
        return np.clip(1 - g[..., 0], 0, 1)
    if mode == "diff":
        # In difference mode the zero-lag value is twice the noise level.
        return np.clip(g[..., 0] / 2, 0, 1)
    raise ValueError("Wrong mode.")
def noise_delta(variance, mask = None, scale = True):
"""Computes the scalled noise difference from the variance data.
This is the delta parameter for weighted normalization.
Parameters
----------
variance : (ndarray, ndarray)
A variance data (as returned from :func:`.stats`)
mask : ndarray
A boolean mask array, if computation was performed on masked data,
this applys mask to the variance data.
Returns
-------
delta : ndarray
Scalled delta value.
"""
try:
v1, v2 = variance
delta = (np.asarray(v2) - np.asarray(v1))
scale = (np.asarray(v1) + np.asarray(v2)) if scale == True else 2.
delta /= scale
except:
#single data has delta = 0.
scale = np.asarray(variance)
delta = np.zeros_like(scale)
if mask is None:
return delta
else:
return delta[mask]
def weight_from_data(corr, delta = 0., scale_factor = 1., mode = "corr", pre_filter = True):
    """Computes the weighting function for weighted normalization.

    Parameters
    ----------
    corr : ndarray
        Correlation (or difference) data.
    delta : ndarray
        Delta parameter (see :func:`noise_delta`).
    scale_factor : ndarray
        Scaling factor as returned by :func:`.core.scale_factor`. If not
        provided, corr data must be computed with scale = True option.
    mode : str
        Representation mode of the data, either 'corr' (default) or 'diff'.
    pre_filter : bool
        Whether to perform denoising and filtering. If set to False, the user
        has to perform data filtering.

    Returns
    -------
    out : ndarray
        Weight data for weighted sum calculation.
    """
    scale_arr = np.asarray(scale_factor, FDTYPE)
    delta_arr = np.asarray(delta, FDTYPE)
    bound = scale_arr[..., None]
    if mode == "corr":
        data = corr
        if pre_filter == True:
            # enforce a decreasing correlation clipped to [0, scale]
            data = _avg.decreasing(_avg.denoise(data))
            data = _avg.denoise(np.clip(data, 0., bound))
        return weight_from_g(np.divide(data, bound), delta_arr[..., None])
    if mode == "diff":
        data = corr
        if pre_filter == True:
            # enforce an increasing difference clipped to [0, 2*scale]
            data = _avg.increasing(_avg.denoise(data))
            data = _avg.denoise(np.clip(data, 0., bound * 2))
        return weight_from_d(np.divide(data, bound), delta_arr[..., None])
    raise ValueError("Wrong mode.")
def weight_prime_from_data(corr, bg1, bg2, delta = 0., scale_factor = 1., mode = "corr", pre_filter = True):
    """Computes the weighting function for weighted normalization, including
    background data.

    Parameters
    ----------
    corr : ndarray
        Correlation (or difference) data.
    bg1, bg2 : ndarray
        Background data of the two channels.
    delta : ndarray
        Delta parameter (see :func:`noise_delta`).
    scale_factor : ndarray
        Scaling factor as returned by :func:`.core.scale_factor`. If not
        provided, corr data must be computed with scale = True option.
    mode : str
        Representation mode of the data, either 'corr' (default) or 'diff'.
    pre_filter : bool
        Whether to perform denoising and filtering. If set to False, the user
        has to perform data filtering.

    Returns
    -------
    out : ndarray
        Weight data for weighted sum calculation.
    """
    scale_arr = np.asarray(scale_factor, FDTYPE)
    delta_arr = np.asarray(delta)
    b1 = np.asarray(bg1)[..., None]
    b2 = np.asarray(bg2)[..., None]
    bound = scale_arr[..., None]
    if mode == "corr":
        data = corr
        if pre_filter == True:
            # enforce a decreasing correlation clipped to [0, scale]
            data = _avg.decreasing(_avg.denoise(data))
            data = _avg.denoise(np.clip(data, 0., bound))
        return weight_prime_from_g(np.divide(data, bound), delta_arr[..., None], b1, b2)
    if mode == "diff":
        data = corr
        if pre_filter == True:
            # enforce an increasing difference clipped to [0, 2*scale]
            data = _avg.increasing(_avg.denoise(data))
            data = _avg.denoise(np.clip(data, 0., bound * 2))
        return weight_prime_from_d(np.divide(data, bound), delta_arr[..., None], b1, b2)
    raise ValueError("Wrong mode.")
def _norm_from_ccorr_data(data, norm = None):
"""Determines normalization type from ccorr data"""
try:
d, c, sq, s1, s2 = data
except ValueError:
raise ValueError("Not a valid correlation data")
if s1 is None:
if sq is None:
default_norm = NORM_STANDARD
available_norm = NORM_STANDARD
else:
default_norm = NORM_WEIGHTED
available_norm = NORM_WEIGHTED
else:
if sq is None:
default_norm = NORM_STANDARD | NORM_SUBTRACTED
available_norm = NORM_STANDARD | NORM_COMPENSATED | NORM_SUBTRACTED
else:
default_norm = NORM_WEIGHTED | NORM_SUBTRACTED
available_norm = NORM_WEIGHTED | NORM_COMPENSATED | NORM_SUBTRACTED
if norm is None:
return default_norm
else:
if norm | |
# repository: ciex/souma
import logging
import json
from dateutil.parser import parse as dateutil_parse
from uuid import uuid4
from nucleus import create_session, notification_signals, PersonaNotFoundError, UnauthorizedError, VesicleStateError, CHANGE_TYPES
from nucleus.models import Persona, Star, Planet, Starmap, Group, Oneup
from nucleus.vesicle import Vesicle
from synapse.electrical import ElectricalSynapse
from web_ui import app
# These are Vesicle options which are recognized by this Synapse
ALLOWED_MESSAGE_TYPES = [
"object",
"object_request",
]
OBJECT_TYPES = ("Star", "Planet", "Persona", "Starmap", "Group", "Oneup")
class Synapse():
"""
A Synapse reacts to local changes in the database and transmits
them to each Persona's peers using the Myelin API. It also keeps
Glia up to date on all Persona's managed by this Souma.
"""
_instance = None
def __new__(cls, *args, **kwargs):
"""Singleton pattern"""
if not cls._instance:
cls._instance = super(ElectricalSynapse, cls).__new__(cls, *args, **kwargs)
return cls._instance
    def __init__(self):
        """Set up logging, load this Souma's Starmap, connect to Glia and
        subscribe to the nucleus notification signals."""
        self.logger = logging.getLogger('synapse')
        self.logger.setLevel(app.config['LOG_LEVEL'])
        # Core setup: load the Starmap record identified by this Souma's id
        self.starmap = Starmap.query.get(app.config['SOUMA_ID'])
        # Connect to glia
        self.electrical = ElectricalSynapse(parent=self)
        # Connect to nucleus
        self._connect_signals()
def _connect_signals(self):
"""
Connect to Blinker signals which are registered in nucleus.__init__
"""
signal = notification_signals.signal
signal('local-model-changed').connect(self.on_local_model_change)
signal('new-contact').connect(self.on_new_contact)
signal('request-objects').connect(self.on_request_objects)
def _distribute_vesicle(self, vesicle, recipients=None):
"""
Encrypt, sign and distribute vesicle to `recipients` using Myelin
Args:
vesicle (Vesicle): The message to transmit.
recipients (List): List of Persona objects. If recipients is not
empty, the Vesicle is encrypted and the key is transmitted for
this list of Personas.
Returns:
Vesicle: The (signed and encrypted) Vesicle object
"""
self.logger.debug("Distributing {} to {} recipients {}".format(
vesicle,
len(recipients) if recipients is not None else "0",
"via Myelin" if app.config["ENABLE_MYELIN"] else ""))
if not hasattr(vesicle, "author"):
raise ValueError("Can't send Vesicle without defined author")
if vesicle.encrypted():
keycrypt = json.loads(vesicle.keycrypt)
# First remove everyone from keycrypt that is not a current recipient
keycrypt = json.loads(vesicle.keycrypt)
remove_recipients = set(keycrypt.keys()) - set([r.id for r in recipients])
for recipient_id in remove_recipients:
if recipient_id != vesicle.author_id: # Don't remove author!
del keycrypt[recipient_id]
vesicle.keycrypt = json.dumps(keycrypt)
# Then add the new recipients
vesicle.add_recipients(recipients)
self.logger.debug("{} was already encrypted: Modified keycrypt.".format(vesicle))
else:
vesicle.encrypt(recipients=recipients)
try:
vesicle.sign()
except VesicleStateError:
self.logger.info("{} was already signed".format(vesicle))
if app.config["ENABLE_MYELIN"]:
self.electrical.myelin_store(vesicle)
return vesicle
def _find_source(self, obj):
"""Return a list of possible sources for object.
A Persona qualifies as source if they have obj in their starmaps,
have a controlled Persona as a contact.
Args:
obj (Star, Planet or Starmap): Object to find a source for
Returns:
list: Possible sources
"""
# Return True if at least one of controlled personas is a contact of p
connected_to = lambda p: len(p.contacts.filter(Persona.crypt_private != "")) > 0
sources = list()
if isinstance(obj, Star):
if connected_to(obj.author):
sources.append(obj.author)
for starmap in obj.starmaps:
if connected_to(starmap.author):
sources.append(starmap.author)
elif isinstance(obj, Planet):
for star in obj.stars:
if connected_to(star.author):
sources.append(star.author)
for starmap in star.starmaps:
if connected_to(starmap.author):
sources.append(starmap.author)
elif isinstance(obj, Starmap):
if connected_to(obj.author):
sources.append(obj.author)
# TODO: Sort by last seen
return sources
def _log_errors(self, msg, errors, level="error"):
"""
Log a list of errors to the logger
Args:
msg(str): A message describing the error source
errors(list): A list of error messages
Raises:
ValueError: If the specified log level is invalid
"""
if level not in ["debug", "info", "warning", "error"]:
raise ValueError("Invalid log level {}".format(level))
call = getattr(self.logger, level)
call("{msg}:\n{list}".format(msg=msg, list="\n* ".join(str(e) for e in errors)))
def handle_object(self, vesicle, reader_persona, session):
"""
Handle received object updates by verifying the request and calling
an appropriate handler
Args:
vesicle (Vesicle): Vesicle containing the object changeset
reader_persona (Persona): Persona used for decrypting the Vesicle
"""
# Validate response
errors = list()
try:
action = vesicle.data["action"]
object_type = vesicle.data["object_type"]
obj = vesicle.data["object"]
author = vesicle.author
except KeyError, e:
errors.append("Missing key: {}".format(e))
if object_type not in OBJECT_TYPES:
errors.append("Unknown object type: {}".format(object_type))
if action not in CHANGE_TYPES:
errors.append("Unknown action type '{}'".format(action))
if errors:
self.logger.error("Malformed object received\n{}".format("\n".join(errors)))
else:
handler = getattr(self, "object_{}".format(action))
new_obj = handler(author, reader_persona, object_type, obj, session)
if new_obj is not None:
vesicle.handled = True
new_obj.vesicles.append(vesicle)
session.add(new_obj)
session.add(vesicle)
session.commit()
def handle_object_request(self, vesicle, reader_persona, session):
"""
Act on received object requests by sending the object in question back
Args:
vesicle (Vesicle): Vesicle containing metadata about the object
"""
# Validate vesicle
errors = []
object_id = None
object_type = None
try:
object_id = vesicle.data["object_id"]
object_type = vesicle.data["object_type"]
recipient = vesicle.author
except KeyError, e:
errors.append("missing ({})".format(vesicle, e))
if object_type not in OBJECT_TYPES:
errors.append("invalid object_type: {}".format(object_type))
if errors:
self._log_errors("Received invalid object request", errors)
else:
# Load object
obj_class = globals()[object_type]
obj = obj_class.query.get(object_id)
if obj is None:
self.logger.error("Requested object <{type} {id}> not found".format(
type=object_type, id=object_id[:6]))
elif hasattr(obj, "author") and recipient not in obj.author.contacts:
self.logger.info("Requested {} not published for request author {}".format(obj, recipient))
elif isinstance(obj, Persona) and recipient not in obj.contacts:
self.logger.info("Requested {} does not have requesting {} as a contact.".format(obj, recipient))
else:
for v in obj.vesicles:
# Send response
# Modified vesicles (re-encrypted) don't get saved to DB
self._distribute_vesicle(v, recipients=[recipient, ])
self.logger.info("Sent {}'s {} vesicles to {}".format(
obj, len(obj.vesicles), recipient
))
vesicle.handled = True
session.add(vesicle)
session.commit()
    def handle_vesicle(self, data):
        """
        Parse received vesicles and call handler

        Args:
            data (String): JSON encoded Vesicle

        Returns:
            Vesicle: The Vesicle that was decrypted and loaded
            None: If no Vesicle could be loaded
        """
        try:
            vesicle = Vesicle.read(data)
        except PersonaNotFoundError, e:
            # The Vesicle references a Persona unknown to this Souma: try to
            # fetch the Persona's info from the server, then parse again.
            self.logger.info("Received Vesicle from unknown Persona, trying to retrieve Persona info.")
            # NOTE(review): resp is never used after this call
            resp, errors = self.electrical.persona_info(e[0])
            if errors:
                self.logger.warning("Could not retrieve unknown Persona from server:\n{}".format(", ".join(errors)))
                return
            else:
                vesicle = Vesicle.read(data)
        if vesicle is None:
            self.logger.error("Failed handling Vesicle due to decoding error")
        else:
            # Reuse the stored copy when this Vesicle was seen before
            old_vesicle = Vesicle.query.get(vesicle.id)
            if old_vesicle is not None:
                vesicle = old_vesicle
            # Vesicle is loaded and has not yet been handled, start processing..
            session = create_session()
            # Decrypt if neccessary
            # NOTE(review): mid-function import; consider moving to module level
            import keyczar
            if not vesicle.decrypted():
                try:
                    reader_persona = vesicle.decrypt()
                except UnauthorizedError:
                    self.logger.info("Not authorized to decrypt {}".format(vesicle))
                except keyczar.errors.InvalidSignatureError:
                    self.logger.warning("Failed decrypting {}: id={} h={}".format(vesicle, vesicle.id, vesicle._get_hashcode()))
                    return vesicle
            # Persist the Vesicle on first sight
            if old_vesicle is None:
                session.add(vesicle)
                session.commit()
            if not vesicle.decrypted():
                self.logger.debug("{} has encrypted payload.".format(vesicle))
            else:
                self.logger.info("{} has payload:\n{}".format(vesicle, json.dumps(vesicle.data, indent=2)))
            # Call handler depending on message type
            # NOTE(review): reader_persona is only bound when the decrypt
            # branch above ran; for a Vesicle that arrived already decrypted
            # the handler call below raises NameError -- confirm intended flow.
            try:
                if vesicle is not None and not vesicle.handled and vesicle.message_type in ALLOWED_MESSAGE_TYPES:
                    handler = getattr(self, "handle_{}".format(vesicle.message_type))
                    try:
                        handler(vesicle, reader_persona, session)
                    except UnauthorizedError, e:
                        self.logger.error("Error handling {}: {}".format(vesicle, e))
            except:
                # Roll back the session on any unexpected error, then re-raise
                session.rollback()
                raise
            finally:
                session.flush()
        return vesicle
def object_insert(self, author, recipient, object_type, obj, session):
# Handle answer
obj_class = globals()[object_type]
missing_keys = obj_class.validate_changeset(obj)
if len(missing_keys) > 0:
raise KeyError("Missing '{}' for creating {} from changeset".format(
", ".join(missing_keys), obj_class.__name__))
if hasattr(obj, "author_id") and author.id != obj["author_id"]:
raise UnauthorizedError(
"Received object_insert Vesicle author {} does not match object author [{}]".format(
author, obj["author_id"][:6]))
o = obj_class.query.get(obj["id"])
if o is None or (hasattr(o, "get_state") and o.get_state() == -1) or (isinstance(o, Persona) and o._stub is True):
o = obj_class.create_from_changeset(obj, stub=o, update_sender=author, update_recipient=recipient)
session.add(o)
if isinstance(o, Persona):
o.stub = False
else:
o.set_state(0)
self.logger.info("Inserted new {}".format(o))
return o
def object_update(self, author, recipient, object_type, obj, session):
# Verify message
obj_class = globals()[object_type]
missing_keys = obj_class.validate_changeset(obj, update=True)
if len(missing_keys) > 0:
raise KeyError("Missing '{}' for updating {} from changeset".format(obj_class.__name__))
obj_modified = dateutil_parse(obj["modified"])
# Retrieve local object copy
o = obj_class.query.get(obj["id"])
if o is None:
self.logger.warning("Received update for unknown <{} [{}]>".format(object_type, obj["id"][:6]))
o = obj_class(id=obj["id"])
o.set_state(-1)
session.add(o)
self.request_object(
object_type=object_type,
object_id=obj["id"],
author=recipient,
recipient=author,
session=session)
else:
if o.authorize("update", author.id):
if o.modified <= obj_modified or (hasattr(o, "_stub") and o._stub is True):
o.update_from_changeset(obj, update_sender=author, update_recipient=recipient)
if isinstance(o, Persona):
o.stub = False
# else:
# o.set_state(0)
session.add(o)
self.logger.info("Applied update for {}".format(o))
else:
self.logger.info("Received obsolete update ({})".format(obj))
else:
self.logger.warning("{} is not authorized to update {} - update canceled.".format(author, o))
return o
def object_delete(self, author, recipient, object_type, obj, session):
# Verify message
obj_class = globals()[object_type]
for k in ["id", "modified"]:
if k not in obj.keys():
raise KeyError("Missing '{}' for deleting {} from changeset".format(k, obj_class.__name__))
# Get the object's class from globals
o = obj_class.query.get(obj["id"])
if o is None:
self.logger.info("Request to delete unknown <{} [{}]>".format(object_type, obj["id"]))
else:
if o.authorize("delete", author.id):
if hasattr(o, "set_state"):
o.set_state(-2)
o.text = None
session.add(o)
self.logger.info("{} marked deleted".format(o))
else:
name = str(o)
session.delete(o)
o = None
self.logger.info("Permanently deleted {}".format(name))
else:
self.logger.warning("Object deletion not authorized!")
return o
def on_new_contact(self, sender, message):
for k in["new_contact_id", "author_id"]:
if k not in message:
raise KeyError("Missing message | |
# file: Machine_Learning/K_Fold_Cross_Validation.py
#!/usr/bin/env python
# coding: utf-8
# # Assignment #3
# # Student: <NAME>
# ## Importing libraries
# In[1]:
import os
import pandas as pd
import numpy as np
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier  # NOTE(review): duplicate of the import above
# ## Question 1:
# #### This question uses the data set tumores.csv: brain-tumor features including five first-order variables, eight texture variables and four quality-assessment parameters, together with the target level. The variables are: Mean, Variance, Standard deviation, Skewness, Kurtosis, Contrast, Energy, ASM (angular second moment), Entropy, Homogeneity, Dissimilarity, Correlation, Coarseness, PSNR (peak signal-to-noise ratio), SSIM (structural similarity index), MSE (mean square error), DC (dice coefficient) and the variable to predict, tipo (1 = Tumor, 0 = Non-Tumor).
# #### 1. Load the table tumores.csv in Python
# In[2]:
tumores = pd.read_csv("tumores.csv", delimiter = ',', decimal = '.')
tumores.head()
# In[3]:
tumores.info()
# In[4]:
# Convert the 'imagen' column from object to categorical
tumores['imagen'] = tumores['imagen'].astype('category')
print(tumores.info())
print(tumores.head())
# Re-encode the categories as numeric codes
tumores["imagen"] = tumores["imagen"].cat.codes
print(tumores.info())
print(tumores.head())
# Convert the integer codes back to a categorical column
tumores['imagen'] = tumores['imagen'].astype('category')
print(tumores.info())
print(tumores.head())
# In[5]:
tumores.tail() # the categorical variable has been converted to a number
# In[10]:
# Center and scale the table: the features live on different scales and
# KNN is a distance-based method, so standardization is preferable
tumores_1 = tumores.iloc[:,0:17]
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(tumores_1)
tumores_1.loc[:,:] = scaled_values
tumores_1.head()
# #### Drop the categorical variable; keep the predictor variables in X
# In[11]:
X = tumores_1.iloc[:,0:17]
X.head()
# #### Keep the variable to predict in y
# In[12]:
y = tumores.iloc[:,17:18]
y.head()
# #### 2. The goal of this exercise is to analyze the variation of the error (using the training-testing approach) for predicting the variable tipo (1 = Tumor, 0 = Non-Tumor). Repeat the computation of the global prediction error 5 times using the k nearest neighbors method (kmax=50), with 75% of the data as the training table and 25% as the testing table. Plot the results.
# #### Training-testing approach for measuring the error
# In[16]:
error_tt = []
# Bug fix: the exercise asks for 5 repetitions, but range(0, 4) only ran 4
for i in range(5):
    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size = 0.75)
    knn = KNeighborsClassifier(n_neighbors = 50)
    knn.fit(X_train, y_train.values.ravel())
    # Misclassification rate on the held-out testing table
    error_tt.append(1 - knn.score(X_test, y_test))
plt.figure(figsize=(15,10))
plt.plot(error_tt, 'o-', lw = 2)
plt.xlabel("Número de Iteración", fontsize = 15)
plt.ylabel("Error Cometido %", fontsize = 15)
plt.title("Variación del Error", fontsize = 20)
plt.grid(True)
plt.legend(['Training Testing'], loc = 'upper right', fontsize = 15)
# #### 3. The goal of this exercise is to measure the prediction error for the variable tipo using K-fold cross-validation. Using the k nearest neighbors method (kmax=50), run a cross-validation with 10 folds 5 times, plot the error obtained in each iteration, and add to this plot the 5 errors produced in the previous exercise.
# #### Approach: cross-validation with K groups (K-Fold Cross-Validation, CV)
# In[17]:
error_cv = []
# Bug fix: the exercise asks for 5 repetitions, but range(0, 4) only ran 4
for i in range(5):
    kfold = KFold(n_splits = 10, shuffle = True)
    error_folds = []
    for train, test in kfold.split(X, y):
        knn = KNeighborsClassifier(n_neighbors = 50)
        knn.fit(X.iloc[train], y.iloc[train].values.ravel())
        # Misclassification rate on the current fold
        error_folds.append((1 - knn.score(X.iloc[test], y.iloc[test])))
    # Average error over the 10 folds of this repetition
    error_cv.append(np.mean(error_folds))
plt.figure(figsize=(15,10))
plt.plot(error_tt, 'o-', lw = 2)
plt.plot(error_cv, 'o-', lw = 2)
plt.xlabel("Número de Iteración", fontsize = 15)
plt.ylabel("Error Cometido", fontsize = 15)
plt.title("Variación del Error", fontsize = 20)
plt.grid(True)
plt.legend(['Training Testing', 'K-Fold CV'], loc = 'upper right', fontsize = 15)
# #### 4. What can we conclude?
# #### Conclusion:
#
# * Plotting the first training-testing approach shows that the error jumps around a lot, so it is not a reliable way to measure the error: it is not stable and ranges from 5.65% to almost 9.5% over the 5 repetitions performed. With K-fold cross-validation the error is stable, staying just above 7.5% across all 5 repetitions of the computation, which gives confidence in the error estimate obtained with this last form of validation.
# ## Question 2:
# #### This question also uses the data tumores.csv.
# #### 1. The goal of this exercise is to calibrate the RandomForestClassifier method for this data table, predicting the variable tipo. Run cross-validations with 10 folds, calibrating the model with the two split-quality criteria it supports, gini and entropy. Use sklearn's KFold and make a comparative bar chart.
# In[21]:
cadena = "=== Importando datos de nuevo ya que este metodo se basa en arboles y es mejor no centrar y reducir la tabla ==="
print(cadena.center(140, " "))
# In[28]:
# Reload the data: this method is tree-based, so the table is intentionally
# not centered and scaled
tumores = pd.read_csv("tumores.csv", delimiter = ',', decimal = '.')
# Convert the 'imagen' column from object to categorical
tumores['imagen'] = tumores['imagen'].astype('category')
# Re-encode the categories as numeric codes
tumores["imagen"] = tumores["imagen"].cat.codes
# Convert the integer codes back to a categorical column
tumores['imagen'] = tumores['imagen'].astype('category')
tumores.tail() # the categorical variable has been converted to a number
# #### Drop the categorical variable; keep the predictor variables in X
# In[29]:
X = tumores.iloc[:,0:17]
# #### Keep the variable to predict in y
# In[30]:
y = tumores.iloc[:,17:18]
# #### Random Forest
# #### ------> First with the "Gini" criterion
# In[46]:
from sklearn.ensemble import RandomForestClassifier
instancia_kfold = KFold(n_splits=10)
porcentajes = cross_val_score(RandomForestClassifier(n_estimators=10, criterion = 'gini'), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_bosques_gini = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### -------> Second with the "Entropy" criterion
# In[47]:
from sklearn.ensemble import RandomForestClassifier
instancia_kfold = KFold(n_splits=10)
porcentajes = cross_val_score(RandomForestClassifier(n_estimators=10, criterion = 'entropy'), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_bosques_entropy = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Comparative chart
# In[48]:
plt.figure(figsize=(8,5))
alto = [res_bosques_gini, res_bosques_entropy]
barras = ('Gini','Entropia')
y_pos = np.arange(len(barras))
plt.bar(y_pos, alto, color=['purple', 'orange'])
plt.xticks(y_pos, barras)
plt.show()
# #### 2. Which algorithm would you use based on the information obtained in the two previous exercises?
# #### Analysis
#
# * Based on the information obtained, either of the two criteria can be used: both "entropy" and "gini" reach a detection average of 99%, which makes them equivalent for this particular dataset.
# ## Question 3:
# #### This question also uses the data tumores.csv.
# #### 1. The goal of this exercise is to calibrate the KNeighborsClassifier method for this data table, predicting the variable tipo. Run cross-validations with 5 folds, calibrating the model with every option of the algorithm parameter: auto, ball_tree, kd_tree and brute. Make a comparative bar chart. Can the best algorithm be clearly determined? Use sklearn's KFold.
# In[49]:
cadena = "=== Centrando y reduciendo la tabla ya que este metodo se basa en distancias ==="
print(cadena.center(140, " "))
# In[50]:
# Center and scale the table: the features live on different scales and
# KNN is a distance-based method, so standardization is preferable
tumores_1 = tumores.iloc[:,0:17]
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(tumores_1)
tumores_1.loc[:,:] = scaled_values
tumores_1.head()
# #### Definition of X and y
# In[51]:
X = tumores_1.iloc[:,0:17]
y = tumores.iloc[:,17:18]
# #### KNN method
# #### -----> First with the "auto" algorithm
# In[52]:
from sklearn.neighbors import KNeighborsClassifier
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(KNeighborsClassifier(n_neighbors=80, algorithm = 'auto'), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_knn_auto = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### -----> Second with the "ball_tree" algorithm
# In[53]:
from sklearn.neighbors import KNeighborsClassifier
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(KNeighborsClassifier(n_neighbors=80, algorithm = 'ball_tree'), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_knn_ball = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### -----> Third with the "kd_tree" algorithm
# In[54]:
from sklearn.neighbors import KNeighborsClassifier
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(KNeighborsClassifier(n_neighbors=80, algorithm = 'kd_tree'), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_knn_kd = porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### -----> Fourth with the "brute" algorithm
# In[55]:
from sklearn.neighbors import KNeighborsClassifier
instancia_kfold = KFold(n_splits=5)
porcentajes = cross_val_score(KNeighborsClassifier(n_neighbors=80, algorithm = 'brute'), X, y.iloc[:,0].values, cv=instancia_kfold)
print("Porcentaje de detección por grupo:\n{}".format(porcentajes))
res_knn_brute= porcentajes.mean()
print("Promedio de detección: {:.2f}".format(porcentajes.mean()))
# #### Grafico Comparativo
# In[58]:
plt.figure(figsize=(8,5))
alto = [res_knn_auto,res_knn_ball, res_knn_kd, | |
'''
def run_process_stdinput(args_lst, byte_obj):
    """Run an external command, feeding byte_obj to its STDIN.

    Args:
        args_lst (list): Command and arguments for subprocess.run.
        byte_obj (bytes): Data passed to the child process via STDIN.

    Returns:
        subprocess.CompletedProcess: Result of the successful call.

    Exits the program with status 1 when the command fails.
    """
    try:
        if args.verbosity > 0:
            print("Trying to execute the following command with input from STDIN:")
            print(" ".join(args_lst))
        completed = subprocess.run(args_lst, input=byte_obj,
                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if args.verbosity > 0:
            print("Suceeded in executing command.")
        if completed.returncode != 0:
            frameinfo = getframeinfo(currentframe())
            print('Error in file ' + frameinfo.filename + ' at line ' +
                  str(frameinfo.lineno) + ': '
                  + "run_process_stdinput: return code of subprocess was "
                  + str(completed.returncode))
            quit(1)
        return completed
    except subprocess.CalledProcessError as prcsexc:
        frameinfo = getframeinfo(currentframe())
        print('Error in file ' + frameinfo.filename + ' at line ' +
              str(frameinfo.lineno) + ': ' +
              "Failed executing: ", " ".join(prcsexc.args))
        print("Error code: ", prcsexc.returncode, prcsexc.output)
        quit(1)
''' Function that converts a bam file to a bedgraph '''
def bam_to_bedgraph(bams):
    """Sort and merge the given bam files, then derive an RNA-seq coverage
    bedgraph and an intron gff with GeMoMa ERE.

    Args:
        bams (list): Paths of the input bam files.

    Returns:
        tuple: Paths of the coverage bedgraph and the intron gff file.
    """
    # currently only unstranded libraries
    # Sort every bam file into the temporary directory
    sorted_bams = []
    for idx, bam_file in enumerate(bams):
        sorted_bam = tmp_dir + 'rnaseq_' + str(idx) + '_s.bam'
        run_simple_process([samtools, "sort", '-@',
                            str(args.cores), bam_file, "-o", sorted_bam])
        sorted_bams.append(sorted_bam)
    # Merge the sorted bam files into a single alignment file
    merged_file = tmp_dir + 'rnaseq_merged.bam'
    run_simple_process([samtools, "merge", merged_file] + sorted_bams)
    # Generate the coverage bedgraph and intron gff with GeMoMa ERE
    run_simple_process([java, '-jar', jar, 'CLI', 'ERE', 'm=' +
                        merged_file, 'u=true', 'c=true', 'outdir=' + tmp_dir])
    return tmp_dir + 'coverage.bedgraph', tmp_dir + 'introns.gff'
''' Function that finds complete genes in a gtf file
(they have both start_codon and stop_codon) '''
def gtf_filter_complete(gtf_file):
    """Filter a gtf file down to complete genes, i.e. genes whose
    transcripts carry both a start_codon and a stop_codon feature.

    Args:
        gtf_file (str): Path to the input gtf file.

    Returns:
        str: Path of the filtered gtf file (tmp_dir + 'complete.gtf').

    Exits the program with status 1 on I/O errors.
    """
    has_start = {}
    has_stop = {}
    # First pass: record which transcripts have start and stop codons
    try:
        with open(gtf_file, "r") as gtf_handle:
            for line in gtf_handle:
                if re.search(r'transcript_id "([^"]+)";', line):
                    txid = re.search(
                        r'transcript_id "([^"]+)";', line).group(1)
                    if re.search(r'\tstart_codon\t', line):
                        has_start[txid] = 1
                    elif re.search(r'\tstop_codon\t', line):
                        has_stop[txid] = 1
    except IOError:
        frameinfo = getframeinfo(currentframe())
        print('Error in file ' + frameinfo.filename + ' at line ' +
              str(frameinfo.lineno) + ': ' + "Failed to open file " +
              gtf_file + " for reading!")
        quit(1)
    # Transcripts with both codons are complete
    has_both = {}
    for key in has_start.keys():
        if key in has_stop:
            has_both[key] = 1
    # Derive gene ids from the complete transcript ids (gid.txid convention)
    has_both_gids = {}
    for key in has_both.keys():
        gid = re.sub(r'([^.]+)\.[^.]+', r'\1', key)
        has_both_gids[gid] = 1
    filtered_file = tmp_dir + 'complete.gtf'
    # Second pass: copy only lines belonging to complete genes/transcripts
    try:
        with open(filtered_file, "w") as compl_handle:
            try:
                with open(gtf_file, "r") as gtf_handle:
                    for line in gtf_handle:
                        if re.search(r'\tgene\t', line):
                            gid = re.search(r'\t([^\t]+)\n', line).group(1)
                            if gid in has_both_gids:
                                compl_handle.write(line)
                        elif re.search(r'\ttranscript\t', line):
                            if re.search('\ttranscript_id \"[^"]+\"', line):
                                txid = re.search('\ttranscript_id \"([^"]+)\"', line).group(1)
                            else:
                                txid = re.search(r'\t([^\t]+)\n', line).group(1)
                            if txid in has_both:
                                compl_handle.write(line)
                        elif re.search(r'transcript_id "[^"]+";', line):
                            txid = re.search(
                                r'transcript_id "([^"]+)";', line).group(1)
                            if txid in has_both:
                                compl_handle.write(line)
            except IOError:
                frameinfo = getframeinfo(currentframe())
                print('Error in file ' + frameinfo.filename + ' at line ' +
                      str(frameinfo.lineno) + ': ' + "Failed to open file " +
                      gtf_file + " for reading!")
                quit(1)
    except IOError:
        frameinfo = getframeinfo(currentframe())
        # Bug fix: this message referenced the undefined name 'compl_file',
        # which raised a NameError inside the error handler.
        print('Error in file ' + frameinfo.filename + ' at line ' +
              str(frameinfo.lineno) + ': ' + "Failed to open file " +
              filtered_file + " for writing!")
        quit(1)
    return filtered_file
''' Function that converts AUGUSTUS gtf format to gff3 format '''
def gtf2gff3(gtf_file):
    """Convert an AUGUSTUS gtf file to gff3 format using the gtf2gff script.

    Args:
        gtf_file (str): Path to the AUGUSTUS gtf file.

    Returns:
        str: Path of the converted file (tmp_dir + 'complete.gff3').

    Exits the program with status 1 on I/O errors.
    """
    try:
        with open(gtf_file, "r") as gtf_handle:
            gtf_data = gtf_handle.read()
    except IOError:
        frameinfo = getframeinfo(currentframe())
        print('Error in file ' + frameinfo.filename + ' at line ' +
              str(frameinfo.lineno) + ': ' + "Failed to open file " +
              gtf_file + " for reading!")
        quit(1)
    gff3_file = tmp_dir + 'complete.gff3'
    # Pipe the gtf contents into the converter via STDIN
    converter_call = [gtf2gff, '--out=' + gff3_file, '--gff3']
    run_process_stdinput(converter_call, gtf_data.encode('utf-8'))
    print("Done")
    return gff3_file
''' Function that makes the AUGUSTUS gff3 compatible with what GeMoMa
expects from gff3 format '''
def augustus_gff3_to_gemoma_gff3(aug_gff3_file):
    """Rewrite an AUGUSTUS gff3 file into the gff3 flavor expected by GeMoMa.

    Every mRNA line is expanded into a GeMoMa-style gene line plus a
    prediction line; CDS lines are renumbered per gene and attached to the
    prediction via their Parent attribute.

    Args:
        aug_gff3_file (str): Path to the AUGUSTUS gff3 file.

    Returns:
        str: Path of the converted file (tmp_dir + 'complete_gemoma_like.gff3').

    Exits the program with status 1 on I/O errors.
    """
    gemoma_like_gff3_file = tmp_dir + 'complete_gemoma_like.gff3'
    try:
        with open(aug_gff3_file, 'r') as aug_handle:
            try:
                with open(gemoma_like_gff3_file, 'w') as gemoma_handle:
                    for line in aug_handle:
                        line = line.strip('\n')
                        line_elements = re.split(r'\t', line)
                        # Remember the current gene id and reset the CDS counter.
                        # NOTE(review): a CDS line before any mRNA line would hit
                        # an unbound 'gid'; input is assumed to list mRNA first.
                        if(re.search(r'\tmRNA\t.*ID=([^;]*);', line)):
                            gid = re.search(
                                r'\tmRNA\t.*ID=([^;]*);', line).group(1)
                            nCds = 1
                        if(re.search(r'\tmRNA\t', line)):
                            # Gene line: copy the first 8 columns, replacing the
                            # feature type with "gene"
                            for i in range(0, 8):
                                if i != 2:
                                    gemoma_handle.write(
                                        line_elements[i] + '\t')
                                else:
                                    gemoma_handle.write("gene\t")
                            gemoma_handle.write(
                                "ID=" + gid + ";transcripts=1;complete=1;" +
                                "maxEvidence=1;maxTie=0.3333\n")
                            # Prediction line with GeMoMa's expected attributes
                            for i in range(0, 8):
                                if i != 2:
                                    gemoma_handle.write(
                                        line_elements[i] + '\t')
                                else:
                                    gemoma_handle.write("prediction\t")
                            gemoma_handle.write(
                                "ID=" + gid + "_R1;ref-gene=NA;AA=NA;" +
                                "score=NA;tae=NA;tde=NA;tie=NA;" +
                                "minSplitReads=0;start=M;stop=*;" +
                                "evidence=1;Parent=" + gid + "\n")
                        elif(re.search(r'\tCDS\t', line)):
                            # CDS line: keep coordinates, renumber and re-parent
                            for i in range(0, 8):
                                gemoma_handle.write(line_elements[i] + '\t')
                            gemoma_handle.write(
                                "ID=" + gid + ".CDS" + str(nCds) +
                                ";Parent=" + gid + "_R1;\n")
                            nCds = nCds + 1
            except IOError:
                frameinfo = getframeinfo(currentframe())
                # Bug fix: this branch guards the output file but previously
                # reported a read failure on the input file.
                print('Error in file ' + frameinfo.filename + ' at line ' +
                      str(frameinfo.lineno) + ': ' + "Failed to open file " +
                      gemoma_like_gff3_file + " for writing!")
                quit(1)
    except IOError:
        frameinfo = getframeinfo(currentframe())
        # Bug fix: this branch guards the input file but previously reported
        # a write failure on the output file.
        print('Error in file ' + frameinfo.filename + ' at line ' +
              str(frameinfo.lineno) + ': ' + "Failed to open file " +
              aug_gff3_file + " for reading!")
        quit(1)
    return(gemoma_like_gff3_file)
''' Function that adds UTRs to the gff2 file (generated by GeMoMa from
AUGUSTUS CDS predictions and RNA-seq coverage bedgraph) '''
def add_utrs_to_gff3(gff3_file, bed_graph, intron_file):
    """Run GeMoMa AnnotationFinalizer to add UTRs to CDS gene predictions
    using RNA-seq coverage.

    Args:
        gff3_file (str): Gene predictions in gff3 format.
        bed_graph (str): RNA-seq coverage bedgraph file.
        intron_file (str): Intron hints gff file.

    Returns:
        str: Path of the finalized annotation (tmp_dir + 'final_annotation.gff').
    """
    gff3_utr_file = tmp_dir + "final_annotation.gff"
    finalizer_call = [java, '-jar', jar, 'CLI', 'AnnotationFinalizer']
    finalizer_call.extend(['u=YES', 'g=' + args.genome, 'a=' + gff3_file,
                           'i=' + intron_file, 'c=UNSTRANDED',
                           'coverage_unstranded=' + bed_graph, 'rename=NO',
                           'outdir=' + tmp_dir])
    run_simple_process(finalizer_call)
    return gff3_utr_file
''' Function that extracts UTR features from gff3 output of
GeMoMa, converts to gtf format '''
def gemoma_gff3_to_gtf(gff3_file):
    """Extract UTR features from GeMoMa's gff3 output and convert them to
    gtf format.

    Args:
        gff3_file (str): Path to the gff3 file produced by GeMoMa.

    Returns:
        str: Path of the written gtf file (tmp_dir + 'utrs.gtf').

    Exits the program with status 1 on I/O errors.
    """
    gtf_file = tmp_dir + 'utrs.gtf'
    try:
        with open(gff3_file, 'r') as gff3_handle:
            try:
                with open(gtf_file, 'w') as gtf_handle:
                    for line in gff3_handle:
                        # Only five_prime_UTR / three_prime_UTR features
                        if re.search(r'_prime_UTR\t', line):
                            line = line.strip('\n')
                            line_elements = re.split(r'\t', line)
                            # Parent attribute encodes gene and transcript id
                            # as <gid>.<tx>_... (dot-separated convention)
                            gitxids = re.search(
                                r'Parent=([^\.]+)\.([^\.]+)_', line).groups()
                            gid = gitxids[0]
                            txid = gitxids[0] + '.' + gitxids[1]
                            gtf_handle.write(
                                line_elements[0] + '\t' + line_elements[1] +
                                '\t')
                            if re.search(r'three_', line):
                                gtf_handle.write("3'-UTR\t")
                            else:
                                gtf_handle.write("5'-UTR\t")
                            # NOTE(review): fields are joined with ' \t'
                            # (space before tab) -- looks like a typo, but
                            # downstream consumers may depend on it; confirm
                            # before changing.
                            for i in range(3, 8):
                                gtf_handle.write(line_elements[i] + ' \t')
                            gtf_handle.write(
                                'transcript_id "' +
                                txid + '"; gene_id "' + gid + '";\n')
            except IOError:
                frameinfo = getframeinfo(currentframe())
                print('Error in file ' + frameinfo.filename + ' at line ' +
                      str(frameinfo.lineno) + ': ' + "Failed to open file " +
                      gtf_file + " for writing!")
                quit(1)
    except IOError:
        frameinfo = getframeinfo(currentframe())
        print('Error in file ' + frameinfo.filename + ' at line ' +
              str(frameinfo.lineno) + ': ' + "Failed to open file " +
              gff3_file + " for reading!")
        quit(1)
    return gtf_file
''' Function that identifies genes with both 5'- and 3'-UTR,
returns the name of a file with a list with tx ids '''
def find_both_utrs(gtf_file):
    """Identify transcripts that carry both a 5'- and a 3'-UTR.

    Scans *gtf_file* for 5'-UTR and 3'-UTR feature lines, collects the
    transcript ids of each kind and writes the intersection, one id per
    line, to <outfile_name_stem>_bothutr.lst.

    Returns the name of the written list file.  (Bug fix: the original
    ended with a bare ``return`` and yielded None, although the
    documented contract is to return the file name.)
    """
    both_utrs_lst = args.outfile_name_stem + '_bothutr.lst'
    # dicts rather than sets: insertion order is preserved, so the output
    # file ordering stays stable across runs.
    three_utr = {}
    five_utr = {}
    try:
        with open(gtf_file, 'r') as gtf_handle:
            for line in gtf_handle:
                if re.search(r'\t5\'-UTR\t', line):
                    tx_id = re.search(
                        r'transcript_id "([^"]+)";', line).group(1)
                    five_utr[tx_id] = 1
                elif re.search(r'\t3\'-UTR\t', line):
                    tx_id = re.search(
                        r'transcript_id "([^"]+)";', line).group(1)
                    three_utr[tx_id] = 1
    except IOError:
        frameinfo = getframeinfo(currentframe())
        print('Error in file ' + frameinfo.filename + ' at line ' +
              str(frameinfo.lineno) + ': ' + "Failed to open file " +
              gtf_file + " for reading!")
        quit(1)
    # Transcripts present in both maps have both UTR types.
    both_utr = {key: 1 for key in three_utr if key in five_utr}
    try:
        with open(both_utrs_lst, 'w') as both_handle:
            for key in both_utr:
                both_handle.write(key + '\n')
    except IOError:
        frameinfo = getframeinfo(currentframe())
        print('Error in file ' + frameinfo.filename + ' at line ' +
              str(frameinfo.lineno) + ': ' + "Failed to open file " +
              both_utrs_lst + " for writing!")
        quit(1)
    return both_utrs_lst
''' Function that merges the intial AUGUSTUS gtf file with the gene models
with UTRs from GeMoMa, merging is performed because for generating a genbank
file for training AUGUSTUS, we want to exclude genes in the neighborhood,
i.e. avoid having CDS in the flanking region, gene and tx feature coordinates
are re-computed from possible novel UTR features '''
def merge_original_with_utrs(original_gtf, utr_gtf):
all_gtf = tmp_dir + "all_intermediate.gtf"
final_gtf = args.outfile_name_stem + ".gtf"
# merge files
tmp_gtf = ""
try:
with open(original_gtf, 'r') as ori_handle:
for line in ori_handle:
if not(re.search(r'\tgene\t', line)) and \
not(re.search(r'\ttranscript\t', line)) and \
not(re.search(r'\tmRNA\t', line)):
tmp_gtf += line
except IOError:
frameinfo = getframeinfo(currentframe())
print('Error in file ' + frameinfo.filename + ' at line ' +
str(frameinfo.lineno) + ': ' + "Failed to open file " +
original_gtf + " for reading!")
quit(1)
try:
with open(utr_gtf, 'r') as utr_handle:
for line in utr_handle:
if not(re.search(r'\tgene\t', line)) and \
not(re.search(r'\ttranscript\t', line)) and \
not(re.search(r'\tmRNA\t', line)):
tmp_gtf | |
# Source file: tests/tools_ip_tests.py (from pywikibot-core)
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Test IP module/regex."""
#
# (C) Pywikibot team, 2012-2015
#
# Distributed under the terms of the MIT license.
from __future__ import unicode_literals
__version__ = '$Id: b9c364602d083b989181228423d61cf6f2a5694e $'
from distutils.version import StrictVersion
from pywikibot.tools import ip
from tests.aspects import unittest, TestCase, DeprecationTestCase
from tests.utils import expected_failure_if
class TestIPBase(TestCase):
"""Unit test class base for IP matching."""
net = False
def setUp(self):
"""Set up test."""
self.total = 0
self.fail = 0
self.errors = []
super(TestIPBase, self).setUp()
def tearDown(self):
"""Tear down test."""
super(TestIPBase, self).tearDown()
if not self.fail:
print('%d tests done' % self.total)
else:
print('%d of %d tests failed:\n%s'
% (self.fail, self.total, '\n'.join(self.errors)))
def ipv6test(self, result, IP):
"""Perform one IP test."""
self.total += 1
try:
self.assertEqual(result, self._do_ip_test(IP))
except AssertionError:
self.fail += 1
self.errors.append(
'"%s" match should be %s - not OK'
% (IP, result))
def _run_tests(self):
"""Test various IP."""
# test from http://download.dartware.com/thirdparty/test-ipv6-regex.pl
self.ipv6test(False, "") # empty string
self.ipv6test(True, "::1") # loopback, compressed, non-routable
self.ipv6test(True, "::") # unspecified, compressed, non-routable
self.ipv6test(True, "0:0:0:0:0:0:0:1") # loopback, full
self.ipv6test(True, "0:0:0:0:0:0:0:0") # unspecified, full
self.ipv6test(True, "2001:DB8:0:0:8:800:200C:417A") # unicast, full
self.ipv6test(True, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b") # multicast, full
self.ipv6test(True, "2001:DB8::8:800:200C:417A") # unicast, compressed
self.ipv6test(True, "fc00:db20:35b:7399::5") # multicast, compressed
self.ipv6test(False, "2001:DB8:0:0:8:800:200C:417A:221") # unicast, full
self.ipv6test(False, "FF01::101::2") # multicast, compressed
self.ipv6test(True, "fe80::217:f2ff:fe07:ed62")
self.ipv6test(True, "2001:0000:1234:0000:0000:C1C0:ABCD:0876")
self.ipv6test(True, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b")
self.ipv6test(True, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b")
self.ipv6test(True, "0000:0000:0000:0000:0000:0000:0000:0001")
self.ipv6test(True, "0000:0000:0000:0000:0000:0000:0000:0000")
self.ipv6test(False, " 2001:0000:1234:0000:0000:C1C0:ABCD:0876") # leading space
self.ipv6test(False, "2001:0000:1234:0000:0000:C1C0:ABCD:0876 ") # trailing space
# leading and trailing space
self.ipv6test(False, ' 2001:0000:1234:0000:0000:C1C0:ABCD:0876 ')
# junk after valid address
self.ipv6test(False, '2001:0000:1234:0000:0000:C1C0:ABCD:0876 0')
self.ipv6test(False, "2001:0000:1234: 0000:0000:C1C0:ABCD:0876") # internal space
self.ipv6test(False, "3ffe:0b00:0000:0001:0000:0000:000a") # seven segments
self.ipv6test(False, "FF02:0000:0000:0000:0000:0000:0000:0000:0001") # nine segments
self.ipv6test(False, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b::a") # double "::"
self.ipv6test(False, "::1111:2222:3333:4444:5555:6666::") # double "::"
self.ipv6test(True, "fc00:e968:6179::de52:7100")
self.ipv6test(True, "fc00:db20:35b:7399::5")
self.ipv6test(True, "fe80::")
self.ipv6test(True, "2002::")
self.ipv6test(True, "2001:db8::")
self.ipv6test(True, "2001:0db8:1234::")
self.ipv6test(True, "::ffff:0:0")
self.ipv6test(True, "::1")
self.ipv6test(True, "1:2:3:4:5:6:7:8")
self.ipv6test(True, "fdf8:f53e:61e4::18")
self.ipv6test(True, "fc00:e968:6179::de52:7100")
self.ipv6test(True, "fdf8:f53e:61e4::18")
self.ipv6test(True, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b")
self.ipv6test(True, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b")
self.ipv6test(True, "fc00:db20:35b:7399::5")
self.ipv6test(True, "fc00:db20:35b:7399::5:3:4:5:6:7")
self.ipv6test(True, "fc00:db20:35b:7399::5:3:4:5:6")
self.ipv6test(True, "fc00:db20:35b:7399::5:3:4:5")
self.ipv6test(True, "fc00:db20:35b:7399::5:3:4")
self.ipv6test(True, "fc00:db20:35b:7399::5:3")
self.ipv6test(True, "fc00:db20:35b:7399::5")
self.ipv6test(True, "::2:3:4:5:6:7:8")
self.ipv6test(True, "::2:3:4:5:6:7")
self.ipv6test(True, "::2:3:4:5:6")
self.ipv6test(True, "::2:3:4:5")
self.ipv6test(True, "::2:3:4")
self.ipv6test(True, "::2:3")
self.ipv6test(True, "::8")
self.ipv6test(True, "1:2:3:4:5:6::")
self.ipv6test(True, "1:2:3:4:5::")
self.ipv6test(True, "1:2:3:4::")
self.ipv6test(True, "1:2:3::")
self.ipv6test(True, "1:2::")
self.ipv6test(True, "1::")
self.ipv6test(True, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:8")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:5::7:8") # Double "::"
self.ipv6test(False, "12345::6:7:8")
self.ipv6test(True, "fdf8:f53e:61e4::18:8")
self.ipv6test(True, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:8")
self.ipv6test(True, "fc00:db20:35b:7399::5:8")
self.ipv6test(True, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:8")
# IPv4 addresses as dotted-quads
self.ipv6test(True, "1:2:3:4:5:6:1.2.3.4")
self.ipv6test(True, "1:2:3:4:5::1.2.3.4")
self.ipv6test(True, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b.2.3.4")
self.ipv6test(True, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b.2.3.4")
self.ipv6test(True, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b.2.3.4")
self.ipv6test(True, "fc00:db20:35b:7399::5.2.3.4")
self.ipv6test(True, "fc00:e968:6179::de52:7100:1.2.3.4")
self.ipv6test(True, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:1.2.3.4")
self.ipv6test(True, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:1.2.3.4")
self.ipv6test(True, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:1.2.3.4")
self.ipv6test(True, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:11.22.33.44")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:400.2.3.4")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:260.2.3.4")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:256.2.3.4")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:1.256.3.4")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:1.2.256.4")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:1.2.3.256")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:300.2.3.4")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:1.300.3.4")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:1.2.300.4")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:1.2.3.300")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:900.2.3.4")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:1.900.3.4")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:1.2.900.4")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:1.2.3.900")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:300.300.300.300")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:3000.30.30.30")
self.ipv6test(False, "1::400.2.3.4")
self.ipv6test(False, "1::260.2.3.4")
self.ipv6test(False, "1::256.2.3.4")
self.ipv6test(False, "1::1.256.3.4")
self.ipv6test(False, "1::1.2.256.4")
self.ipv6test(False, "fc00:db20:35b:7399::5.2.3.256")
self.ipv6test(False, "1::300.2.3.4")
self.ipv6test(False, "1::1.300.3.4")
self.ipv6test(False, "1::1.2.300.4")
self.ipv6test(False, "1::1.2.3.300")
self.ipv6test(False, "1::900.2.3.4")
self.ipv6test(False, "1::1.900.3.4")
self.ipv6test(False, "1::1.2.900.4")
self.ipv6test(False, "1::1.2.3.900")
self.ipv6test(False, "1::300.300.300.300")
self.ipv6test(False, "1::3000.30.30.30")
self.ipv6test(False, "::400.2.3.4")
self.ipv6test(False, "::260.2.3.4")
self.ipv6test(False, "::256.2.3.4")
self.ipv6test(False, "::1.256.3.4")
self.ipv6test(False, "::1.2.256.4")
self.ipv6test(False, "::1.2.3.256")
self.ipv6test(False, "::300.2.3.4")
self.ipv6test(False, "::1.300.3.4")
self.ipv6test(False, "::1.2.300.4")
self.ipv6test(False, "::1.2.3.300")
self.ipv6test(False, "::900.2.3.4")
self.ipv6test(False, "::1.900.3.4")
self.ipv6test(False, "::1.2.900.4")
self.ipv6test(False, "::1.2.3.900")
self.ipv6test(False, "::300.300.300.300")
self.ipv6test(False, "::3000.30.30.30")
self.ipv6test(True, "fe80::217:f2ff:254.7.237.98")
self.ipv6test(True, "::ffff:192.168.1.26")
self.ipv6test(False, "2001:1:1:1:1:1:255Z255X255Y255") # garbage instead of "." in IPv4
self.ipv6test(False, "::ffff:192x168.1.26") # ditto
self.ipv6test(True, "::ffff:192.168.1.1")
# IPv4-compatible IPv6 address, full, deprecated
self.ipv6test(True, '0:0:0:0:0:0:172.16.17.32')
self.ipv6test(True, "0:0:0:0:0:FFFF:192.168.127.12") # IPv4-mapped IPv6 address, full
self.ipv6test(True, "::13.1.68.3") # IPv4-compatible IPv6 address, compressed, deprecated
self.ipv6test(True, "::FFFF:192.168.127.12") # IPv4-mapped IPv6 address, compressed
self.ipv6test(True, "fe80:0:0:0:204:61ff:254.157.241.86")
self.ipv6test(True, "fe80::204:61ff:254.157.241.86")
self.ipv6test(True, "::ffff:12.34.56.78")
self.ipv6test(False, "::ffff:2.3.4")
self.ipv6test(False, "::ffff:257.1.2.3")
self.ipv6test(False, "1.2.3.4:fc00:db20:35b:7399::5") # Aeron
self.ipv6test(False, "1.2.3.4:fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b")
self.ipv6test(False, "1.2.3.4:fdf8:f53e:61e4::18")
self.ipv6test(False, "1.2.3.4:1111::5555")
self.ipv6test(False, "1.2.3.4::5555")
self.ipv6test(False, "1.2.3.4::")
# Testing IPv4 addresses represented as dotted-quads
# Leading zero's in IPv4 addresses not allowed: some systems treat the
# leading "0" in ".086" as the start of an octal number
# Update: The BNF in RFC-3986 explicitly defines the dec-octet
# (for IPv4 addresses) not to have a leading zero
self.ipv6test(False, "fe80:0000:0000:0000:0204:61ff:254.157.241.086")
self.ipv6test(True, "::ffff:192.0.2.128") # but this is OK, since there's a single digit
self.ipv6test(False, "XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:1.2.3.4")
self.ipv6test(False, "1111:2222:3333:4444:5555:6666:256.256.256.256")
# Subnet mask not accepted
self.ipv6test(False, "2001:0DB8:0000:CD30:0000:0000:0000:0000/60") # full, with prefix
self.ipv6test(False, "2001:0DB8::CD30:0:0:0:0/60") # compressed, with prefix
self.ipv6test(False, "2001:0DB8:0:CD30::/60") # compressed, with prefix #2
self.ipv6test(False, "::/128") # compressed, unspecified address type, non-routable
self.ipv6test(False, "::1/128") # compressed, loopback address type, non-routable
self.ipv6test(False, "FF00::/8") # compressed, multicast address type
self.ipv6test(False, "FE80::/10") # compressed, link-local unicast, non-routable
self.ipv6test(False, "FEC0::/10") # compressed, site-local unicast, deprecated
self.ipv6test(False, "172.16.58.3/60") # standard IPv4, prefix not allowed
self.ipv6test(True, "fe80:0000:0000:0000:0204:61ff:fe9d:f156")
self.ipv6test(True, "fe80:0:0:0:204:61ff:fe9d:f156")
self.ipv6test(True, "fe80::204:61ff:fe9d:f156")
self.ipv6test(True, "::1")
self.ipv6test(True, "fe80::")
self.ipv6test(True, "fe80::1")
self.ipv6test(False, ":")
self.ipv6test(True, "::ffff:c000:280")
# Aeron supplied these test cases
self.ipv6test(False, "fc00:db20:35b:7399::5:")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:")
self.ipv6test(False, "fdf8:f53e:61e4::18:")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:")
self.ipv6test(False, "::5555:")
self.ipv6test(False, ":::")
self.ipv6test(False, "1111:")
self.ipv6test(False, ":")
self.ipv6test(False, ":1111:2222:3333:4444::5555")
self.ipv6test(False, ":1111:2222:3333::5555")
self.ipv6test(False, ":1111:2222::5555")
self.ipv6test(False, ":1111::5555")
self.ipv6test(False, ":::5555")
self.ipv6test(False, ":::")
# Additional test cases
# from https://rt.cpan.org/Public/Bug/Display.html?id=50693
self.ipv6test(True, "2001:0db8:85a3:0000:0000:8a2e:0370:7334")
self.ipv6test(True, "2001:db8:85a3:0:0:8a2e:370:7334")
self.ipv6test(True, "2001:db8:85a3::8a2e:370:7334")
self.ipv6test(True, "2001:0db8:0000:0000:0000:0000:1428:57ab")
self.ipv6test(True, "2001:0db8:0000:0000:0000::1428:57ab")
self.ipv6test(True, "2001:0db8:0:0:0:0:1428:57ab")
self.ipv6test(True, "2001:0db8:0:0::1428:57ab")
self.ipv6test(True, "2001:0db8::1428:57ab")
self.ipv6test(True, "2001:db8::1428:57ab")
self.ipv6test(True, "0000:0000:0000:0000:0000:0000:0000:0001")
self.ipv6test(True, "::1")
self.ipv6test(True, "::ffff:0c22:384e")
self.ipv6test(True, "2001:0db8:1234:0000:0000:0000:0000:0000")
self.ipv6test(True, "2001:0db8:1234:ffff:ffff:ffff:ffff:ffff")
self.ipv6test(True, "2001:db8:a::123")
self.ipv6test(True, "fe80::")
self.ipv6test(False, "123")
self.ipv6test(False, "ldkfj")
self.ipv6test(False, "2001::FFD3::57ab")
self.ipv6test(False, "2001:db8:85a3::8a2e:37023:7334")
self.ipv6test(False, "2001:db8:85a3::8a2e:370k:7334")
self.ipv6test(False, "1:2:3:4:5:6:7:8:9")
self.ipv6test(False, "1::2::3")
self.ipv6test(False, "1:::3:4:5")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:5:6:7:8:9")
# New from Aeron
self.ipv6test(True, "fc00:e968:6179::de52:7100")
self.ipv6test(True, "1111:2222:3333:4444:5555:6666:7777::")
self.ipv6test(True, "1111:2222:3333:4444:5555:6666::")
self.ipv6test(True, "1111:2222:3333:4444:5555::")
self.ipv6test(True, "1111:2222:3333:4444::")
self.ipv6test(True, "1111:2222:3333::")
self.ipv6test(True, "1111:2222::")
self.ipv6test(True, "1111::")
# self.ipv6test(True, "::") #duplicate
self.ipv6test(True, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b")
self.ipv6test(True, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b")
self.ipv6test(True, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b")
self.ipv6test(True, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b")
self.ipv6test(True, "fc00:e968:6179::de52:7100")
self.ipv6test(True, "fc00:db20:35b:7399::5")
self.ipv6test(True, "::8888")
self.ipv6test(True, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:8888")
self.ipv6test(True, "fdf8:f53e:61e4::18:8888")
self.ipv6test(True, "fdf8:f53e:61e4::18:8888")
self.ipv6test(True, "fdf8:f53e:61e4::18:8888")
self.ipv6test(True, "fc00:e968:6179::de52:7100:8888")
self.ipv6test(True, "::7777:8888")
self.ipv6test(True, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:7777:8888")
self.ipv6test(True, "fc00:e968:6179::de52:7100:7777:8888")
self.ipv6test(True, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:7777:8888")
self.ipv6test(True, "fdf8:f53e:61e4::18:7777:8888")
self.ipv6test(True, "::6666:7777:8888")
self.ipv6test(True, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:6666:7777:8888")
self.ipv6test(True, "fdf8:f53e:61e4::18:6666:7777:8888")
self.ipv6test(True, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:6666:7777:8888")
self.ipv6test(True, "::5555:6666:7777:8888")
self.ipv6test(True, "fc00:db20:35b:7399::5:5555:6666:7777:8888")
self.ipv6test(True, "fdf8:f53e:61e4::18:5555:6666:7777:8888")
self.ipv6test(True, "::4444:5555:6666:7777:8888")
self.ipv6test(True, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:4444:5555:6666:7777:8888")
self.ipv6test(True, "::3333:4444:5555:6666:7777:8888")
self.ipv6test(True, "::2222:3333:4444:5555:6666:7777:8888")
self.ipv6test(True, "1111:2222:3333:4444:5555:6666:172.16.58.3")
self.ipv6test(True, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b.123.123.123")
self.ipv6test(True, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b.123.123.123")
self.ipv6test(True, "fdf8:f53e:61e4::18.123.123.123")
self.ipv6test(True, "fc00:e968:6179::de52:7100.123.123.123")
self.ipv6test(True, "fdf8:f53e:61e4::18.123.123.123")
self.ipv6test(True, "::123.123.123.123")
self.ipv6test(True, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:172.16.58.3")
self.ipv6test(True, "fc00:e968:6179::de52:7100:172.16.58.3")
self.ipv6test(True, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:172.16.58.3")
self.ipv6test(True, "fdf8:f53e:61e4::18:172.16.58.3")
self.ipv6test(True, "::6666:172.16.58.3")
self.ipv6test(True, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:6666:172.16.58.3")
self.ipv6test(True, "fdf8:f53e:61e4::18:6666:172.16.58.3")
self.ipv6test(True, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:6666:172.16.58.3")
self.ipv6test(True, "::5555:6666:172.16.58.3")
self.ipv6test(True, "fc00:db20:35b:7399::5:5555:6666:172.16.58.3")
self.ipv6test(True, "fdf8:f53e:61e4::18:5555:6666:123.123.123.123")
self.ipv6test(True, "::4444:5555:6666:172.16.58.3")
self.ipv6test(True, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:4444:5555:6666:172.16.58.3")
self.ipv6test(True, "::2222:3333:4444:5555:6666:172.16.58.3")
# Playing with combinations of "0" and "::"
# NB: these are all sytactically correct, but are bad form
# because "0" adjacent to "::" should be combined into "::"
self.ipv6test(True, "::0:0:0:0:0:0:0")
self.ipv6test(True, "::0:0:0:0:0:0")
self.ipv6test(True, "::0:0:0:0:0")
self.ipv6test(True, "::0:0:0:0")
self.ipv6test(True, "::0:0:0")
self.ipv6test(True, "::0:0")
self.ipv6test(True, "::0")
self.ipv6test(True, "0:0:0:0:0:0:0::")
self.ipv6test(True, "0:0:0:0:0:0::")
self.ipv6test(True, "0:0:0:0:0::")
self.ipv6test(True, "0:0:0:0::")
self.ipv6test(True, "0:0:0::")
self.ipv6test(True, "0:0::")
self.ipv6test(True, "0::")
# New invalid from Aeron
# Invalid data
self.ipv6test(False, "XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX")
# Too many components
self.ipv6test(False, "1111:2222:3333:4444:5555:6666:7777:8888:9999")
self.ipv6test(False, "fc00:e968:6179::de52:7100::")
self.ipv6test(False, "::2222:3333:4444:5555:6666:7777:8888:9999")
# Too few components
self.ipv6test(False, "1111:2222:3333:4444:5555:6666:7777")
self.ipv6test(False, "1111:2222:3333:4444:5555:6666")
self.ipv6test(False, "1111:2222:3333:4444:5555")
self.ipv6test(False, "1111:2222:3333:4444")
self.ipv6test(False, "1111:2222:3333")
self.ipv6test(False, "1111:2222")
self.ipv6test(False, "1111")
# Missing :
self.ipv6test(False, "11112222:3333:4444:5555:6666:7777:8888")
self.ipv6test(False, "1111:22223333:4444:5555:6666:7777:8888")
self.ipv6test(False, "1111:2222:33334444:5555:6666:7777:8888")
self.ipv6test(False, "1111:2222:3333:44445555:6666:7777:8888")
self.ipv6test(False, "1111:2222:3333:4444:55556666:7777:8888")
self.ipv6test(False, "1111:2222:3333:4444:5555:66667777:8888")
self.ipv6test(False, "1111:2222:3333:4444:5555:6666:77778888")
# Missing : intended for ::
self.ipv6test(False, "1111:2222:3333:4444:5555:6666:7777:8888:")
self.ipv6test(False, "1111:2222:3333:4444:5555:6666:7777:")
self.ipv6test(False, "1111:2222:3333:4444:5555:6666:")
self.ipv6test(False, "1111:2222:3333:4444:5555:")
self.ipv6test(False, "1111:2222:3333:4444:")
self.ipv6test(False, "1111:2222:3333:")
self.ipv6test(False, "1111:2222:")
self.ipv6test(False, "1111:")
self.ipv6test(False, ":")
self.ipv6test(False, ":8888")
self.ipv6test(False, ":7777:8888")
self.ipv6test(False, ":6666:7777:8888")
self.ipv6test(False, ":5555:6666:7777:8888")
self.ipv6test(False, ":4444:5555:6666:7777:8888")
self.ipv6test(False, ":3333:4444:5555:6666:7777:8888")
self.ipv6test(False, ":2222:3333:4444:5555:6666:7777:8888")
self.ipv6test(False, ":1111:2222:3333:4444:5555:6666:7777:8888")
# :::
self.ipv6test(False, ":::2222:3333:4444:5555:6666:7777:8888")
self.ipv6test(False, "1111:::3333:4444:5555:6666:7777:8888")
self.ipv6test(False, "1111:2222:::4444:5555:6666:7777:8888")
self.ipv6test(False, "1111:2222:3333:::5555:6666:7777:8888")
self.ipv6test(False, "1111:2222:3333:4444:::6666:7777:8888")
self.ipv6test(False, "1111:2222:3333:4444:5555:::7777:8888")
self.ipv6test(False, "1111:2222:3333:4444:5555:6666:::8888")
self.ipv6test(False, "1111:2222:3333:4444:5555:6666:7777:::")
# Double ::")
self.ipv6test(False, "::2222::4444:5555:6666:7777:8888")
self.ipv6test(False, "::2222:3333::5555:6666:7777:8888")
self.ipv6test(False, "::2222:3333:4444::6666:7777:8888")
self.ipv6test(False, "::2222:3333:4444:5555::7777:8888")
self.ipv6test(False, "::2222:3333:4444:fdf8:f53e:61e4::18")
self.ipv6test(False, "::2222:3333:4444:5555:7777:8888::")
self.ipv6test(False, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b::5555:6666:7777:8888")
self.ipv6test(False, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:4444::6666:7777:8888")
self.ipv6test(False, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:4444:5555::7777:8888")
self.ipv6test(False, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:4444:fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b")
self.ipv6test(False, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:4444:5555:6666:7777::")
self.ipv6test(False, "1111:2222::4444::6666:7777:8888")
self.ipv6test(False, "fc00:db20:35b:7399::5:fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:8888")
self.ipv6test(False, "fc00:db20:35b:7399::5:fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b")
self.ipv6test(False, "fc00:db20:35b:7399::5:5555:6666:7777::")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b::7777:8888")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:6666::8888")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:6666:7777::")
self.ipv6test(False, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b::8888")
self.ipv6test(False, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:7777::")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b::")
# Too many components"
self.ipv6test(False, "fc00:e968:6179::de52:7100:1.2.3.4")
self.ipv6test(False, "1111:2222:3333:4444:5555:6666:7777:1.2.3.4")
self.ipv6test(False, "1111:2222:3333:4444:5555:6666::1.2.3.4")
self.ipv6test(False, "::2222:3333:4444:5555:6666:7777:1.2.3.4")
self.ipv6test(False, "1111:2222:3333:4444:5555:6666:1.2.3.4.5")
# Too few components
self.ipv6test(False, "1111:2222:3333:4444:5555:1.2.3.4")
self.ipv6test(False, "1111:2222:3333:4444:1.2.3.4")
self.ipv6test(False, "1111:2222:3333:1.2.3.4")
self.ipv6test(False, "1111:2222:1.2.3.4")
self.ipv6test(False, "1111:1.2.3.4")
# Missing :
self.ipv6test(False, "11112222:3333:4444:5555:6666:1.2.3.4")
self.ipv6test(False, "1111:22223333:4444:5555:6666:1.2.3.4")
self.ipv6test(False, "1111:2222:33334444:5555:6666:1.2.3.4")
self.ipv6test(False, "1111:2222:3333:44445555:6666:1.2.3.4")
self.ipv6test(False, "1111:2222:3333:4444:55556666:1.2.3.4")
self.ipv6test(False, "1111:2222:3333:4444:5555:66661.2.3.4")
# Missing .
self.ipv6test(False, "1111:2222:3333:4444:5555:6666:255255.255.255")
self.ipv6test(False, "1111:2222:3333:4444:5555:6666:255.255255.255")
self.ipv6test(False, "1111:2222:3333:4444:5555:6666:255.255.255255")
# Missing : intended for ::
self.ipv6test(False, ":1.2.3.4")
self.ipv6test(False, ":6666:1.2.3.4")
self.ipv6test(False, ":5555:6666:1.2.3.4")
self.ipv6test(False, ":4444:5555:6666:1.2.3.4")
self.ipv6test(False, ":3333:4444:5555:6666:1.2.3.4")
self.ipv6test(False, ":2222:3333:4444:5555:6666:1.2.3.4")
self.ipv6test(False, ":1111:2222:3333:4444:5555:6666:1.2.3.4")
# :::
self.ipv6test(False, ":::2222:3333:4444:5555:6666:1.2.3.4")
self.ipv6test(False, "1111:::3333:4444:5555:6666:1.2.3.4")
self.ipv6test(False, "1111:2222:::4444:5555:6666:1.2.3.4")
self.ipv6test(False, "1111:2222:3333:::5555:6666:1.2.3.4")
self.ipv6test(False, "1111:2222:3333:4444:::6666:1.2.3.4")
self.ipv6test(False, "1111:2222:3333:4444:5555:::1.2.3.4")
# Double ::
self.ipv6test(False, "::2222::4444:5555:6666:1.2.3.4")
self.ipv6test(False, "::2222:3333::5555:6666:1.2.3.4")
self.ipv6test(False, "::2222:3333:4444::6666:1.2.3.4")
self.ipv6test(False, "::2222:3333:4444:5555::1.2.3.4")
self.ipv6test(False, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b::5555:6666:1.2.3.4")
self.ipv6test(False, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:4444::6666:1.2.3.4")
self.ipv6test(False, "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:5555::1.2.3.4")
self.ipv6test(False, "1111:2222::4444::6666:1.2.3.4")
self.ipv6test(False, "fc00:db20:35b:7399::5:5555::1.2.3.4")
self.ipv6test(False, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b::1.2.3.4")
# Missing parts
self.ipv6test(False, "::.")
self.ipv6test(False, "::..")
self.ipv6test(False, "::...")
self.ipv6test(False, "::1...")
self.ipv6test(False, "::1.2..")
self.ipv6test(False, "::1.2.3.")
self.ipv6test(False, "::.2..")
self.ipv6test(False, "::.2.3.")
self.ipv6test(False, "::.2.3.4")
self.ipv6test(False, "::..3.")
self.ipv6test(False, "::..3.4")
self.ipv6test(False, "::...4")
# Extra : in front
self.ipv6test(False, ":1111:2222:3333:4444:5555:6666:7777::")
self.ipv6test(False, ":1111:2222:3333:4444:5555:6666::")
self.ipv6test(False, ":1111:2222:3333:4444:5555::")
self.ipv6test(False, ":1111:2222:3333:4444::")
self.ipv6test(False, ":1111:2222:3333::")
self.ipv6test(False, ":1111:2222::")
self.ipv6test(False, ":1111::")
self.ipv6test(False, ":::")
self.ipv6test(False, ":fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b")
self.ipv6test(False, | |
import tkinter
from math import sqrt, sin, cos, atan, atan2, radians, pi
from evolution import *
def magnitude(vector):
    """Return the Euclidean length of *vector* (any iterable of numbers)."""
    # Generator expression: no need to materialize an intermediate list.
    return sqrt(sum(component ** 2 for component in vector))
def direction(td_vector):
    """Return the angle (radians) of a 2-d vector, or None for the zero vector.

    Fixes two defects of the original ``atan(y / x)`` formulation:
    - a vertical vector (x == 0, y != 0) raised ZeroDivisionError;
    - vectors pointing into the left half-plane (x < 0) got the wrong
      quadrant.  ``atan2`` returns the true angle in (-pi, pi].
    """
    if td_vector[0] == 0 and td_vector[1] == 0:
        # Zero vector has no direction; original fell through returning None.
        return None
    return atan2(td_vector[1], td_vector[0])
# These functions are used to check if line segments intersect.
def orientation(a, b, c):
    """Return True when the point triple (a, b, c) turns counter-clockwise."""
    lhs = (c[1] - a[1]) * (b[0] - a[0])
    rhs = (b[1] - a[1]) * (c[0] - a[0])
    return lhs > rhs
def intersect(a, b, c, d):
    """Return True when segments a-b and c-d properly cross.

    a-b crosses c-d iff a and b lie on opposite sides of line c-d AND
    c and d lie on opposite sides of line a-b.
    """
    def ccw(p, q, r):
        # Same counter-clockwise predicate as the module-level orientation().
        return (r[1] - p[1]) * (q[0] - p[0]) > (q[1] - p[1]) * (r[0] - p[0])
    return ccw(a, c, d) != ccw(b, c, d) and ccw(a, b, c) != ccw(a, b, d)
class Car:
    """A canvas-drawn car whose controls are driven by a neural network.

    The network's output nodes fire the state-setting callbacks below;
    the physics constants here parameterize the force model used in
    update().
    """
    # Discrete control states; exactly one is active per physics tick.
    possible_states = (
        "do_nothing",
        "gas",
        "brake",
        "turn_right",
        "turn_left",
    )
    mass = 1000 # kg
    wheelbase = 2.4 # m
    steering_angle = radians(45) # radian
    drag_const = 0.39 # Drag constant.
    rr_const = 11.7 # Rolling resistance constant.
    braking_const = mass * 9.8 * 0.9 # Friction = normal force [AKA m * g] * friction coefficient.
    # Class-wide counter; each instance takes the current value as its ID.
    id_counter = 0
    def __init__(self, canvas, track_walls, network, weights, x0, y0, x1, y1):
        """Draw the car on *canvas*, wire up its neural network, and
        initialize the physics state.

        canvas      -- tkinter Canvas to draw on.
        track_walls -- canvas line items forming the track boundary.
        network     -- neural network object; gets this car attached and
                       its input/output nodes created here.
        weights     -- connection weights passed to network.connect().
        x0, y0, x1, y1 -- opposite corners of the initial car rectangle.
        """
        self.canvas = canvas
        self.walls = track_walls
        # Take the next unique ID from the class-wide counter.
        self.ID = Car.id_counter
        Car.id_counter += 1
        # Create the car: four canvas lines forming a rectangle; the red
        # top edge marks the front of the car.
        self.car = [
            self.canvas.create_line(x0, y0, x0, y1, fill = "green", width = 2), # Left Vert
            self.canvas.create_line(x1, y0, x1, y1, fill = "green", width = 2), # Right Vert
            self.canvas.create_line(x0, y0, x1, y0, fill = "red", width = 2), # Top Hor
            self.canvas.create_line(x0, y1, x1, y1, fill = "green", width = 2), # Bot Hor
        ]
        # Calculate car center coordinates (midpoints of top edge / left edge).
        self.car_centerX = (canvas.coords(self.car[2])[0] + canvas.coords(self.car[2])[2]) / 2
        self.car_centerY = (canvas.coords(self.car[0])[1] + canvas.coords(self.car[0])[3]) / 2
        # Set default state after creation.
        self.state = Car.possible_states[0]
        self.orientation = radians(90)
        self.u = [cos(self.orientation), sin(self.orientation)] # Unit vector for the orientation of the car.
        self.engine_force = 2000 # N
        self.velocity = [0, 0] # The car is at rest.
        self.angular_velocity = 0 # radians/s
        self.is_dead = False
        self.distance_travelled = 0 # metres
        # One BoundVar per vision ray; calculate_vision() fills these and
        # they feed the network's input nodes below.
        self.vision_lengths = [
            BoundVar(0),
            BoundVar(0),
            BoundVar(0),
            BoundVar(0),
            BoundVar(0),
            BoundVar(0)
        ]
        self.calculate_vision()
        # Output callbacks, in the same order as possible_states.
        self.callbacks = [
            self.do_nothing,
            self.gas,
            self.brake,
            self.turn_right,
            self.turn_left
        ]
        # Initialize Neural Network: vision lengths in, state callbacks out.
        self.weights = weights
        self.network = network
        self.network.car = self
        self.network.create_input_nodes(self.vision_lengths, bias = True)
        self.network.create_output_nodes(self.callbacks)
        self.network.connect(self.weights)
        self.network.update()
        # Forces and physics stuff.
        self.f_traction = [i * self.engine_force for i in self.u] # self.u * Car.engine_force
        self.f_drag = [i * -Car.drag_const * magnitude(self.velocity) for i in self.velocity] # drag_const * acceleration * |acceleration|
        self.f_rr = [i * -Car.rr_const for i in self.velocity] # velocity * -rolling resistance
        self.f_braking = [0, 0] # direction unit vector * braking force
        self.f_centripetal = [0, 0]
        self.acceleration = [(self.f_traction[i] + self.f_drag[i] + self.f_rr[i] + self.f_centripetal[i]) / Car.mass for i in range(2)] # Vector sum of f_traction, f_drag, f_rr divided by mass
    # Callbacks: each is bound to one neural-network output node (see
    # __init__); firing a node sets the control state read by update().
    def do_nothing(self):
        self.state = Car.possible_states[0]
    def gas(self):
        self.state = Car.possible_states[1]
    def brake(self):
        self.state = Car.possible_states[2]
    def turn_right(self):
        self.state = Car.possible_states[3]
    def turn_left(self):
        self.state = Car.possible_states[4]
    def calculate_vision(self):
        """Cast six vision rays from the car's center, clip each one at
        the nearest wall it hits, and push the resulting ray lengths
        into self.vision_lengths (the network's inputs).

        Canvas y grows downward, hence the minus sign on the sin()
        terms when computing ray endpoints.
        """
        # Vision.
        # Six 600-px rays: front, front-right 25 deg, front-left 25 deg,
        # right, left, back.  Each entry is a list so clipped candidate
        # segments can be appended and pruned afterwards.
        self.vision = [
            [self.canvas.create_line(
                self.car_centerX,
                self.car_centerY,
                self.car_centerX + 600*cos(self.orientation),
                self.car_centerY - 600*sin(self.orientation),
                fill = "red",
                width = 2
            )], # Front
            [self.canvas.create_line(
                self.car_centerX,
                self.car_centerY,
                self.car_centerX + 600*cos(self.orientation - radians(25)),
                self.car_centerY - 600*sin(self.orientation - radians(25)),
                fill = "red",
                width = 2
            )], # Front Right 25 degrees
            [self.canvas.create_line(
                self.car_centerX,
                self.car_centerY,
                self.car_centerX + 600*cos(self.orientation + radians(25)),
                self.car_centerY - 600*sin(self.orientation + radians(25)),
                fill = "red",
                width = 2
            )], # Front Left 25 Degrees
            [self.canvas.create_line(
                self.car_centerX,
                self.car_centerY,
                self.car_centerX + 600*cos(self.orientation - pi/2),
                self.car_centerY - 600*sin(self.orientation - pi/2),
                fill = "red",
                width = 2
            )], # Right
            [self.canvas.create_line(
                self.car_centerX,
                self.car_centerY,
                self.car_centerX + 600*cos(self.orientation + pi/2),
                self.car_centerY - 600*sin(self.orientation + pi/2),
                fill = "red",
                width = 2
            )], # Left
            [self.canvas.create_line(
                self.car_centerX,
                self.car_centerY,
                self.car_centerX + 600*cos(self.orientation + pi),
                self.car_centerY - 600*sin(self.orientation + pi),
                fill = "red",
                width = 2
            )] # Back
        ]
        # For every ray/wall pair that intersects, compute the exact
        # intersection point by solving y = m*x + b for both lines
        # (vertical lines handled as special cases) and append a shorter
        # line from the car center to that point.
        for line in range(len(self.vision)):
            self.vision_line = self.canvas.coords(self.vision[line][0])
            for wall in self.walls:
                self.wall_coords = self.canvas.coords(wall)
                self.v1 = [
                    (self.vision_line[0], self.vision_line[1]),
                    (self.vision_line[2], self.vision_line[3])
                ]
                self.w1 = [
                    (self.wall_coords[0], self.wall_coords[1]),
                    (self.wall_coords[2], self.wall_coords[3])
                ]
                if intersect(self.v1[0], self.v1[1], self.w1[0], self.w1[1]):
                    # Both segments vertical: parallel, no single
                    # intersection point to compute.
                    if self.car_centerX - self.vision_line[2] == 0 and self.wall_coords[0] - self.wall_coords[2] == 0:
                        continue
                    elif self.car_centerX - self.vision_line[2] == 0: # m1 is 1/0
                        self.x = self.vision_line[0]
                        self.m2 = (self.wall_coords[1] - self.wall_coords[3]) / (self.wall_coords[0] - self.wall_coords[2])
                        self.b2 = self.wall_coords[1] - self.m2 * self.wall_coords[0]
                        self.y = self.m2 * self.x + self.b2
                    elif self.wall_coords[0] - self.wall_coords[2] == 0: # m2 is 1/0
                        self.x = self.wall_coords[0]
                        self.m1 = (self.car_centerY - self.vision_line[3]) / (self.car_centerX - self.vision_line[2])
                        self.b1 = self.vision_line[1] - self.m1 * self.vision_line[0]
                        self.y = self.m1 * self.x + self.b1
                    else:
                        self.m1 = (self.car_centerY - self.vision_line[3]) / (self.car_centerX - self.vision_line[2])
                        self.m2 = (self.wall_coords[1] - self.wall_coords[3]) / (self.wall_coords[0] - self.wall_coords[2])
                        self.b1 = self.vision_line[1] - self.m1 * self.vision_line[0]
                        self.b2 = self.wall_coords[1] - self.m2 * self.wall_coords[0]
                        try:
                            # Parallel (m1 == m2) lines: skip this wall.
                            self.x = (self.b2 - self.b1) / (self.m1 - self.m2)
                        except ZeroDivisionError:
                            continue
                        self.y = self.m1 * self.x + self.b1
                    self.vision[line].append(
                        self.canvas.create_line(self.car_centerX, self.car_centerY, self.x, self.y, fill = "red", width = 2)
                    )
        # Prune each ray's candidate list down to its single shortest
        # segment by repeatedly deleting the longest remaining one.
        for line_list in self.vision:
            for _ in range(len(line_list) - 1):
                self.temp_line_coords = [self.canvas.coords(line) for line in line_list]
                self.line_lengths = [sqrt((line[3] - line[1]) ** 2 + (line[2] - line[0]) ** 2) for line in self.temp_line_coords]
                self.canvas.delete(line_list[self.line_lengths.index(max(self.line_lengths))])
                line_list.pop(self.line_lengths.index(max(self.line_lengths)))
                self.line_lengths.pop(self.line_lengths.index(max(self.line_lengths)))
        # Flatten to one canvas line per direction and publish each
        # ray's length through its BoundVar.
        self.vision = [i[0] for i in self.vision]
        for i in range(len(self.vision)):
            coords = self.canvas.coords(self.vision[i])
            self.vision_lengths[i].update_val(sqrt((coords[0] - coords[2]) ** 2 + (coords[1] - coords[3]) ** 2))
def update(self): # Check at half-second intervals.
# Update the values of the forces, acceleration, and velocity.
if not self.is_dead:
self.network.update()
if self.state == Car.possible_states[0]: # Car is doing nothing.
self.f_traction = [0, 0]
self.f_braking = [0, 0]
elif self.state == Car.possible_states[1]: # GAS GAS GAS
self.f_traction = [i * self.engine_force for i in self.u] # self.u * Car.engine_force
self.f_braking = [0, 0]
elif self.state == Car.possible_states[2]: # Driver is being a coward (braking)
self.f_traction = [0, 0]
# There should be no braking force if the car is not moving.
if [round([i * magnitude(self.velocity) for i in self.u][i] - self.velocity[i]) for i in range(2)] == [0, 0] and magnitude(self.velocity) > 10:
# Check if self.velocity's direction is the same as the direction of self.u (orientation) and if the velocity's magnitude is not 0.
self.f_braking = [-i * Car.braking_const for i in self.u] # direction unit vector * braking force
else:
self.f_braking = [0, 0]
self.velocity = [0, 0]
elif self.state == Car.possible_states[3]: # Right turn.
self.f_traction = [i * self.engine_force for i in self.u]
self.f_braking = [0, 0]
self.turning_radius = Car.wheelbase / sin(Car.steering_angle)
self.angular_velocity = magnitude(self.velocity) / self.turning_radius / 10 # radians/ms
self.orientation -= self.angular_velocity
self.u = [cos(self.orientation), sin(self.orientation)]
self.velocity = [
magnitude(self.velocity) * cos(self.orientation),
magnitude(self.velocity) * sin(self.orientation)
]
self.top_left = [
self.canvas.coords(self.car[0])[0],
self.canvas.coords(self.car[0])[1]
]
self.top_right = [
self.canvas.coords(self.car[0])[2],
self.canvas.coords(self.car[0])[3]
]
self.bot_left = [
self.canvas.coords(self.car[1])[0],
self.canvas.coords(self.car[1])[1]
]
self.bot_right = [
self.canvas.coords(self.car[1])[2],
self.canvas.coords(self.car[1])[3]
]
self.coords = [self.top_left, self.top_right, self.bot_left, self.bot_right]
for i in range(len(self.coords)):
self.temp_x = self.coords[i][0] - self.car_centerX
self.temp_y = self.coords[i][1] - self.car_centerY
self.rotated_x = self.temp_x * cos(self.angular_velocity) - self.temp_y * sin(self.angular_velocity)
self.rotated_y = self.temp_x * sin(self.angular_velocity) + self.temp_y * cos(self.angular_velocity)
self.coords[i][0] = self.rotated_x + self.car_centerX
self.coords[i][1] = self.rotated_y + self.car_centerY
for i in self.car:
self.canvas.delete(i)
self.car = [
self.canvas.create_line(self.top_left[0], self.top_left[1], self.bot_left[0], self.bot_left[1], fill = "green", width = 2), # Left Vert
self.canvas.create_line(self.top_right[0], self.top_right[1], self.bot_right[0], self.bot_right[1], fill = "green", width = 2), # Right Vert
self.canvas.create_line(self.top_left[0], self.top_left[1], self.top_right[0], self.top_right[1], fill = "red", width = 2), # Top Hor
self.canvas.create_line(self.bot_left[0], self.bot_left[1], self.bot_right[0], self.bot_right[1], fill = "green", width = 2), # Bot Hor
]
elif self.state == Car.possible_states[4]: # Left turn.
self.f_traction = [i * self.engine_force for i in self.u]
self.f_braking = [0, 0]
self.turning_radius = Car.wheelbase / sin(Car.steering_angle)
self.angular_velocity = -magnitude(self.velocity) / self.turning_radius / 10 # radians/ms
self.orientation -= self.angular_velocity
self.u = [cos(self.orientation), sin(self.orientation)]
self.velocity = [
magnitude(self.velocity) * cos(self.orientation),
magnitude(self.velocity) * sin(self.orientation)
]
self.top_left = [
self.canvas.coords(self.car[0])[0],
self.canvas.coords(self.car[0])[1]
]
self.top_right = [
self.canvas.coords(self.car[0])[2],
self.canvas.coords(self.car[0])[3]
]
self.bot_left = [
self.canvas.coords(self.car[1])[0],
self.canvas.coords(self.car[1])[1]
]
self.bot_right | |
# -*- coding: utf-8 -*-
"""
***************************************************************************
repo.py
---------------------
Date : November 2013
Copyright : (C) 2013-2016 Boundless, http://boundlessgeo.com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = '<NAME>'
__date__ = 'November 2013'
__copyright__ = '(C) 2013-2016 Boundless, http://boundlessgeo.com'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import re
from commitish import Commitish
from tag import Tag
import geogig
from geogigexception import GeoGigException
from feature import Feature
from tree import Tree
from utils import mkdir
from py4jconnector import Py4JCLIConnector
from geogigserverconnector import GeoGigServerConnector
import tempfile
import datetime
def _resolveref(ref):
'''
Tries to resolve the pased object into a string representing a commit reference
(a SHA-1, branch name, or something like HEAD~1)
This should be called by all commands using references, so they can accept both
strings and Commitish objects indistinctly
'''
if ref is None:
return None
if isinstance(ref, Commitish):
return ref.ref
elif isinstance(ref, basestring):
return ref
else:
return str(ref)
# Matches a full 40-character lowercase hexadecimal SHA-1 within a string.
SHA_MATCHER = re.compile(r"\b([a-f0-9]{40})\b")
class Repository(object):
    '''A local geogig repository, accessed through a connector object.'''
    # Cached result of the default log() query; reset by cleancache().
    _logcache = None
    def __init__(self, url, connector=None, init=False, initParams=None):
        '''
        url: The url of the repository. Only file paths are supported so far. Remote repos are not supported
        connector: the connector to use to communicate with the repository
        init: True if the repository should be initialized
        initParams: optional parameters forwarded to init() when initializing
        '''
        self.url = url
        # Default to the Py4J-based CLI connector when none is supplied.
        self.connector = Py4JCLIConnector() if connector is None else connector
        if init:
            try:
                mkdir(url)
            except Exception, e:
                raise GeoGigException("Cannot create repository folder.\nCheck that path is correct and you have permission")
        self.connector.setRepository(self)
        # Probe the folder: checkisrepo() raises if it is not a geogig repo yet.
        try:
            self.connector.checkisrepo()
            isAlreadyRepo = True
        except GeoGigException, e:
            isAlreadyRepo = False
        if init:
            if isAlreadyRepo:
                raise GeoGigException("Cannot init, the folder is already a geogig repository")
            else:
                self.init(initParams)
        # Re-check so a failed or skipped initialization surfaces here.
        self.connector.checkisrepo()
        self.cleancache()
@staticmethod
def newrepofromclone(url, path, connector=None, username=None, password=<PASSWORD>):
'''
Clones a given repository into a local folder and returns a repository object representing it
url: the url of the repo to clone
path: the path to clone the repo into
connector: the connector to use to communicate with the repository
'''
connector = Py4JCLIConnector() if connector is None else connector
connector.clone(url, path, username, password)
return Repository(path, connector)
    def createdat(self):
        '''Returns the creation date of this repository, as reported by the connector'''
        return self.connector.createdat()
    def cleancache(self):
        '''Invalidates the cached log, forcing the next log() call to re-query the connector.'''
        self._logcache = None
    def description(self):
        '''Returns the description of this repository'''
        # TODO: not implemented yet; always returns an empty string.
        return ''
def revparse(self, rev):
'''Returns the SHA-1 of a given element, represented as a string'''
if SHA_MATCHER.match(rev) is not None:
return rev
else:
return self.connector.revparse(rev)
    @property
    def head(self):
        '''Returns a Commitish representing the current HEAD'''
        # Delegated: the connector knows how to resolve HEAD for this repo.
        return self.connector.head()
    @property
    def index(self):
        '''Returns a Commitish representing the index (staging area)'''
        return Commitish(self, geogig.STAGE_HEAD)
    @property
    def workingtree(self):
        '''Returns a Commitish representing the working tree'''
        return Commitish(self, geogig.WORK_HEAD)
    @property
    def master(self):
        '''Returns a Commitish representing the master branch'''
        return Commitish(self, geogig.MASTER)
    def isdetached(self):
        '''Returns True if the repo has a detached HEAD'''
        # When HEAD is detached its ref is the commit id itself, so the two
        # coincide. Note: self.head is queried twice (two connector calls).
        return self.head.id == self.head.ref
    def synced(self, branch=geogig.HEAD, credentials=None):
        '''
        Returns a tuple with number of (ahead, behind) commits between this repo and a remote
        It uses the passed branch or, if not passed, the current branch
        If the repository is headless, or if not remote is defined, it will throw an exception
        It uses the "origin" remote if it exists, otherwise it uses the first remote available.
        If the remote requires authentication, a tuple of (username,password) must be passed
        in the credentials parameter
        '''
        if (branch == geogig.HEAD and self.isdetached()):
            raise GeoGigException("Cannot use current branch. The repository has a detached HEAD")
        remotes = self.remotes
        if remotes:
            # Prefer "origin"; otherwise fall back to the first remote defined.
            if "origin" in remotes:
                remote = remotes["origin"]
                remotename = "origin"
            else:
                remotename = remotes.keys()[0]
                remote = remotes.values()[0]
        else:
            raise GeoGigException("No remotes defined")
        # NOTE(review): isremoteurl is not defined or imported in the visible
        # part of this module -- presumably provided elsewhere; verify.
        if isremoteurl(remote):
            repo = Repository(remote, GeoGigServerConnector(credentials))
        else:
            # Local remotes are file:/ urls; strip the scheme to get a path.
            conn = self.connector.__class__()
            repo = Repository(remote[len("file:/"):], conn)
        localtip = self.revparse(branch)
        remotetip = repo.revparse(branch)
        if remotetip == localtip:
            return 0, 0
        if remotetip == geogig.NULL_ID:
            # The remote branch does not exist yet: all local commits are ahead.
            log = self.log(branch)
            push = len(log)
            pull = 0
        else:
            # Count commits on each side since the tracked branch head.
            trackedbranchhead = self.revparse("refs/remotes/" + remotename + "/" + branch)
            log = self.log(branch, trackedbranchhead)
            push = len(log)
            log = repo.log(branch, trackedbranchhead)
            pull = len(log)
        return push, pull
    def mergemessage(self):
        '''
        Return the merge message if the repo is in a merge operation stopped due to conflicts.
        Returns an empty string if it is not the case
        '''
        return self.connector.mergemessage()
def log(self, tip=None, sincecommit=None, until=None, since=None, path=None, n=None):
'''
Returns a list of Commit starting from the passed tip ref, or HEAD if there is no passed ref,
and up to the sincecommit, if passed, or to first commit in the history if not.
If a path is passed, it only returns commits in which that path was modified
Date limits can be passed using the since and until parameters
A maximum number of commits can be set using the n parameter
'''
tip = tip or geogig.HEAD
if path is not None or tip != geogig.HEAD or n is not None or since is not None or until is not None or sincecommit is not None:
return self.connector.log(_resolveref(tip), _resolveref(sincecommit), _resolveref(until), _resolveref(since), path, n)
if self._logcache is None:
self._logcache = self.connector.log(_resolveref(tip), _resolveref(sincecommit), _resolveref(until), _resolveref(since), path, n)
return self._logcache
def commitatdate(self, t):
'''Returns a Commit corresponding to a given instant, which is passed as a datetime.datetime'''
epoch = datetime.datetime.utcfromtimestamp(0)
delta = t - epoch
milisecs = int(delta.total_seconds()) * 1000
log = self.connector.log(geogig.HEAD, until=str(milisecs), n=1)
if log:
return log[0]
else:
raise GeoGigException("Invalid date for this repository")
    @property
    def trees(self):
        # Convenience view over _trees() with its default arguments (HEAD, root, non-recursive).
        return self._trees()
def _trees(self, ref=geogig.HEAD, path=None, recursive=False):
'''Returns a set of Tree objects with all the trees for the passed ref and path'''
return [e for e in self.children(ref, path, recursive) if isinstance(e, Tree)]
def features(self, ref=geogig.HEAD, path=None, recursive=False):
'''Returns a set of Feature objects with all the features for the passed ref and path'''
return [e for e in self.children(ref, path, recursive) if isinstance(e, Feature)]
    def children(self, ref=geogig.HEAD, path=None, recursive=False):
        '''Returns a set of Tree and Feature objects with all the children for the passed ref and path'''
        return self.connector.children(_resolveref(ref), path, recursive)
    @property
    def branches(self):
        ''' Returns a dict with branch names as keys and branch refs as values'''
        return self.connector.branches()
@property
def tags(self):
'''Returns a dict with tag names as keys and tag objects as values'''
tags = self.connector.tags()
tags = {k: Tag(self, v, k) for k, v in tags.iteritems()}
return tags
    def clone(self, path):
        '''Clones this repo in the specified path. Returns a reference to the cloned repo'''
        # Normalize Windows separators; the clone gets a fresh connector instance.
        url = self.url.replace('\\', '/')
        self.connector.clone(url, path)
        return Repository(path, self.connector.__class__(), False)
    def createbranch(self, ref, name, force=False, checkout=False):
        '''Creates a new branch in the repo. Returns the commitish representing the branch'''
        if checkout:
            # Checking out moves HEAD, so the cached default log is stale.
            self.cleancache()
        return self.connector.createbranch(_resolveref(ref), name, force, checkout)
    def deletebranch(self, name, remote=False):
        '''Deletes the passed branch; if remote is True, deletes a remote-tracking branch'''
        self.connector.deletebranch(name, remote)
    def createtag(self, ref, name, message):
        '''Creates a new tag at the passed ref, with the passed message'''
        self.connector.createtag(_resolveref(ref), name, message)
    def deletetag(self, name):
        '''Deletes the passed tag'''
        self.connector.deletetag(name)
    def diff(self, refa=geogig.HEAD, refb=geogig.WORK_HEAD, path=None):
        '''Returns a list of DiffEntry representing the changes between 2 commits.
        If a path is passed, it only shows changes corresponding to that path'''
        return self.connector.diff(_resolveref(refa), _resolveref(refb), path)
    def difftreestats(self, refa=geogig.HEAD, refb=geogig.WORK_HEAD):
        '''Returns a dict with tree changes statistics for the passed refs. Keys are paths, values are tuples
        in the form (added, deleted, modified) corresponding to changes made to that path'''
        return self.connector.difftreestats(_resolveref(refa), _resolveref(refb))
def treediff(self, path, refa=geogig.HEAD, refb=geogig.WORK_HEAD):
'''Returns a tuple attributes, features with a description of features changed between the specified refs
Attributes is a dict with attribute names as keys and the description of the attribute as value
Features is a list, with each element being another list representing a feature and the changes
in it between the two specifed versions.
The length of this list is the same as the one of attributes dictionary
The value for an attribute is a tuple of (change_type, old value, new value) in case the change for the
attribute is | |
(q1+q2)*B[(2, 1)] + (q1+q2)*B[(3, 0)] + q1*B[(0, 3)]
For `T_0`, we can note that, in the projection, `\delta`
is mapped to `q`::
sage: T[0](x)
(-q^2*q1-q^2*q2)*B[(1, 2)] + (-q*q1-q*q2)*B[(2, 1)] + (-q^3*q2)*B[(0, 3)]
Note that there is no translation part, and in particular
1 is an eigenvector for all `T_i`'s::
sage: T[0](KL0.one())
q1*B[(0, 0)]
sage: T[1](KL0.one())
q1*B[(0, 0)]
sage: Y = T.Y()
sage: alphacheck=Y.keys().simple_roots()
sage: Y[alphacheck[0]](KL0.one())
((-q2)/(q*q1))*B[(0, 0)]
Matching with Ion Bogdan's hand calculations from 3/15/2013::
sage: L = RootSystem(["A",1,1]).weight_space(extended=True)
sage: K = QQ['q,u'].fraction_field()
sage: q, u = K.gens()
sage: KL = L.algebra(K)
sage: KL0 = KL.classical()
sage: L0 = KL0.basis().keys()
sage: omega = L0.fundamental_weights()
sage: T = KL.demazure_lusztig_operators_on_classical(q, u, -1/u, convention="dominant")
sage: Y = T.Y()
sage: alphacheck = Y.keys().simple_roots()
sage: Ydelta = Y[Y.keys().null_root()]
sage: Ydelta.word, Ydelta.signs, Ydelta.scalar
((), (), 1/q)
sage: Y1 = Y[alphacheck[1]]
sage: Y1.word, Y1.signs, Y1.scalar # This is T_0 T_1 (T_1 acts first, then T_0); Ion gets T_1 T_0
((1, 0), (1, 1), 1)
sage: Y0 = Y[alphacheck[0]]
sage: Y0.word, Y0.signs, Y0.scalar # This is 1/q T_1^-1 T_0^-1
((0, 1), (-1, -1), 1/q)
Note that the following computations use the "dominant" convention::
sage: T0 = T.Tw(0)
sage: T0(KL0.monomial(omega[1]))
q*u*B[-Lambda[1]] + ((u^2-1)/u)*B[Lambda[1]]
sage: T0(KL0.monomial(2*omega[1]))
((q*u^2-q)/u)*B[0] + q^2*u*B[-2*Lambda[1]] + ((u^2-1)/u)*B[2*Lambda[1]]
sage: T0(KL0.monomial(-omega[1]))
1/(q*u)*B[Lambda[1]]
sage: T0(KL0.monomial(-2*omega[1]))
((-u^2+1)/(q*u))*B[0] + 1/(q^2*u)*B[2*Lambda[1]]
"""
# In type BC dual we used q^2 and q elsewhere
# Not sure this is the right thing to do or just a workaround ...
# This probably makes up for the fact that, in type BC
# dual, the null coroot is twice Sage's deltacheck
# whereas the null root is delta. So we need to map delta
# to q^2 in the q_projection.
# Should this go in q_project instead?
ct = self.cartan_type()
a0check = ct.acheck()[ct.special_node()]
T_on_basis = functools.partial(self.demazure_lusztig_operator_on_classical_on_basis,
q1=q1, q2=q2, q=q**a0check, convention=convention)
return HeckeAlgebraRepresentation(self.classical(), T_on_basis, self.cartan_type(), q1=q1, q2=q2, q=q, side="left")
    @cached_method
    def T0_check_on_basis(self, q1, q2, convention="antidominant"):
        r"""
        Return the `T_0^\vee` operator acting on the basis.
        This implements the formula for `T_{0'}` in Section 6.12 of [Haiman06]_.
        REFERENCES:
        .. [Haiman06] \M. Haiman, Cherednik algebras, Macdonald polynomials and combinatorics, ICM 2006.
        .. WARNING::
            The current implementation probably returns just
            nonsense, if the convention is not "dominant".
        EXAMPLES::
            sage: K = QQ['q1,q2'].fraction_field()
            sage: q1,q2 = K.gens()
            sage: L = RootSystem(["A",1,1]).ambient_space()
            sage: L0 = L.classical()
            sage: KL = L.algebra(K)
            sage: some_weights = L.fundamental_weights()
            sage: f = KL.T0_check_on_basis(q1,q2, convention="dominant")
            sage: f(L0.zero())
            (q1+q2)*B[(0, 0)] + q1*B[(1, -1)]
            sage: L = RootSystem(["A",3,1]).ambient_space()
            sage: L0 = L.classical()
            sage: KL = L.algebra(K)
            sage: some_weights = L0.fundamental_weights()
            sage: f = KL.T0_check_on_basis(q1,q2, convention="dominant")
            sage: f(L0.zero()) # not checked
            (q1+q2)*B[(0, 0, 0, 0)] + q1^3/q2^2*B[(1, 0, 0, -1)]
        The following results have not been checked::
            sage: for x in some_weights:
            ....:     print("{} : {}".format(x, f(x)))
            (1, 0, 0, 0) : q1*B[(1, 0, 0, 0)]
            (1, 1, 0, 0) : q1*B[(1, 1, 0, 0)]
            (1, 1, 1, 0) : q1*B[(1, 1, 1, 0)]
        Some examples for type `B_2^{(1)}` dual::
            sage: L = RootSystem("B2~*").ambient_space()
            sage: L0 = L.classical()
            sage: e = L.basis()
            sage: K = QQ['q,u'].fraction_field()
            sage: q,u = K.gens()
            sage: q1 = u
            sage: q2 = -1/u
            sage: KL = L.algebra(K)
            sage: KL0 = KL.classical()
            sage: f = KL.T0_check_on_basis(q1,q2, convention="dominant")
            sage: T = KL.twisted_demazure_lusztig_operators(q1,q2, convention="dominant")
        Direct calculation::
            sage: T.Tw(0)(KL0.monomial(L0([0,0])))
            ((u^2-1)/u)*B[(0, 0)] + u^3*B[(1, 1)]
            sage: KL.T0_check_on_basis(q1,q2, convention="dominant")(L0([0,0]))
            ((u^2-1)/u)*B[(0, 0)] + u^3*B[(1, 1)]
        Step by step calculation, comparing by hand with <NAME>::
            sage: res = T.Tw(2)(KL0.monomial(L0([0,0]))); res
            u*B[(0, 0)]
            sage: res = res * KL0.monomial(L0([-1,1])); res
            u*B[(-1, 1)]
            sage: res = T.Tw_inverse(1)(res); res
            (u^2-1)*B[(0, 0)] + u^2*B[(1, -1)]
            sage: res = T.Tw_inverse(2)(res); res
            ((u^2-1)/u)*B[(0, 0)] + u^3*B[(1, 1)]
        """
        L = self.basis().keys()
        ct = L.cartan_type()
        special_node = ct.special_node()
        a0 = ct.a()[special_node]
        # Classical group algebra and its Demazure-Lusztig operators.
        A0 = self.classical()
        T = A0.demazure_lusztig_operators(q1, q2, convention=convention)
        # TODO: use the formula expressing the inverse of T as a Demazure Lusztig operator? Or go through the affine action of T_0 for the dual
        L0 = A0.basis().keys()
        # The dominant short root of the classical system
        if ct.type() == 'BC':
            # CHECKME: this is not exactly phi, but phi rescaled
            # appropriately so that it's in the orbit of the
            # simple classical roots
            phi = -a0*L0(L.simple_roots()[0])
        else:
            phi = L0(L0.root_system.coroot_lattice().highest_root().associated_coroot())
        # Variant: try to fetch it from the other affinization; something like:
        # The a0 only has an influence in type BC; it handles the fact that alpha_0
        # is not in the orbit of the classical roots
        #phi1 = - L0(L'.other_affinization().simple_roots()[special_node]) * a0
        #assert phi == phi1
        # Express phi as v(alpha_j) for a simple root alpha_j and a reduced word v.
        j, v = phi.to_simple_root(reduced_word=True)
        translation = A0.monomial(-L0.simple_root(j)/a0)
        Tv = T[v]
        Tinv = T.Tw_inverse(v+(j,))
        # The returned operator: conjugate the translation by Tv, up to the
        # -q1*q2 normalization (Haiman's formula for T_{0'}).
        def T0_check(weight):
            return -q1*q2*Tinv( translation * Tv(A0.monomial(weight)))
        # For debugging purposes
        T0_check.phi = phi
        T0_check.j = j
        T0_check.v = v
        return T0_check
@cached_method
def classical(self):
"""
Return the group algebra of the corresponding classical lattice.
EXAMPLES::
sage: KL = RootSystem(["A",2,1]).ambient_space().algebra(QQ)
sage: KL.classical()
Algebra of the Ambient space of the Root system of type ['A', 2] over Rational Field
"""
return self.basis().keys().classical().algebra(self.base_ring())
def q_project_on_basis(self, l, q):
r"""
Return the monomial `c * cl(l)` in the group algebra of the classical lattice.
INPUT:
- ``l`` -- an element of the root lattice realization
- ``q`` -- an element of the ground ring
Here, `cl(l)` is the projection of `l` in the classical
lattice, and `c` is the coefficient of `l` in `\delta`.
.. SEEALSO:: :meth:`q_project_on_basis`
EXAMPLES::
sage: K = QQ['q'].fraction_field()
sage: q = K.gen()
sage: KL = RootSystem(["A",2,1]).ambient_space().algebra(K)
sage: L = KL.basis().keys()
sage: e = L.basis()
sage: KL.q_project_on_basis( 4*e[1] + 3*e[2] + e['deltacheck'] - 2*e['delta'], q)
1/q^2*B[(0, 4, 3)]
"""
KL0 = self.classical()
L0 = KL0.basis().keys()
return KL0.term(L0(l), q**l["delta"])
def q_project(self, x, q):
r"""
Implement the `q`-projection morphism from ``self`` to the group algebra of the classical space.
INPUT:
- ``x`` -- an element of the group algebra of ``self``
- ``q`` -- an element of the ground ring
This is an algebra morphism mapping `\delta` to `q` and
`X^b` to its classical counterpart for the other elements
`b` of the basis of the realization.
EXAMPLES::
sage: K = QQ['q'].fraction_field()
sage: q = K.gen()
sage: KL = RootSystem(["A",2,1]).ambient_space().algebra(K)
sage: L = KL.basis().keys()
sage: e = L.basis()
sage: x = KL.an_element() + KL.monomial(4*e[1] + 3*e[2] + e['deltacheck'] - 2*e['delta']); x
B[2*e[0] + 2*e[1] + 3*e[2]] + B[4*e[1] + 3*e[2] - 2*e['delta'] + e['deltacheck']]
sage: KL.q_project(x, q)
B[(2, 2, 3)] + 1/q^2*B[(0, 4, 3)]
sage: KL = RootSystem(["BC",3,2]).ambient_space().algebra(K)
sage: L = KL.basis().keys()
sage: e = L.basis()
sage: x = KL.an_element() + KL.monomial(4*e[1] + 3*e[2] + e['deltacheck'] - 2*e['delta']); x
B[2*e[0] + 2*e[1] + 3*e[2]] + B[4*e[1] + 3*e[2] - 2*e['delta'] + e['deltacheck']]
sage: KL.q_project(x, q)
B[(2, 2, 3)] + 1/q^2*B[(0, 4, 3)]
.. WARNING::
Recall that the null root, usually denoted `\delta`,
is in fact ``a[0]\delta`` in Sage's notation, in order
to avoid half integer coefficients (this only makes a
difference in type BC). Similarly, what's usually
denoted `q` is in fact ``q^a[0]`` in Sage's notations,
to avoid manipulating square roots::
sage: KL.q_project(KL.monomial(L.null_root()),q)
q^2*B[(0, 0, 0)]
"""
L0 = self.classical()
return L0.linear_combination( (self.q_project_on_basis(l, q), c) for l,c in x )
def twisted_demazure_lusztig_operator_on_basis(self, weight, i, q1, q2, convention="antidominant"):
r"""
Return the twisted Demazure-Lusztig operator acting on the basis.
INPUT:
- ``weight`` -- an element `\lambda` of the weight lattice
- ``i`` -- an element of the index set
- ``q1,q2`` -- two elements of the ground ring
- ``convention`` -- "antidominant", "bar", or "dominant" (default: "antidominant")
.. SEEALSO:: :meth:`twisted_demazure_lusztig_operators`
EXAMPLES::
sage: L = RootSystem(["A",3,1]).ambient_space()
sage: e = L.basis()
sage: K = QQ['q1,q2'].fraction_field()
sage: q1, q2 = K.gens()
sage: KL = L.algebra(K)
sage: Lambda = L.classical().fundamental_weights()
sage: KL.twisted_demazure_lusztig_operator_on_basis(Lambda[1]+2*Lambda[2], 1, q1, q2, convention="dominant")
(-q2)*B[(2, 3, 0, 0)]
sage: KL.twisted_demazure_lusztig_operator_on_basis(Lambda[1]+2*Lambda[2], 2, q1, q2, convention="dominant")
(-q1-q2)*B[(3, 1, 1, 0)] + (-q2)*B[(3, 0, 2, 0)]
sage: KL.twisted_demazure_lusztig_operator_on_basis(Lambda[1]+2*Lambda[2], 3, q1, q2, convention="dominant")
q1*B[(3, 2, 0, 0)]
sage: KL.twisted_demazure_lusztig_operator_on_basis(Lambda[1]+2*Lambda[2], 0, q1, q2, convention="dominant")
((q1*q2+q2^2)/q1)*B[(1, 2, 1, 1)] + ((q1*q2+q2^2)/q1)*B[(1, | |
################################################################################
# #
# Author: #
# #
# <NAME>, PhD #
# Senior Specialist #
# RedCastle Resources, Inc. #
# Working onsite at: #
# USDA Forest Service #
# Remote Sensing Applications Center (RSAC) #
# 2222 West 2300 South #
# Salt Lake City, UT 84119 #
# Office: (801) 975-3828 #
# Mobile: (801) 694-9215 #
# Email: <EMAIL> #
# RSAC FS Intranet website: http://fsweb.rsac.fs.fed.us/ #
# RSAC FS Internet website: http://www.fs.fed.us/eng/rsac/ #
# #
# Purpose: #
# #
# Calcuates mean focal standard deviation (sd). #
# #
# Used in the script MODIS_Daily_Processing.py #
# #
################################################################################
# #
# IMPORT PYTHON MODULES #
# #
################################################################################
import time, pp, os, subprocess, glob
################################################################################
# #
# IMPORT CUSTOM PYTHON MODULES #
# #
# AssignProcessesToCPUs: used to assign processes to individual processors #
# CheckLogFiles: Checks .log files for errors #
# CreateBatFiles: Used to create ERDAS formatted DOS bat files #
# GetInputFilesForCompositeModels: used to get input and output filenames #
# RunBatchFiles: used to run the .bat files #
# TimeString: used to get and format the current time for printing purposes #
# #
################################################################################
from AssignProcessesToCPUs import GetFileDict
from CheckLogFiles import CheckLogFiles
from CreateBatFiles import CreateBatFile
import GetInputFilesForCompositeModels
from RunBatchFiles import RunBatchFile
from TimeString import TimeString
################################################################################
# #
# VARIABLE DESCRIPTIONS #
# #
#******************************************************************************#
# #
# SCRIPT DEFINED VARIABLES #
# #
# COMPOSITEDIRECTORIES: because composite periods overlap, each day, except #
# for days 1-8, belong to two composite periods. #
# COMPOSITEDIRECTORIES contains a list of composite #
# directories for a particular day. For example, the #
# compositing directoies for day 9 are: #
# ['W:/TERRA/composites/2014/001_016','W:/TERRA/composites/2014/009_024']
# PROCESSING_DIRECTORY: processing directories for each satellite #
# #
#******************************************************************************#
# #
# USER DEFINED VARIABLES #
# #
# NCPUS: number of CPUs available for processing #
# SATELLITE: type of satellite sensor (i.e., AQUA, TERRA) #
# #
#******************************************************************************#
# #
# OPTIONAL VARIABLES #
# #
# HTML_LINES: Python dictionary where the keys are the names of the #
# satellites (TERRA, AQUA). The values of the dictionary keys are #
# lists of HTML text that are written to html files that informs #
# the users what the program is doing. #
# #
################################################################################
# Number of worker CPUs, read from the Windows environment.
NCPUS = int(os.environ['NUMBER_OF_PROCESSORS'])
SATELLITE = ['TERRA','AQUA']
# Filled in elsewhere before Run() is called: per-satellite composite
# directories and processing directories.
COMPOSITEDIRECTORIES = {}
PROCESSING_DIRECTORY = {}
HTML_LINES = {}
# Daily log file, named after the current day-of-year (time.localtime()[7] is tm_yday).
LOG = 'W:/scripts/Logs/log_' + str(time.localtime()[7]) + '.txt'
# Path to the ERDAS IMAGINE session executable used to run the models.
ERDAS_Executable = 'C:/Intergraph/ERDAS IMAGINE 2013/bin/Win32Release/eml.exe'
################################################################################
# #
# Function: CalculateMeanFocalSD #
# #
# Function Purpose: #
# #
# Calculate mean focal standard deviation. #
# #
################################################################################
def CalculateMeanFocalSD():
InputFiles = {}
print TimeString(),'-> List of mean focal sd models'
LogOutput = open(LOG,'a')
LogOutput.write(TimeString() + ' -> List of mean focal sd models\n')
InputFilesDict = {}
InputFilesDict = GetInputFilesForCompositeModels.GetInputImages(SATELLITE,PROCESSING_DIRECTORY,COMPOSITEDIRECTORIES,['_focal_sd_01.img','_focal_sd_01.ige'])
for satellite in InputFilesDict.keys():
InputFiles[satellite] = []
for Inputs in InputFilesDict[satellite]:
InputFile = Inputs[0]
MaskFile = Inputs[1]
OutputFile = Inputs[2]
ModelText = []
ModelText.append('SET CELLSIZE MIN;\n')
ModelText.append('SET WINDOW INTERSECTION;\n')
ModelText.append('SET AOI NONE;\n\n')
ModelText.append('INTEGER RASTER InputImage FILE OLD PUBINPUT NEAREST NEIGHBOR AOI NONE "' + InputFile + '";\n')
ModelText.append('INTEGER RASTER ImageMask FILE OLD PUBINPUT NEAREST NEIGHBOR AOI NONE "' + MaskFile + '";\n')
ModelText.append('FLOAT RASTER OutputImage FILE NEW PUBOUT IGNORE 0 ATHEMATIC FLOAT SINGLE "' + OutputFile + '";\n')
ModelText.append('INTEGER MATRIX Filter5x5;\n\n')
ModelText.append('Filter5x5 = MATRIX(5, 5:\n')
ModelText.append(' 1, 1, 1, 1, 1, \n')
ModelText.append(' 1, 1, 1, 1, 1, \n')
ModelText.append(' 1, 1, 1, 1, 1, \n')
ModelText.append(' 1, 1, 1, 1, 1, \n')
ModelText.append(' 1, 1, 1, 1, 1);\n\n')
ModelText.append('#define Mask FLOAT(EITHER 0.0 IF ( $InputImage(1) == 0 || $InputImage(2) == 0 || $InputImage(3) == 0 || $InputImage(4) == 0 || $InputImage(7) == 0 ) OR FLOAT($ImageMask) OTHERWISE )\n')
ModelText.append('#define Band7_FocalSD FLOAT(FOCAL STANDARD DEVIATION ( $InputImage(7) , $Filter5x5 ) )\n')
ModelText.append('#define Band4_FocalSD FLOAT(FOCAL STANDARD DEVIATION ( $InputImage(4) , $Filter5x5 ) )\n')
ModelText.append('#define Band3_FocalSD FLOAT(FOCAL STANDARD DEVIATION ( $InputImage(3) , $Filter5x5 ) )\n')
ModelText.append('#define Band2_FocalSD FLOAT(FOCAL STANDARD DEVIATION ( $InputImage(2) , $Filter5x5 ) )\n')
ModelText.append('#define Band1_FocalSD FLOAT(FOCAL STANDARD DEVIATION ( $InputImage(1) , $Filter5x5 ) )\n')
ModelText.append('#define FocalSD_Stack FLOAT(STACKLAYERS ( $Band1_FocalSD , $Band2_FocalSD , $Band3_FocalSD , $Band4_FocalSD , $Band7_FocalSD ))\n')
ModelText.append('#define Mean_FocalSD FLOAT(STACK MEAN ( $FocalSD_Stack ) )\n')
ModelText.append('OutputImage = EITHER -1000.0 IF ( $Mean_FocalSD == 0.0 && $Mask == 1.0 ) OR $Mean_FocalSD * $Mask OTHERWISE ;\n')
ModelText.append('QUIT;\n')
InputFiles[satellite].append(CreateBatFile(COMPOSITEDIRECTORIES[satellite][0].replace('\\','/'), os.path.splitext(os.path.split(OutputFile)[1])[0],ModelText))
print os.path.splitext(os.path.split(OutputFile)[1])[0] + '.mdl'
LogOutput.write(os.path.splitext(os.path.split(OutputFile)[1])[0] + '.mdl\n')
LogOutput.close()
return InputFiles
## # TimesDict is used solely to print stuff to the HTML.
## TimesDict = {}
## for Image in InputImages:
##
## TimesDict[Image[2]] = {}
## TimesDict[Image[2]]['start'] = TimeString()
## TimesDict[Image[2]]['error'] = ''
## try:
##
## # Define the input and output images.
## InputFile = Image[0].replace('\\','/')
## MaskFile = Image[1].replace('\\','/')
## OutputFile = Image[2].replace('\\','/')
##
## # Create the ERDAS model.
## MeanFocalSDModel = imagine.modeler.Model()
##
## # Load the mask input image and the MODIS swath input image.
## MaskInputImage = MeanFocalSDModel.RasterInput(MaskFile, DataType = 'Integer')
## InputImage = MeanFocalSDModel.RasterInput(InputFile, DataType = 'Integer')
##
## # Get the bands.
## Layer1 = MeanFocalSDModel.BandSelection(InputImage,'1')
## Layer2 = MeanFocalSDModel.BandSelection(InputImage,'2')
## Layer3 = MeanFocalSDModel.BandSelection(InputImage,'3')
## Layer4 = MeanFocalSDModel.BandSelection(InputImage,'4')
## Layer7 = MeanFocalSDModel.BandSelection(InputImage,'7')
##
## # Create masks for each band.
## Layer1Mask = MeanFocalSDModel.EitherOr(MeanFocalSDModel.Eq(Layer1,0),0,MaskInputImage)
## Layer2Mask = MeanFocalSDModel.EitherOr(MeanFocalSDModel.Eq(Layer2,0),0,MaskInputImage)
## Layer3Mask = MeanFocalSDModel.EitherOr(MeanFocalSDModel.Eq(Layer3,0),0,MaskInputImage)
## Layer4Mask = MeanFocalSDModel.EitherOr(MeanFocalSDModel.Eq(Layer4,0),0,MaskInputImage)
## Layer7Mask = MeanFocalSDModel.EitherOr(MeanFocalSDModel.Eq(Layer7,0),0,MaskInputImage)
##
## # Define the kernel that will be used to calculate the mean
## # focal sd.
## Matrix5x5 = MeanFocalSDModel.KernelMatrixInput(MatrixType = 'Integer', KernelLibrary = 'C:/Intergraph/ERDAS IMAGINE 2013/etc/default.klb', KernelName = '5x5 Low Pass', Normalize = False)
##
## # Calculate the focal sd for each layer.
## FocalSD_Layer1 = MeanFocalSDModel.EitherOr(MeanFocalSDModel.Eq(Layer1Mask,1),MeanFocalSDModel.FocalStandardDeviation(Layer1,Matrix5x5,IgnoreValue = 0.0),0)
## FocalSD_Layer2 = MeanFocalSDModel.EitherOr(MeanFocalSDModel.Eq(Layer2Mask,1),MeanFocalSDModel.FocalStandardDeviation(Layer2,Matrix5x5,IgnoreValue = 0.0),0)
## FocalSD_Layer3 = MeanFocalSDModel.EitherOr(MeanFocalSDModel.Eq(Layer3Mask,1),MeanFocalSDModel.FocalStandardDeviation(Layer3,Matrix5x5,IgnoreValue = 0.0),0)
## FocalSD_Layer4 = MeanFocalSDModel.EitherOr(MeanFocalSDModel.Eq(Layer4Mask,1),MeanFocalSDModel.FocalStandardDeviation(Layer4,Matrix5x5,IgnoreValue = 0.0),0)
## FocalSD_Layer7 = MeanFocalSDModel.EitherOr(MeanFocalSDModel.Eq(Layer7Mask,1),MeanFocalSDModel.FocalStandardDeviation(Layer7,Matrix5x5,IgnoreValue = 0.0),0)
##
## # Calculate the mean focal sd.
## Mean = MeanFocalSDModel.Mean(FocalSD_Layer1,FocalSD_Layer2,FocalSD_Layer3,FocalSD_Layer4,FocalSD_Layer7)
##
## # Create some masks.
## ZerosMask = MeanFocalSDModel.EitherOr(MeanFocalSDModel.Eq(Mean,0),1,0)
## Mask = MeanFocalSDModel.Multiply(Layer1Mask,Layer2Mask,Layer3Mask,Layer4Mask,Layer7Mask,MaskInputImage)
## RealZerosMask = MeanFocalSDModel.Multiply(ZerosMask,Mask)
##
## # Mask the mean focal sd image.
## Recode0 = MeanFocalSDModel.EitherOr(MeanFocalSDModel.Eq(RealZerosMask,1),-1000,Mean)
## MaskedMean = MeanFocalSDModel.Multiply(Recode0,Mask)
##
## # Define the output image.
## OutputImage = MeanFocalSDModel.RasterOutput(MaskedMean,OutputFile, PixelType = 'f32', Thematicity = 'Continuous',ComputePyramids = False)
##
## # Run the model.
## MeanFocalSDModel.Execute()
##
## except Exception, error:
## TimesDict[Image[2]]['error'] = 'error creating ' + Image[2] + '; ' + str(error)
## TimesDict[Image[2]]['end'] = TimeString()
## return TimesDict
################################################################################
# #
# Function: Run #
# #
################################################################################
def Run(date):
global FINALCHECK
FINALCHECK = False
HTML_Lines_Changed = False
############################################################################
# #
# Get a list of input images for each satellite. #
# #
############################################################################
BatFilesDict = CalculateMeanFocalSD()
NumberOfFiles = 0
BatFilesList = []
for satellite in BatFilesDict.keys():
NumberOfFiles = NumberOfFiles + len(BatFilesDict[satellite])
BatFilesList.extend(BatFilesDict[satellite])
BatFilesList.sort()
##
## InputFilesDict = {}
## InputFilesDict = GetInputFilesForCompositeModels.GetInputImages(SATELLITE,PROCESSING_DIRECTORY,COMPOSITEDIRECTORIES,['_focal_sd_01.img','_focal_sd_01.ige'])
## NumberOfFiles = 0
## InputFilesList = []
## OutputImagesList = {}
## for satellite in InputFilesDict.keys():
## OutputImagesList[satellite] = []
## NumberOfFiles = NumberOfFiles + len(InputFilesDict[satellite])
## InputFilesList.extend(InputFilesDict[satellite])
## for Images in InputFilesDict[satellite]:
## OutputImagesList[satellite].append(Images[2])
## InputFilesList.sort()
############################################################################
# #
# Run the bat files. The purpose of the while loop is sometimes ERDAS #
# will not run a process for some unknown reason. The loop will continue #
# to run until there are no files left to process or the loop has run 10 #
# times. #
# #
############################################################################
ERDASIsRunning = False
if (len(BatFilesList) != 0):
EML = subprocess.Popen(ERDAS_Executable, close_fds=True)
time.sleep(60)
ERDASIsRunning = True
count = 0
while (NumberOfFiles != 0) & (count < 10):
count = count + 1
print TimeString(),'-> creating',NumberOfFiles,'mean focal sd images'
LogOutput = open(LOG,'a')
LogOutput.write(TimeString() + ' -> creating ' + str(NumberOfFiles) + ' mean focal sd images\n')
LogOutput.close()
HTML_Lines_Changed = True
########################################################################
# #
# Write stuff to HTML #
# #
########################################################################
for satellite in SATELLITE:
if (HTML_LINES.get(satellite) == None):
HTML_LINES[satellite] = []
HTML_LINES[satellite].append("<body>\n")
HTML_LINES[satellite].append("<div>\n")
HTML_LINES[satellite].append("<p><b><span style='font-size:20.0pt'>Processing Day: " + satellite.upper() + " " + date + "</span></b></p>\n")
HTML_LINES[satellite].append("<p></p>\n")
HTML_LINES[satellite].append("<dl>\n")
HTML_LINES[satellite].append("<dt><b>Mean Focal SD</b></dt>\n")
HTML_LINES[satellite].append("<p></p>\n")
HTML_LINES[satellite].append("<dd>" + TimeString() + " -> Processing " + str(len(BatFilesDict[satellite])) + " Files</dd>\n")
HTML_LINES[satellite].append("<p></p>\n")
HTML_LINES[satellite].append("<ul>\n")
########################################################################
# #
# Assign processes to the CPUs #
# #
########################################################################
FileDict = GetFileDict(NCPUS,BatFilesList)
########################################################################
# #
# Run the process #
# #
########################################################################
job_server = pp.Server()
[job_server.submit(RunBatchFile, (FileDict[cpu],),(),('subprocess',)) for cpu in range(1,NCPUS+1)]
# Results = [job_server.submit(CalculateMeanFocalSD,(FileDict[cpu],),(TimeString,),('imagine','time')) for cpu in range(1,NCPUS+1)]
job_server.wait()
job_server.print_stats()
job_server.destroy()
########################################################################
# #
# Check the log files for errors. #
# #
########################################################################
Directories = {}
for satellite in SATELLITE:
Directories[satellite] = COMPOSITEDIRECTORIES[satellite][0]
New_HTML_LINES = CheckLogFiles(satellite,Directories,'path*_noimage_mask.log',FINALCHECK)
HTML_LINES[satellite].extend(New_HTML_LINES[satellite])
## ########################################################################
## # #
## # Write stuff to HTML #
## # #
## ########################################################################
##
## for Dict in Results:
## for Image in Dict().keys():
## for satellite in SATELLITE:
## print 'Processing',Image,'Started:',Dict()[Image]['start'],'Ended:',Dict()[Image]['end']
## LogOutput.write('Processing ' + Image + ' Started: ' + str(Dict()[Image]['start']) + ' Ended: ' + str(Dict()[Image]['end']) + '\n')
## try:
## x = OutputImagesList[satellite].index(Image)
## except:
## pass
## else:
## if (Dict()[Image]['error'] != | |
the main program window.
This [unfortunately] needs to rely on a search methodology to target entry field widgets that need updating,
because they can be destroyed and re-created (thus, early references to existing widgets can't be trusted). """
# Convert the value to a bytearray and create a list
newHex = '{0:0{1}X}'.format( self.allFlagsValue, self.flagFieldLength*2 ) # Formats as hex; pads up to n zeroes (second arg)
# Update the field entry widgets in the Structural Analysis tab, if it's currently showing this set of flags
structTable = getattr( Gui.structurePropertiesFrame, 'structTable', None )
if structTable:
# Get the offset of the structure shown in the panel (offset of the first field entry), to see if it's the same as the one we're editing
firstFieldOffsets = structTable.grid_slaves( column=1, row=0 )[0].offsets # Should never be a list when generated here
if firstFieldOffsets == self.structure.offset:
# Set the value of the entry widget, and trigger its bound update function (which will handle everything from validation through data-saving)
hexEntryWidget = structTable.grid_slaves( column=1, row=self.fieldAndValueIndex )[0]
self.updateWidget( hexEntryWidget, newHex )
# Update the field entry widgets in the Texture Tree's Properties tab, if it's currently showing this set of flags
flagWidgets = Gui.texturePropertiesPane.flagWidgets
if self.structure.length == 0xC: # Pixel Proc. struct
structOffset = 0
elif self.structure.length == 0x18: # Material struct
structOffset = 4
elif self.structure.length == 0x5C: # Texture struct
structOffset = 0x40
else: # Allow this method to fail silently
print 'Unexpected structure length for the Flag Decoder update method:', hex( self.structure.length )
structOffset = 0
for widget in flagWidgets:
# Attempt to match this widget's flag offsets to the start of this window's structure offset
if self.structure.offset in ( offset - structOffset for offset in widget.offsets ): # Makes a tuple of potential structure start offsets
# Avoid updating this widget if this window is from the SA tab and there's more than one set of flags being represented by the target widget
if not isinstance( self.fieldOffsets, list ) and len( widget.offsets ) > 1:
# Do however update the widget to show that some of the structs it refers to have different values than others
widget['highlightbackground'] = 'orange'
widget['highlightthickness'] = 2
else:
self.updateWidget( widget, newHex )
break
# Update the actual data in the file for each offset
updateName = self.structure.fields[self.fieldAndValueIndex].replace( '_', ' ' ).replace( '\n', ' ' )
# Update the value in the file containing the modified flag(s)
descriptionOfChange = updateName + ' modified in ' + globalDatFile.fileName
newData = bytearray.fromhex( newHex )
if type( self.fieldOffsets ) == list: # This is expected to be for an entry on the Texture Tree tab's Properties tab
for offset in self.fieldOffsets:
globalDatFile.updateData( offset, newData, descriptionOfChange )
else: # This is expected to be for an entry on the Structural Analysis tab
globalDatFile.updateData( self.fieldOffsets, newData, descriptionOfChange )
updateProgramStatus( updateName + ' Updated' )
def updateWidget( self, widget, newHex ):
""" Just handles some cosmetic changes for the widget. Actual saving
of the data is handled by the updateFlagsInFile method. """
# Update the values shown
widget.delete( 0, 'end' )
widget.insert( 0, newHex )
# Change the background color of the widget, to show that changes have been made to it and are pending saving
widget.configure( background='#faa' )
# Add the widget to a list to keep track of what widgets need to have their background restored to white when saving
global editedDatEntries
editedDatEntries.append( widget )
def showStructInStructuralAnalysis( structOffset ):
# Ensure the SA tab has been populated with the base structures (header/RT/root&reference nodes/etc)
if not Gui.fileStructureTree.get_children(): # SAV tab hasn't been populated yet. Perform analysis.
analyzeDatStructure()
# Add the structure and any parents required for it to the treeview
tic = time.clock()
addParentStructures( structOffset, initialCall=True )
toc = time.clock()
print 'time to add parents:', toc-tic
# Get the iids of all of the struct instances that are in the treeview
targetStructIids = getStructureIids( (structOffset,) )
if not targetStructIids:
# Unable to add the structure; it may be an orphan
operationResultsText = 'Unable to add this to the treeview, which means that it may be an orphan, or a decendant of one.'
else:
Gui.fileStructureTree.focus( targetStructIids[0] ) # Set a keyboard focus to the first item
Gui.fileStructureTree.see( targetStructIids[0] ) # Scroll to the first item, so it's visible (folders should already be expanded)
Gui.fileStructureTree.selection_set( targetStructIids )
if len( targetStructIids ) == 1:
operationResultsText = '1 instance of this structure was found.'
else:
operationResultsText = '{} instances of this structure were found.'.format( len(targetStructIids) )
# Expand the size of the treeview column, if needed.
currentViewingWidth = Gui.fileStructureTree.column( '#0', 'width' ) # Excludes the Offset column
for item in targetStructIids:
adjustSavColumnWidth( item, currentViewingWidth )
print operationResultsText
return operationResultsText
class ColorSwatch( ttk.Label ):

	""" Creates a circular image (on a label widget), to show a color example and allow for editing it.
		hexColor should be an 8 character hex string of RRGGBBAA """

	# Not using the imageBank in this case to avoid ImageTk.PhotoImage
	colorMask = Image.open( imagesFolder + "\\colorChooserMask.png" )

	def __init__( self, parent, hexColor, entryWidget=None ):

		# Create the label itself
		ttk.Label.__init__( self, parent, cursor='hand2' )

		# Only a swatch paired with an entry widget responds to clicks
		if entryWidget:
			self.entryWidget = entryWidget
			self.bind( '<1>', self.editColor )

		# Create the image swatch that will be displayed
		self.renderCircle( hexColor )

	def renderCircle( self, hexColor ):

		""" Renders the circular color swatch image and attaches it to this label. """

		# Convert the hex string provided to an RGBA values list
		fillColor = hex2rgb( hexColor )[0]

		# Draw a large (160x160) circle of the given color on a blank canvas. It's drawn
		# large and scaled down below to create anti-aliased edges (drawing at the final
		# size directly would just produce a hexagon).
		canvas = Image.new( 'RGBA', (160, 160), (0, 0, 0, 0) )
		ImageDraw.Draw( canvas ).ellipse( (10, 10, 150, 150), fill=fillColor )
		canvas.thumbnail( (16, 16), Image.ANTIALIAS )

		# Overlay the highlight/shadow mask on top of the above color (for a depth effect)
		canvas.paste( self.colorMask, (0, 0), self.colorMask )

		# Attach the image to self, to prevent garbage collection
		self.swatchImage = ImageTk.PhotoImage( canvas )
		self.configure( image=self.swatchImage )

	def editColor( self, event ):

		""" Click handler; opens a color picker window and applies the chosen color. """

		# Create a window where the user can choose a new color, and wait for it to close
		colorPicker = MeleeColorPicker( 'Modifying ' + self.entryWidget.updateName, initialColor=self.entryWidget.get() )
		Gui.root.wait_window( colorPicker.window )

		# Nothing to do if the color is unchanged (the operation may have been canceled)
		if colorPicker.initialColor == colorPicker.currentHexColor:
			return

		if len( colorPicker.currentHexColor ) != self.entryWidget.byteLength * 2:
			msg( 'The value generated from the color picker (' + colorPicker.currentHexColor + ') does not match the byte length requirement of the destination.' )
		else:
			# Replace the text in the entry widget
			self.entryWidget.delete( 0, 'end' )
			self.entryWidget.insert( 0, colorPicker.currentHexColor )

			# Update the data in the file with the entry's data, and redraw the color swatch
			updateEntryHex( '', widget=self.entryWidget )
# def modifyFolders( parentIid, openFolders ): # Collapses or expands all folder items in a treeview (of level parentIid or lower).
# for item in Gui.fileStructureTree.get_children( parentIid ):
# if len( Gui.fileStructureTree.get_children(item) ) != 0: # Item is a folder.
# Gui.fileStructureTree.item( item, open=openFolders )
# modifyFolders( item, openFolders )
# def expandSAV( tags ):
# if tags == '':
# modifyFolders( '', True )
# else:
# # First, collapse all items.
# modifyFolders( '', False )
# # Expand items, down to the level specified.
# targetItems = Gui.fileStructureTree.tag_has( tags )
# for iid in targetItems:
# Gui.fileStructureTree.item( iid, open=True )
# parent = Gui.fileStructureTree.parent( iid )
# while parent != '':
# Gui.fileStructureTree.item( parent, open=True )
# parent = Gui.fileStructureTree.parent( parent )
# def collapseSAV( tags ):
# # First, collapse all items.
# modifyFolders( '', False )
# targetItems = Gui.fileStructureTree.tag_has( tags )
# for iid in targetItems:
# parent = Gui.fileStructureTree.parent( iid )
# while parent != '':
# Gui.fileStructureTree.item( parent, open=True )
# parent = Gui.fileStructureTree.parent( parent )
# def highlightSAV( tag, highlightColor ):
# Gui.fileStructureTree.tag_configure( tag, background=highlightColor )
# def setSAVlineHighlights(): # Adds/removes line highlighting on the Structural Analysis tab.
# for tag, color, variable in Gui.savHighlightColors:
# if variable.get(): Gui.fileStructureTree.tag_configure( tag, background=color )
# else: Gui.fileStructureTree.tag_configure( tag, background='' )
# def removeAllSAVlineHighlighting():
# for tag, color, variable in Gui.savHighlightColors:
# Gui.fileStructureTree.tag_configure( tag, background='' )
# variable.set( False )
#===============================#
# ~ ~ Manual Placements tab ~ ~ #
#===============================#
def scanFolderStructure():
# Prompt the user to choose a folder to look for textures in
parentFolder = tkFileDialog.askdirectory(
title="Choose a folder. All PNGs and TPLs in the chosen folder, and in all subfolders, will be selected.",
initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
mustexist=True)
if parentFolder != '':
# Update the default directory to start in when opening or exporting files.
with open( settingsFile, 'w') as theSettingsFile:
settings.set( 'General Settings', 'defaultSearchDirectory', | |
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.enable_secret_version), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
resources.SecretVersion()
)
await client.enable_secret_version(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_enable_secret_version_flattened():
    """Verify that the flattened `name` argument is packed into the request."""
    client = SecretManagerServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.enable_secret_version), "__call__"
    ) as patched:
        # Designate an appropriate return value for the call.
        patched.return_value = resources.SecretVersion()

        # Invoke the method using only keyword (flattened) arguments.
        client.enable_secret_version(name="name_value",)

        # Exactly one RPC should have been made, carrying the expected
        # value in the request object.
        assert len(patched.mock_calls) == 1
        _, call_args, _ = patched.mock_calls[0]
        assert call_args[0].name == "name_value"
def test_enable_secret_version_flattened_error():
    """Passing both a request object and flattened fields must raise."""
    client = SecretManagerServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Supplying a request object alongside flattened fields is not allowed.
    with pytest.raises(ValueError):
        client.enable_secret_version(
            service.EnableSecretVersionRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_enable_secret_version_flattened_async():
    """Verify the async client packs the flattened `name` argument into the request."""
    client = SecretManagerServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.enable_secret_version), "__call__"
    ) as call:
        # Designate an appropriate return value for the call. (A synchronous
        # placeholder return value previously assigned here was immediately
        # overwritten — dead code; the awaitable fake below is what the async
        # client actually consumes.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            resources.SecretVersion()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.enable_secret_version(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_enable_secret_version_flattened_error_async():
    """Mixing a request object with flattened fields must raise (async client)."""
    client = SecretManagerServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Supplying a request object alongside flattened fields is not allowed.
    with pytest.raises(ValueError):
        await client.enable_secret_version(
            service.EnableSecretVersionRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [service.DestroySecretVersionRequest, dict,])
def test_destroy_secret_version(request_type, transport: str = "grpc"):
    """Round-trip a DestroySecretVersion call through a mocked gRPC stub."""
    client = SecretManagerServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.destroy_secret_version), "__call__"
    ) as patched:
        # Fake a fully-populated SecretVersion as the RPC result.
        patched.return_value = resources.SecretVersion(
            name="name_value",
            state=resources.SecretVersion.State.ENABLED,
            etag="etag_value",
        )
        response = client.destroy_secret_version(request)

        # The stub should have been invoked exactly once, with a request
        # equivalent to an empty DestroySecretVersionRequest.
        assert len(patched.mock_calls) == 1
        _, call_args, _ = patched.mock_calls[0]
        assert call_args[0] == service.DestroySecretVersionRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, resources.SecretVersion)
    assert response.name == "name_value"
    assert response.state == resources.SecretVersion.State.ENABLED
    assert response.etag == "etag_value"
def test_destroy_secret_version_empty_call():
    # This test is a coverage failsafe: calling with no request object and no
    # flattened fields (i.e. request == None) must still reach the stub.
    client = SecretManagerServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.destroy_secret_version), "__call__"
    ) as patched:
        client.destroy_secret_version()

        # A default DestroySecretVersionRequest should have been sent.
        patched.assert_called()
        _, call_args, _ = patched.mock_calls[0]
        assert call_args[0] == service.DestroySecretVersionRequest()
@pytest.mark.asyncio
async def test_destroy_secret_version_async(
    transport: str = "grpc_asyncio", request_type=service.DestroySecretVersionRequest
):
    """Round-trip a DestroySecretVersion call through the async client."""
    client = SecretManagerServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.destroy_secret_version), "__call__"
    ) as patched:
        # The async transport expects an awaitable; wrap the fake response.
        patched.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            resources.SecretVersion(
                name="name_value",
                state=resources.SecretVersion.State.ENABLED,
                etag="etag_value",
            )
        )
        response = await client.destroy_secret_version(request)

        # The stub should have been invoked with an empty request.
        assert len(patched.mock_calls)
        _, call_args, _ = patched.mock_calls[0]
        assert call_args[0] == service.DestroySecretVersionRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, resources.SecretVersion)
    assert response.name == "name_value"
    assert response.state == resources.SecretVersion.State.ENABLED
    assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_destroy_secret_version_async_from_dict():
    # Re-run the async round-trip test with a plain dict as the request type.
    await test_destroy_secret_version_async(request_type=dict)
def test_destroy_secret_version_field_headers():
    """Routing headers must be derived from URI-bound request fields."""
    client = SecretManagerServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.DestroySecretVersionRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.destroy_secret_version), "__call__"
    ) as patched:
        patched.return_value = resources.SecretVersion()
        client.destroy_secret_version(request)

        # The stub should have received the original request object...
        assert len(patched.mock_calls) == 1
        _, call_args, kwargs = patched.mock_calls[0]
        assert call_args[0] == request

        # ...along with a routing metadata header built from `name`.
        assert ("x-goog-request-params", "name=name/value",) in kwargs["metadata"]
@pytest.mark.asyncio
async def test_destroy_secret_version_field_headers_async():
    """Routing headers must be derived from URI-bound request fields (async)."""
    client = SecretManagerServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = service.DestroySecretVersionRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.destroy_secret_version), "__call__"
    ) as patched:
        # The async transport expects an awaitable; wrap the fake response.
        patched.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            resources.SecretVersion()
        )
        await client.destroy_secret_version(request)

        # The stub should have received the original request object...
        assert len(patched.mock_calls)
        _, call_args, kwargs = patched.mock_calls[0]
        assert call_args[0] == request

        # ...along with a routing metadata header built from `name`.
        assert ("x-goog-request-params", "name=name/value",) in kwargs["metadata"]
def test_destroy_secret_version_flattened():
    """Verify that the flattened `name` argument is packed into the request."""
    client = SecretManagerServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.destroy_secret_version), "__call__"
    ) as patched:
        # Designate an appropriate return value for the call.
        patched.return_value = resources.SecretVersion()

        # Invoke the method using only keyword (flattened) arguments.
        client.destroy_secret_version(name="name_value",)

        # Exactly one RPC should have been made, carrying the expected
        # value in the request object.
        assert len(patched.mock_calls) == 1
        _, call_args, _ = patched.mock_calls[0]
        assert call_args[0].name == "name_value"
def test_destroy_secret_version_flattened_error():
    """Passing both a request object and flattened fields must raise."""
    client = SecretManagerServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Supplying a request object alongside flattened fields is not allowed.
    with pytest.raises(ValueError):
        client.destroy_secret_version(
            service.DestroySecretVersionRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_destroy_secret_version_flattened_async():
    """Verify the async client packs the flattened `name` argument into the request."""
    client = SecretManagerServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.destroy_secret_version), "__call__"
    ) as call:
        # Designate an appropriate return value for the call. (A synchronous
        # placeholder return value previously assigned here was immediately
        # overwritten — dead code; the awaitable fake below is what the async
        # client actually consumes.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            resources.SecretVersion()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.destroy_secret_version(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_destroy_secret_version_flattened_error_async():
    """Mixing a request object with flattened fields must raise (async client)."""
    client = SecretManagerServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Supplying a request object alongside flattened fields is not allowed.
    with pytest.raises(ValueError):
        await client.destroy_secret_version(
            service.DestroySecretVersionRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [iam_policy_pb2.SetIamPolicyRequest, dict,])
def test_set_iam_policy(request_type, transport: str = "grpc"):
    """Round-trip a SetIamPolicy call through a mocked gRPC stub."""
    client = SecretManagerServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as patched:
        # Fake a populated Policy as the RPC result.
        patched.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",)
        response = client.set_iam_policy(request)

        # The stub should have been invoked exactly once, with a request
        # equivalent to an empty SetIamPolicyRequest.
        assert len(patched.mock_calls) == 1
        _, call_args, _ = patched.mock_calls[0]
        assert call_args[0] == iam_policy_pb2.SetIamPolicyRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
def test_set_iam_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecretManagerServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
| |
-1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 6
W -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 6
Y -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 6
A C D E F G H I K L M N P Q R S T V W Y""",
)
mat = SubsMat.SeqMat(MatrixInfo.johnson)
self.assertEqual(len(mat), 210)
self.checkMatrix(
mat,
"""\
A 6
C -3 16
D -1 -9 8
E 0 -6 2 8
F -3 -4 -7 -6 10
G 0 -8 -2 -2 -8 8
H -3 -8 0 -2 -1 -3 12
I -2 -7 -4 -4 0 -5 -5 8
K 0 -8 -1 1 -5 -3 0 -4 7
L -3 -8 -8 -5 1 -7 -4 2 -3 7
M -1 -4 -5 -2 0 -5 -2 2 -1 4 11
N -1 -7 2 0 -3 -1 1 -4 0 -4 -3 8
P -1 -8 -1 -1 -5 -2 -4 -5 0 -2 -9 -2 10
Q 0 -6 -1 2 -6 -2 1 -7 1 -4 0 0 -3 9
R -1 -5 -3 0 -6 -2 0 -5 3 -3 -4 -1 -3 2 10
S 0 -7 0 -2 -4 -1 -2 -4 -1 -5 -4 1 -1 -1 0 5
T 0 -6 -1 0 -5 -3 -3 -3 0 -4 -3 0 -2 0 -1 2 6
V 0 -4 -5 -4 -1 -5 -3 3 -3 1 0 -5 -5 -3 -4 -4 -1 7
W -5 -9 -6 -7 3 -6 -4 -3 -5 -1 0 -6 -7 -8 -3 -6 -9 -4 15
Y -4 -7 -3 -3 3 -5 0 -2 -3 -2 -1 -1 -7 -5 -2 -3 -2 -1 2 10
A C D E F G H I K L M N P Q R S T V W Y""",
)
mat = SubsMat.SeqMat(MatrixInfo.levin)
self.assertEqual(len(mat), 210)
self.checkMatrix(
mat,
"""\
A 2
C 0 2
D 0 0 2
E 1 0 1 2
F -1 -1 -1 -1 2
G 0 0 0 0 -1 2
H 0 0 0 0 -1 0 2
I 0 0 -1 -1 1 -1 -1 2
K 0 0 0 0 -1 0 0 -1 2
L 0 0 -1 -1 0 -1 -1 0 -1 2
M 0 0 -1 -1 0 -1 -1 0 -1 2 2
N 0 0 1 0 -1 0 0 -1 1 -1 -1 3
P -1 0 0 -1 -1 0 0 -1 0 -1 -1 0 3
Q 0 0 0 1 -1 0 0 -1 0 -1 -1 1 0 2
R 0 0 0 0 -1 0 0 -1 1 -1 -1 0 0 0 2
S 1 0 0 0 -1 0 0 -1 0 -1 -1 0 0 0 0 2
T 0 0 0 0 -1 0 0 0 0 0 0 0 0 0 0 0 2
V 0 0 -1 -1 0 -1 -1 1 -1 1 0 -1 -1 -1 -1 -1 0 2
W -1 -1 -1 -1 0 -1 -1 0 -1 0 0 -1 -1 -1 0 -1 -1 0 2
Y -1 -1 -1 -1 1 -1 0 0 -1 0 0 -1 -1 -1 -1 -1 -1 0 0 2
A C D E F G H I K L M N P Q R S T V W Y""",
)
mat = SubsMat.SeqMat(MatrixInfo.mclach)
self.assertEqual(len(mat), 210)
self.checkMatrix(
mat,
"""\
A 8
C 1 9
D 3 1 8
E 4 0 5 8
F 1 0 1 0 9
G 3 1 3 3 0 8
H 3 3 4 2 4 2 8
I 2 1 1 1 3 1 2 8
K 3 0 3 4 0 3 4 1 8
L 2 0 1 1 5 1 2 5 2 8
M 3 3 2 1 5 1 3 5 1 6 8
N 3 1 5 4 0 3 4 1 4 1 2 8
P 4 0 3 4 1 3 3 1 3 1 1 1 8
Q 3 0 4 5 0 2 4 0 4 3 3 4 3 8
R 2 1 1 3 1 3 5 1 5 2 1 3 3 5 8
S 4 2 3 4 2 3 3 2 3 2 2 5 3 4 4 8
T 3 2 3 4 1 2 4 3 3 3 3 3 3 3 3 5 8
V 3 1 1 2 3 2 2 5 2 5 4 1 2 2 2 2 3 8
W 1 2 0 1 6 1 3 3 1 3 1 0 0 2 3 3 2 2 9
Y 1 1 1 2 6 0 4 3 1 3 2 2 0 1 2 3 1 3 6 9
A C D E F G H I K L M N P Q R S T V W Y""",
)
mat = SubsMat.SeqMat(MatrixInfo.miyata)
self.assertEqual(len(mat), 210)
self.checkMatrix(
mat,
"""\
A 1
C 0 1
D -1 -2 1
E -1 -2 0 1
F -1 0 -3 -2 1
G 0 0 -1 -1 -2 1
H 0 -1 0 0 -1 -1 1
I -1 0 -2 -2 0 -2 -1 1
K -1 -2 0 0 -1 -2 0 -1 1
L -1 0 -2 -2 0 -2 -1 1 -1 1
M -1 0 -2 -1 0 -2 0 0 -1 0 1
N 0 -1 0 0 -2 0 0 -2 0 -2 -1 1
P 1 0 -1 -1 -1 0 0 -1 -1 -1 -1 0 1
Q 0 -1 0 0 -1 -1 0 -1 0 -1 -1 0 0 1
R -1 -1 -1 0 -1 -2 0 -1 0 -1 -1 0 -1 0 1
S 0 0 0 0 -2 0 0 -1 -1 -1 -1 0 0 0 -1 1
T 0 0 0 0 -1 0 0 0 0 -1 0 0 0 0 0 0 1
V 0 0 -2 -1 0 -1 0 0 -1 0 0 -1 0 0 -1 0 0 1
W -2 -2 -3 -2 0 -3 -1 0 -1 0 0 -3 -2 -2 -1 -3 -2 -1 1
Y -1 -1 -2 -1 0 -2 -1 0 -1 0 0 -2 -1 -1 0 -2 -1 0 0 1
A C D E F G H I K L M N P Q R S T V W Y""",
)
mat = SubsMat.SeqMat(MatrixInfo.nwsgappep)
self.assertEqual(len(mat), 253)
self.checkMatrix(
mat,
"""\
A 1
B 0 1
C 0 0 1
D 0 1 0 1
E 0 0 0 1 1
F 0 0 0 -1 0 1
G 0 0 0 0 0 0 1
H 0 0 0 0 0 0 0 1
I 0 0 0 0 0 0 0 0 1
K 0 0 0 0 0 0 0 0 0 1
L 0 0 0 0 0 1 0 0 0 0 1
M 0 0 0 0 0 0 0 0 0 0 1 1
N 0 1 0 0 0 0 0 0 0 0 0 0 1
P 0 0 0 0 0 0 0 0 0 0 0 0 0 1
Q 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
R 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
S 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
T 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
V 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 1
W 0 0 -1 -1 -1 1 -1 0 0 0 0 0 0 0 0 1 0 0 0 1
Y 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1
Z 0 0 0 0 1 0 0 0 0 | |
= PickPlace
option_vars = []
def place_on_table_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
# Always at the current location.
del goal, rng # this sampler is deterministic
assert len(objs) == 1
held_obj = objs[0]
x = state.get(held_obj, "pose") + state.get(held_obj, "grasp")
return np.array([x], dtype=np.float32)
place_on_table_nsrt = NSRT("PlaceOnTable", parameters,
preconditions, add_effects, delete_effects,
set(), option, option_vars,
place_on_table_sampler)
nsrts.add(place_on_table_nsrt)
return nsrts
def _get_cluttered_table_gt_nsrts(with_place: bool = False) -> Set[NSRT]:
    """Create ground truth NSRTs for ClutteredTableEnv.

    When with_place is True, options come from the "cluttered_table_place"
    variant (Grasp + Place); otherwise from "cluttered_table" (Grasp + Dump).
    """
    can_type, = _get_types_by_names("cluttered_table", ["can"])
    HandEmpty, Holding, Untrashed = _get_predicates_by_names(
        "cluttered_table", ["HandEmpty", "Holding", "Untrashed"])
    if with_place:
        Grasp, Place = _get_options_by_names("cluttered_table_place",
                                             ["Grasp", "Place"])
    else:
        Grasp, Dump = _get_options_by_names("cluttered_table",
                                            ["Grasp", "Dump"])
    nsrts = set()
    # Grasp: pick up an untrashed can with an empty hand.
    can = Variable("?can", can_type)
    parameters = [can]
    option_vars = [can]
    option = Grasp
    preconditions = {LiftedAtom(HandEmpty, []), LiftedAtom(Untrashed, [can])}
    add_effects = {LiftedAtom(Holding, [can])}
    delete_effects = {LiftedAtom(HandEmpty, [])}
    # NOTE: closes over with_place to choose a fixed vs. random start point.
    def grasp_sampler(state: State, goal: Set[GroundAtom],
                      rng: np.random.Generator,
                      objs: Sequence[Object]) -> Array:
        """Return [start_x, start_y, end_x, end_y] with the end at the can."""
        del goal  # unused
        assert len(objs) == 1
        can = objs[0]
        # Need a max here in case the can is trashed already, in which case
        # both pose_x and pose_y will be -999.
        end_x = max(0.0, state.get(can, "pose_x"))
        end_y = max(0.0, state.get(can, "pose_y"))
        if with_place:
            start_x, start_y = 0.2, 0.1
        else:
            start_x, start_y = rng.uniform(0.0, 1.0,
                                           size=2) # start from anywhere
        return np.array([start_x, start_y, end_x, end_y], dtype=np.float32)
    grasp_nsrt = NSRT("Grasp",
                      parameters, preconditions, add_effects, delete_effects,
                      set(), option, option_vars, grasp_sampler)
    nsrts.add(grasp_nsrt)
    if not with_place:
        # Dump: discard the held can, making it trashed (Untrashed deleted).
        can = Variable("?can", can_type)
        parameters = [can]
        option_vars = []
        option = Dump
        preconditions = {
            LiftedAtom(Holding, [can]),
            LiftedAtom(Untrashed, [can])
        }
        add_effects = {LiftedAtom(HandEmpty, [])}
        delete_effects = {
            LiftedAtom(Holding, [can]),
            LiftedAtom(Untrashed, [can])
        }
        dump_nsrt = NSRT("Dump", parameters, preconditions, add_effects,
                         delete_effects, set(), option, option_vars,
                         null_sampler)
        nsrts.add(dump_nsrt)
    else:
        # Place: put the held can back down (can stays untrashed).
        can = Variable("?can", can_type)
        parameters = [can]
        option_vars = [can]
        option = Place
        preconditions = {
            LiftedAtom(Holding, [can]),
            LiftedAtom(Untrashed, [can])
        }
        add_effects = {LiftedAtom(HandEmpty, [])}
        delete_effects = {LiftedAtom(Holding, [can])}
        def place_sampler(state: State, goal: Set[GroundAtom],
                          rng: np.random.Generator,
                          objs: Sequence[Object]) -> Array:
            """Return [start_x, start_y, end_x, end_y] for a place action."""
            start_x, start_y = 0.2, 0.1
            # Goal-conditioned sampling
            if CFG.cluttered_table_place_goal_conditioned_sampling:
                # Get the pose of the goal object
                assert len(goal) == 1
                goal_atom = next(iter(goal))
                assert goal_atom.predicate == Holding
                goal_obj = goal_atom.objects[0]
                goal_x = state.get(goal_obj, "pose_x")
                goal_y = state.get(goal_obj, "pose_y")
                # Place up w.r.t the goal, and to some distance left
                # or right such that we're not going out of x bounds
                # 0 to 0.4.
                end_y = goal_y * 1.1
                end_x = goal_x + 0.2
                if end_x > 0.4:
                    end_x = goal_x - 0.2
                return np.array([start_x, start_y, end_x, end_y],
                                dtype=np.float32)
            # Non-goal-conditioned sampling
            del state, goal, objs
            return np.array(
                [start_x, start_y,
                 rng.uniform(0, 0.4),
                 rng.uniform(0, 1.0)],
                dtype=np.float32)
        place_nsrt = NSRT("Place", parameters, preconditions, add_effects,
                          delete_effects, set(), option, option_vars,
                          place_sampler)
        nsrts.add(place_nsrt)
    return nsrts
def _get_blocks_gt_nsrts() -> Set[NSRT]:
    """Create ground truth NSRTs for BlocksEnv.

    Builds four operators: PickFromTable, Unstack, Stack and PutOnTable.
    All are deterministic except PutOnTable, which samples a drop location.
    """
    block_type, robot_type = _get_types_by_names(CFG.env, ["block", "robot"])
    On, OnTable, GripperOpen, Holding, Clear = _get_predicates_by_names(
        CFG.env, ["On", "OnTable", "GripperOpen", "Holding", "Clear"])
    Pick, Stack, PutOnTable = _get_options_by_names(
        CFG.env, ["Pick", "Stack", "PutOnTable"])
    nsrts = set()
    # PickFromTable: pick up a clear block resting on the table.
    block = Variable("?block", block_type)
    robot = Variable("?robot", robot_type)
    parameters = [block, robot]
    option_vars = [robot, block]
    option = Pick
    preconditions = {
        LiftedAtom(OnTable, [block]),
        LiftedAtom(Clear, [block]),
        LiftedAtom(GripperOpen, [robot])
    }
    add_effects = {LiftedAtom(Holding, [block])}
    delete_effects = {
        LiftedAtom(OnTable, [block]),
        LiftedAtom(Clear, [block]),
        LiftedAtom(GripperOpen, [robot])
    }
    pickfromtable_nsrt = NSRT("PickFromTable", parameters, preconditions,
                              add_effects, delete_effects, set(), option,
                              option_vars, null_sampler)
    nsrts.add(pickfromtable_nsrt)
    # Unstack: pick up a clear block from on top of another block,
    # which becomes clear as a result.
    block = Variable("?block", block_type)
    otherblock = Variable("?otherblock", block_type)
    robot = Variable("?robot", robot_type)
    parameters = [block, otherblock, robot]
    option_vars = [robot, block]
    option = Pick
    preconditions = {
        LiftedAtom(On, [block, otherblock]),
        LiftedAtom(Clear, [block]),
        LiftedAtom(GripperOpen, [robot])
    }
    add_effects = {
        LiftedAtom(Holding, [block]),
        LiftedAtom(Clear, [otherblock])
    }
    delete_effects = {
        LiftedAtom(On, [block, otherblock]),
        LiftedAtom(Clear, [block]),
        LiftedAtom(GripperOpen, [robot])
    }
    unstack_nsrt = NSRT("Unstack",
                        parameters, preconditions, add_effects, delete_effects,
                        set(), option, option_vars, null_sampler)
    nsrts.add(unstack_nsrt)
    # Stack: place the held block on top of a clear block.
    block = Variable("?block", block_type)
    otherblock = Variable("?otherblock", block_type)
    robot = Variable("?robot", robot_type)
    parameters = [block, otherblock, robot]
    option_vars = [robot, otherblock]
    option = Stack
    preconditions = {
        LiftedAtom(Holding, [block]),
        LiftedAtom(Clear, [otherblock])
    }
    add_effects = {
        LiftedAtom(On, [block, otherblock]),
        LiftedAtom(Clear, [block]),
        LiftedAtom(GripperOpen, [robot])
    }
    delete_effects = {
        LiftedAtom(Holding, [block]),
        LiftedAtom(Clear, [otherblock])
    }
    stack_nsrt = NSRT("Stack", parameters, preconditions, add_effects,
                      delete_effects, set(), option, option_vars, null_sampler)
    nsrts.add(stack_nsrt)
    # PutOnTable: place the held block at a sampled table location.
    block = Variable("?block", block_type)
    robot = Variable("?robot", robot_type)
    parameters = [block, robot]
    option_vars = [robot]
    option = PutOnTable
    preconditions = {LiftedAtom(Holding, [block])}
    add_effects = {
        LiftedAtom(OnTable, [block]),
        LiftedAtom(Clear, [block]),
        LiftedAtom(GripperOpen, [robot])
    }
    delete_effects = {LiftedAtom(Holding, [block])}
    def putontable_sampler(state: State, goal: Set[GroundAtom],
                           rng: np.random.Generator,
                           objs: Sequence[Object]) -> Array:
        """Sample a uniform-random (x, y) drop location."""
        del state, goal, objs # unused
        # Note: normalized coordinates w.r.t. workspace.
        x = rng.uniform()
        y = rng.uniform()
        return np.array([x, y], dtype=np.float32)
    putontable_nsrt = NSRT("PutOnTable", parameters, preconditions,
                           add_effects, delete_effects, set(), option,
                           option_vars, putontable_sampler)
    nsrts.add(putontable_nsrt)
    return nsrts
def _get_painting_gt_nsrts() -> Set[NSRT]:
"""Create ground truth NSRTs for PaintingEnv."""
obj_type, box_type, lid_type, shelf_type, robot_type = \
_get_types_by_names(CFG.env, ["obj", "box", "lid", "shelf", "robot"])
(InBox, InShelf, IsBoxColor, IsShelfColor, GripperOpen, OnTable, \
NotOnTable, HoldingTop, HoldingSide, Holding, IsWet, IsDry, IsDirty, \
IsClean) = \
_get_predicates_by_names(
CFG.env, ["InBox", "InShelf", "IsBoxColor", "IsShelfColor",
"GripperOpen", "OnTable", "NotOnTable", "HoldingTop",
"HoldingSide", "Holding", "IsWet", "IsDry", "IsDirty",
"IsClean"])
Pick, Wash, Dry, Paint, Place, OpenLid = _get_options_by_names(
CFG.env, ["Pick", "Wash", "Dry", "Paint", "Place", "OpenLid"])
if CFG.env == "repeated_nextto_painting":
(NextTo, NextToBox, NextToShelf, NextToTable) = \
_get_predicates_by_names(
CFG.env, ["NextTo", "NextToBox", "NextToShelf", "NextToTable"])
MoveToObj, MoveToBox, MoveToShelf = _get_options_by_names(
CFG.env, ["MoveToObj", "MoveToBox", "MoveToShelf"])
nsrts = set()
# PickFromTop
obj = Variable("?obj", obj_type)
robot = Variable("?robot", robot_type)
parameters = [obj, robot]
option_vars = [robot, obj]
option = Pick
preconditions = {
LiftedAtom(GripperOpen, [robot]),
LiftedAtom(OnTable, [obj])
}
if CFG.env == "repeated_nextto_painting":
preconditions.add(LiftedAtom(NextTo, [robot, obj]))
add_effects = {LiftedAtom(Holding, [obj]), LiftedAtom(HoldingTop, [obj])}
delete_effects = {LiftedAtom(GripperOpen, [robot])}
def pickfromtop_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
del state, goal, rng, objs # unused
return np.array([1.0], dtype=np.float32)
pickfromtop_nsrt = NSRT("PickFromTop", parameters, preconditions,
add_effects, delete_effects, set(), option,
option_vars, pickfromtop_sampler)
nsrts.add(pickfromtop_nsrt)
# PickFromSide
obj = Variable("?obj", obj_type)
robot = Variable("?robot", robot_type)
parameters = [obj, robot]
option_vars = [robot, obj]
option = Pick
preconditions = {
LiftedAtom(GripperOpen, [robot]),
LiftedAtom(OnTable, [obj])
}
if CFG.env == "repeated_nextto_painting":
preconditions.add(LiftedAtom(NextTo, [robot, obj]))
add_effects = {LiftedAtom(Holding, [obj]), LiftedAtom(HoldingSide, [obj])}
delete_effects = {LiftedAtom(GripperOpen, [robot])}
def pickfromside_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
del state, goal, rng, objs # unused
return np.array([0.0], dtype=np.float32)
pickfromside_nsrt = NSRT("PickFromSide", parameters, preconditions,
add_effects, delete_effects, set(), option,
option_vars, pickfromside_sampler)
nsrts.add(pickfromside_nsrt)
# Wash
obj = Variable("?obj", obj_type)
robot = Variable("?robot", robot_type)
parameters = [obj, robot]
option_vars = [robot]
option = Wash
preconditions = {
LiftedAtom(Holding, [obj]),
LiftedAtom(IsDry, [obj]),
LiftedAtom(IsDirty, [obj])
}
if CFG.env == "repeated_nextto_painting":
preconditions.add(LiftedAtom(NextTo, [robot, obj]))
add_effects = {LiftedAtom(IsWet, [obj]), LiftedAtom(IsClean, [obj])}
delete_effects = {LiftedAtom(IsDry, [obj]), LiftedAtom(IsDirty, [obj])}
wash_nsrt = NSRT("Wash", parameters, preconditions, add_effects,
delete_effects, set(), option, option_vars, null_sampler)
nsrts.add(wash_nsrt)
# Dry
obj = Variable("?obj", obj_type)
robot = Variable("?robot", robot_type)
parameters = [obj, robot]
option_vars = [robot]
option = Dry
preconditions = {
LiftedAtom(Holding, [obj]),
LiftedAtom(IsWet, [obj]),
}
if CFG.env == "repeated_nextto_painting":
preconditions.add(LiftedAtom(NextTo, [robot, obj]))
add_effects = {LiftedAtom(IsDry, [obj])}
delete_effects = {LiftedAtom(IsWet, [obj])}
dry_nsrt = NSRT("Dry", parameters, preconditions, add_effects,
delete_effects, set(), option, option_vars, null_sampler)
nsrts.add(dry_nsrt)
# PaintToBox
obj = Variable("?obj", obj_type)
box = Variable("?box", box_type)
robot = Variable("?robot", robot_type)
parameters = [obj, box, robot]
option_vars = [robot]
option = Paint
preconditions = {
LiftedAtom(Holding, [obj]),
LiftedAtom(IsDry, [obj]),
LiftedAtom(IsClean, [obj])
}
if CFG.env == "repeated_nextto_painting":
preconditions.add(LiftedAtom(NextTo, [robot, obj]))
add_effects = {LiftedAtom(IsBoxColor, [obj, box])}
delete_effects = set()
def painttobox_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
del goal, rng # unused
box_color = state.get(objs[1], "color")
return np.array([box_color], dtype=np.float32)
painttobox_nsrt = NSRT("PaintToBox", parameters, preconditions,
add_effects, delete_effects, set(), option,
option_vars, painttobox_sampler)
nsrts.add(painttobox_nsrt)
# PaintToShelf
obj = Variable("?obj", obj_type)
shelf = Variable("?shelf", shelf_type)
robot = Variable("?robot", robot_type)
parameters = [obj, shelf, robot]
option_vars = [robot]
option = Paint
preconditions = {
LiftedAtom(Holding, [obj]),
LiftedAtom(IsDry, [obj]),
LiftedAtom(IsClean, [obj])
}
if CFG.env == "repeated_nextto_painting":
preconditions.add(LiftedAtom(NextTo, [robot, obj]))
add_effects = {LiftedAtom(IsShelfColor, [obj, shelf])}
delete_effects = set()
def painttoshelf_sampler(state: State, goal: Set[GroundAtom],
rng: np.random.Generator,
objs: Sequence[Object]) -> Array:
del goal, rng # unused
shelf_color = | |
__project__ = 'OCRErrorCorrectpy3'
__author__ = 'jcavalie'
__email__ = "<EMAIL>"
__date__ = '6/23/14'
import os
import pickle
import gc
import string
from collections import deque
import math
from difflib import SequenceMatcher
import regex
from nltk.probability import ConditionalFreqDist
from ParallelOCRalign_Global import OCRalignment
# (r"[\~\`\!\@\$\^\&\*\)\(\_\+\=\}\{\]\[\|\"\;\:\?\/\.\,\%\<\>\\]",'')
# (r"(\d+)","#")
def regexSubstitute( string_ ):
    """Collapse every run of whitespace in string_ to a single space."""
    cleaned = string_
    for pattern, replacement in ( (r"(\s+)", " "), ):
        cleaned = regex.sub( pattern, replacement, cleaned )
    return cleaned
def hasPunkt( string_ ):
    """Return True if string_ contains the pipe character '|'.

    Fixed idiom: the original `True if ... else False` ternary was
    redundant; the comparison already yields a bool.
    """
    punkt = r"[|]"
    return regex.search( punkt, string_ ) is not None
def hasNonWordChars( string_ ):
    """Return True if string_ contains any non-word symbol.

    The symbol set covers @, $, %, ^, &, *, parentheses, brackets, braces,
    pipe, backslash, angle brackets, backtick, tilde, hash and slash.
    Fixed idiom: removed the redundant `True if ... else False` ternary.
    """
    nonWordChars = r"[@$%\^&*)(=+\]\[{}|\\><`~#/]"
    return regex.search( nonWordChars, string_ ) is not None
def removePunctBigrams( ):
    """Normalise the samples/conditions of the pickled bigram model.

    Loads transitionsB.pickle, re-accumulates each (condition, sample)
    count after passing both strings through regexSubstitute, drops pairs
    where either string becomes empty, and dumps the result to
    transitionsB1.pickle.

    NOTE(review): despite the name, only whitespace normalisation
    (regexSubstitute) is applied here; punctuation is not stripped --
    confirm this is intended.
    """
    with open( 'PickledData/langModels/britOnly/transitionsB.pickle', 'rb' ) as file:
        masterBigram = pickle.load( file )
    cleaned = ConditionalFreqDist( )
    for cond, dist in masterBigram.items( ):
        for samp, count in dist.items( ):
            normSamp = regexSubstitute( samp )
            normCond = regexSubstitute( cond )
            if normSamp and normCond:
                cleaned[ normCond ][ normSamp ] += count
    print( "finished, dumping Master ..." )
    with open( './PickledData/langModels/britOnly/transitionsB1.pickle', 'wb' ) as output:
        pickle.dump( cleaned, output, pickle.HIGHEST_PROTOCOL )
def combineCharBigrams( ):
    """Merge every pickled character-bigram model into one master model.

    Iterates the pickles in ./PickledData/langModels/BritWikiCharBigram/,
    whitespace-normalises each condition and sample via regexSubstitute,
    accumulates the counts into a single ConditionalFreqDist, and dumps
    the merged result to fullMasterBigramBritWiki.pickle.
    """
    master = ConditionalFreqDist( )
    modelDir = './PickledData/langModels/BritWikiCharBigram/'
    for fileName in os.listdir( modelDir ):
        print( "working on: ", fileName )
        with open( modelDir + fileName, 'rb' ) as input_:
            partial = pickle.load( input_ )
        for cond, dist in partial.items( ):
            for samp, count in dist.items( ):
                master[ regexSubstitute( cond ) ][ regexSubstitute( samp ) ] += count
        print( "finished with ", fileName )
        # Free the (large) per-file distribution before loading the next one.
        del partial
        gc.collect( )
    print( "dumping Master ..." )
    with open( './PickledData/langModels/BritWikiCharBigram/fullMasterBigramBritWiki.pickle', 'wb' ) as output:
        pickle.dump( master, output, pickle.HIGHEST_PROTOCOL )
    print( "finished dumping Master" )
    return
def prune_alignments( ):
with open( 'PickledData/parallelWordMappingsV2_3.pickle', 'rb' ) as pklAlignments2_3, \
open( 'PickledData/parallelWordMappingsV1.pickle', 'rb' ) as pklAlignments1, \
open( 'PickledData/parallelWordMappingsV4.pickle', 'rb' ) as pklAlignments4, \
open( 'PickledData/parallelWordMappingsV5_17.pickle', 'rb' ) as pklAlignments5_17, \
open( 'PickledData/parallelWordMappings_sharedSetV5_17.pickle', 'rb' ) as pklsharedSet:
alignments1 = pickle.load( pklAlignments1 )
alignments2_3 = pickle.load( pklAlignments2_3 )
alignments4 = pickle.load( pklAlignments4 )
alignments5_17 = pickle.load( pklAlignments5_17 )
shared_set = pickle.load( pklsharedSet )
alignments = deque( )
alignments.extend( alignments1 )
alignments.extend( alignments2_3 )
alignments.extend( alignments4 )
alignments.extend( alignments5_17 )
regexTasks = [ ("\n", ""), ]
regexCleanTasks = [ (r"[:;?!,]", ""), (r"[\-]", " "), ]
regexAllTasks = [ ]
LEFTCONTEXT = 0
RIGHTCONTEXT = 1
cleaned_alignments = deque( )
pruned = deque( )
ordinals = deque( )
nonWords = deque( )
wordChars = string.ascii_lowercase + string.digits + "'-"
print( "beginning pruning process" )
for alignment in alignments:
pruneItem = True
PRUNE = False
if alignment is not None and alignment not in shared_set:
intended_Word = alignment.intended_Word.strip( )
OCR_Error = alignment.OCR_Error.strip( )
TrueContext = alignment.TrueContext
OCR_Context = alignment.OCR_Context
trueLeft = TrueContext[ LEFTCONTEXT ]
trueRight = TrueContext[ RIGHTCONTEXT ]
OCRLeft = OCR_Context[ LEFTCONTEXT ]
OCRRight = OCR_Context[ RIGHTCONTEXT ]
if 'greek' in intended_Word:
PRUNE = True
if not PRUNE:
for task in regexTasks:
intended_Word = regex.sub( task[ 0 ], task[ 1 ], intended_Word )
OCR_Error = regex.sub( task[ 0 ], task[ 1 ], OCR_Error )
# if intended_Word[-1] in string.punctuation and OCR_Error[-1] in string.punctuation \
# and intended_Word[-1] == OCR_Error[-1]:
# intended_Word=intended_Word[:-1]
# OCR_Error=OCR_Error[:-1]
#
# elif intended_Word[-1] in string.punctuation and OCR_Context[RIGHTCONTEXT].split()[0][0] \
# in string.punctuation:
#
# intended_Word=intended_Word[:-1]
#
#
#
# if intended_Word[0] in string.punctuation and OCR_Error[0] in string.punctuation \
# and intended_Word[0] == OCR_Error[0]:
# intended_Word=intended_Word[1:]
# OCR_Error=OCR_Error[1:]
#
# elif intended_Word[0] in string.punctuation and OCR_Context[LEFTCONTEXT].split()[-1][-1] \
# in string.punctuation:
#
# intended_Word=intended_Word[1:]
intended_Word = intended_Word.strip( '!"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~#' )
OCR_Error = OCR_Error.strip( '!"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~#' )
if (TrueContext[ RIGHTCONTEXT ] and OCR_Context[ RIGHTCONTEXT ]) and \
(TrueContext[ LEFTCONTEXT ] and OCR_Context[ LEFTCONTEXT ]):
rightAdj_Trueword = TrueContext[ RIGHTCONTEXT ].split( )[ 0 ]
rightAdj_Trueword = rightAdj_Trueword.rstrip( ".,?;:" )
rightAdj_OCRword = OCR_Context[ RIGHTCONTEXT ].split( )[ 0 ]
rightAdj_OCRword = rightAdj_OCRword.rstrip( ".,?;:" )
leftAdj_Trueword = TrueContext[ LEFTCONTEXT ].split( )[ -1 ]
leftAdj_OCRword = OCR_Context[ LEFTCONTEXT ].split( )[ -1 ]
if rightAdj_OCRword != rightAdj_Trueword and \
(((OCR_Error.find( rightAdj_Trueword, -len( rightAdj_Trueword ) ) != -1) and
intended_Word.find( rightAdj_Trueword, -len( rightAdj_Trueword ) ) == -1) or
(((len( OCR_Error ) - len( intended_Word )) == len( rightAdj_Trueword )) and
SequenceMatcher( autojunk = False,
a = OCR_Error[ -len( rightAdj_Trueword ): ],
b = rightAdj_Trueword ).ratio( ) > 0.61)):
intended_Word = intended_Word + ' ' + rightAdj_Trueword
trueRight = TrueContext[ RIGHTCONTEXT ][ TrueContext[ RIGHTCONTEXT ]. \
find( rightAdj_Trueword ) + len(
rightAdj_Trueword ): ]
if TrueContext[ LEFTCONTEXT ][ -1 ] in wordChars or \
OCR_Context[ LEFTCONTEXT ][ -1 ] in wordChars:
if abs( len( leftAdj_OCRword ) - len( leftAdj_Trueword ) ) < 3:
intended_Word = leftAdj_Trueword + intended_Word
OCR_Error = leftAdj_OCRword + OCR_Error
trueLeft = TrueContext[ LEFTCONTEXT ] \
[ :TrueContext[ LEFTCONTEXT ].rfind( leftAdj_Trueword ) ]
OCRLeft = OCR_Context[ LEFTCONTEXT ] \
[ :OCR_Context[ LEFTCONTEXT ].rfind( leftAdj_OCRword ) ]
if (alignment.Type == 'SPLITDICT-R') or (alignment.Type == 'SPLITDICT-L'):
errWords = OCR_Error.split( )
if ''.join( errWords ) == intended_Word:
if alignment.Type == 'SPLITDICT-R':
splitR = errWords[ -1 ]
OCRRight = OCR_Context[ RIGHTCONTEXT ] \
[ (OCR_Context[ RIGHTCONTEXT ].find( splitR ) + len( splitR )): ]
trueRight = OCRRight
trueLeft = OCRLeft
if alignment.Type == 'SPLITDICT-L':
splitL = errWords[ 0 ]
OCRLeft = OCR_Context[ LEFTCONTEXT ][ :OCR_Context[ LEFTCONTEXT ].rfind( splitL ) ]
trueLeft = OCRLeft
trueRight = OCRRight
else:
PRUNE = True
elif alignment.Type == 'SPLITDICT':
trueLeft = OCRLeft
trueRight = OCRRight
if 'greek' in intended_Word:
PRUNE = True
if not PRUNE:
if math.floor( len( OCR_Error ) / 1.2 ) <= len( intended_Word ) <= math.ceil(
len( OCR_Error ) * 1.256 ):
if regex.search( r"(\d)", intended_Word ):
intended_Word = regexSubstitute( intended_Word )
ordinal = OCRalignment( intended_Word.strip( ), OCR_Error.strip( ), (OCRLeft, OCRRight),
(trueLeft, trueRight), alignment.ID, alignment.Type )
ordinals.append( ordinal )
pruneItem = False
elif hasNonWordChars( intended_Word ):
# intended_Word=regexSubstitute(intended_Word)
nonWord = OCRalignment( intended_Word.strip( ), OCR_Error.strip( ), (OCRLeft, OCRRight),
(trueLeft, trueRight), alignment.ID, alignment.Type )
nonWords.append( nonWord )
pruneItem = False
elif SequenceMatcher( a = intended_Word, b = OCR_Error, autojunk = False ).ratio( ) > .27:
for task in regexCleanTasks:
intended_Word = regex.sub( task[ 0 ], task[ 1 ], intended_Word )
if alignment.Type == 'SPLIT' and \
intended_Word.count( ' ' ) == OCR_Error.count( ' ' ) and \
intended_Word.count( ' ' ) > 0:
intendedWords = intended_Word.split( )
errorWords = OCR_Error.split( )
for itr in range( len( intendedWords ) ):
intendedWords[ itr ] = regexSubstitute( intendedWords[ itr ] )
errorWords[ itr ] = regexSubstitute( errorWords[ itr ] )
cleaned = OCRalignment( intendedWords[ itr ].strip( ),
errorWords[ itr ].strip( ),
(OCRLeft + (' '.join( errorWords[ :itr ] )) + (
'' if not itr else ' '),
(' ' if itr < len( errorWords ) - 1 else '') +
(' '.join( errorWords[ itr + 1: ] ) + OCRRight)),
(trueLeft + (' '.join( intendedWords[ :itr ] )) + (
'' if not itr else ' '),
(' ' if itr < len( errorWords ) - 1 else '') +
(' '.join( intendedWords[ itr + 1: ] ) + trueRight)),
alignment.ID, 'SPLIT->REG' )
cleaned_alignments.append( cleaned )
pruneItem = False
else:
intended_Word = regexSubstitute( intended_Word )
OCR_Error = regexSubstitute( OCR_Error )
cleaned = OCRalignment( intended_Word.strip( ), OCR_Error.strip( ), (OCRLeft, OCRRight),
(trueLeft, trueRight), alignment.ID, alignment.Type )
cleaned_alignments.append( cleaned )
pruneItem = False
if alignment and pruneItem:
pruned.append( alignment )
with open( "printedData/prunedAlignmentsV1_17.txt", 'a' ) as outputfile:
outputfile.write( str( alignment ) + '\n' )
print( "alignments count:", len( cleaned_alignments ) )
print( "ordinals count:", len( ordinals ) )
print( "nonWords count:", len( nonWords ) )
print( "pruned count:", len( pruned ) )
print( "printing updated alignments" )
with open( 'printedData/all_wordMappings_taggedV1_17.txt', 'w' ) as outputfile:
for item in cleaned_alignments:
outputfile.write( str( item ) + '\n' )
print( "pickling data" )
with open( 'PickledData/all_wordMappings_taggedV1_17.pickle', 'wb' ) as pklfile:
pickle.dump( cleaned_alignments, pklfile, pickle.HIGHEST_PROTOCOL )
with open( 'PickledData/pruned_wordMappingstV1_17.pickle', 'wb' ) as pklfile:
pickle.dump( pruned, pklfile, pickle.HIGHEST_PROTOCOL )
with open( 'PickledData/ordinals_wordMappingstV1_17.pickle', 'wb' ) as pklfile:
pickle.dump( ordinals, pklfile, pickle.HIGHEST_PROTOCOL )
with open( 'PickledData/nonWords_wordMappingstV1_17.pickle', 'wb' ) as pklfile:
pickle.dump( | |
compute now
# Load reference point value @ w_iter
# refpath_ptr = [prev_idx, curr_xr]
if xr_detect_activated:
ref_zn = ref_path_get_BS(
Zn_path, w_iter,
has_xr, ref_index_xr, refx_xr, refy_xr, refpath_ptr,
out_is_xr, out_xr, 0, 1
)
ref_xn_xr, ref_yn_xr = ensure_xr_BS(
ref_zn, out_xr[0], out_xr[1], out_is_xr[0]
)
else:
ref_zn = Zn_path[w_iter]
ref_xn = ref_zn.real
ref_yn = ref_zn.imag
#==================================================================
# Pertubation iter block
#------------------------------------------------------------------
# dzndc subblock
if calc_hessian:
ref_dxnda = dXnda_path[w_iter] # Note this may be Xrange
ref_dxndb = dXndb_path[w_iter]
ref_dynda = dYnda_path[w_iter]
ref_dyndb = dYndb_path[w_iter]
if xr_detect_activated:
p_iter_hessian(
Z_xr, ref_xn_xr, ref_yn_xr,
ref_dxnda, ref_dxndb, ref_dynda, ref_dyndb
)
else:
p_iter_hessian(
Z, ref_xn, ref_yn,
ref_dxnda, ref_dxndb, ref_dynda, ref_dyndb
)
#------------------------------------------------------------------
# zn subblok
if xr_detect_activated:
# Z_xr[zn] = p_iter_zn(Z_xr, ref_zn_xr, c_xr)# in place mod
p_iter_zn(Z_xr, ref_xn_xr, ref_yn_xr, a_xr, b_xr)# in place mod
# std is used for div condition
Z[xn] = fsxn.to_standard(Z_xr[xn])
Z[yn] = fsxn.to_standard(Z_xr[yn])
else:
# Z[zn] = p_iter_zn(Z, ref_zn, c)
p_iter_zn(Z, ref_xn, ref_yn, a, b)
#==================================================================
# Stopping condition: maximum iter reached
if n_iter >= max_iter:
stop[0] = reason_max_iter
break
#==================================================================
# Stopping condition: divergence
# ZZ = "Total" z + dz
w_iter += 1
# print("incr w_iter", w_iter, n_iter, ref_order)
if w_iter >= ref_order:
w_iter = w_iter % ref_order
if xr_detect_activated:
ref_zn_next = ref_path_get_BS(
Zn_path, w_iter,
has_xr, ref_index_xr, refx_xr, refy_xr, refpath_ptr,
out_is_xr, out_xr, 0, 1
)
else:
ref_zn_next = Zn_path[w_iter]
# div condition computation with std only
XX = Z[xn] + ref_zn_next.real
YY = Z[yn] + ref_zn_next.imag
full_sq_norm = XX ** 2 + YY ** 2
# Flagged as 'diverging'
bool_infty = (full_sq_norm > M_divergence_sq)
if bool_infty:
stop[0] = reason_M_divergence
break
#==================================================================
# Glitch correction - reference point diverging
if (w_iter >= ref_div_iter - 1):
# print("reference point diverging rebase")
# Rebasing - we are already big no underflow risk
Z[xn] = XX
Z[yn] = YY
if xr_detect_activated:
Z_xr[xn] = fsxn.to_Xrange_scalar(XX)
Z_xr[yn] = fsxn.to_Xrange_scalar(YY)
# not a cycle, dZndc_path[0] == 0
# assert (dXnda_path[w_iter] == 0.)
if calc_hessian:
Z_xr[dxnda] = Z_xr[dxnda] + dXnda_path[w_iter]
Z_xr[dxndb] = Z_xr[dxndb] + dXndb_path[w_iter]
Z_xr[dynda] = Z_xr[dynda] + dYnda_path[w_iter]
Z_xr[dyndb] = Z_xr[dyndb] + dYndb_path[w_iter]
else:
if calc_hessian:
# not a cycle, dZndc_path[0] == 0
Z[dxnda] += dXnda_path[w_iter]
Z[dxndb] += dXndb_path[w_iter]
Z[dynda] += dYnda_path[w_iter]
Z[dyndb] += dYndb_path[w_iter]
w_iter = 0
continue
#==================================================================
# Glitch correction - "dynamic glitch"
bool_dyn_rebase = (
(abs(XX) <= abs(Z[xn])) and (abs(YY) <= abs(Z[yn]))
)
if bool_dyn_rebase:
# print("bool_dyn_rebase")
if xr_detect_activated:
# Can we *really* rebase ??
# Note: if Z[zn] underflows we might miss a rebase
# So we cast everything to xr
X_xrn = Z_xr[xn]
Y_xrn = Z_xr[yn]
if out_is_xr[0]:
# Reference underflows, use available xr ref
XX_xr = X_xrn + out_xr[0]
YY_xr = Y_xrn + out_xr[1]
else:
XX_xr = X_xrn + ref_zn_next.real
YY_xr = Y_xrn + ref_zn_next.imag
bool_dyn_rebase_xr = (
(XX_xr * XX_xr + YY_xr * YY_xr)
<= (X_xrn * X_xrn + Y_xrn * Y_xrn)
)
if bool_dyn_rebase_xr:
Z_xr[xn] = XX_xr
Z_xr[yn] = YY_xr
# /!\ keep this, needed for next BLA step
Z[xn] = fsxn.to_standard(XX_xr)
Z[yn] = fsxn.to_standard(YY_xr)
if calc_hessian:
Z_xr[dxnda] = (
Z_xr[dxnda]
+ dXnda_path[w_iter] - dXnda_path[0]
)
Z_xr[dxndb] = (
Z_xr[dxndb]
+ dXndb_path[w_iter] - dXndb_path[0]
)
Z_xr[dynda] = (
Z_xr[dynda]
+ dYnda_path[w_iter] - dYnda_path[0]
)
Z_xr[dyndb] = (
Z_xr[dyndb]
+ dYndb_path[w_iter] - dYndb_path[0]
)
w_iter = 0
continue
else:
# No risk of underflow - safe to rebase
Z[xn] = XX
Z[yn] = YY
if calc_hessian:
# Here we need to substract the first item (as it could
# possibly be a cycle)
Z[dxnda] += dXnda_path[w_iter] - dXnda_path[0]
Z[dxndb] += dXndb_path[w_iter] - dXndb_path[0]
Z[dynda] += dYnda_path[w_iter] - dYnda_path[0]
Z[dyndb] += dYndb_path[w_iter] - dYndb_path[0]
w_iter = 0
continue
# End of iterations for this point
U[0] = w_iter
# Total zn = Zn + zn
ref_zn = Zn_path[w_iter]
if xr_detect_activated:
Z[xn] = fsxn.to_standard(Z_xr[xn] + ref_zn.real)
Z[yn] = fsxn.to_standard(Z_xr[yn] + ref_zn.imag)
if calc_hessian:
Z[dxnda] = fsxn.to_standard(
Z_xr[dxnda] + dXnda_path[w_iter])
Z[dxndb] = fsxn.to_standard(
Z_xr[dxndb] + dXndb_path[w_iter])
Z[dynda] = fsxn.to_standard(
Z_xr[dynda] + dYnda_path[w_iter])
Z[dyndb] = fsxn.to_standard(
Z_xr[dyndb] + dYndb_path[w_iter])
else:
Z[xn] += ref_zn.real
Z[yn] += ref_zn.imag
if calc_hessian:
Z[dxnda] += dXnda_path[w_iter]
Z[dxndb] += dXndb_path[w_iter]
Z[dynda] += dYnda_path[w_iter]
Z[dyndb] += dYndb_path[w_iter]
return n_iter
return numba_impl
@numba.njit
def apply_BLA_BS(M, Z, a, b, xn, yn):
    """Apply one bilinear-approximation step to (Z[xn], Z[yn]) in place.

    The new values are linear combinations of the current (Z[xn], Z[yn])
    with coefficients M[0..3], plus the (a, b) terms weighted by M[4..7].
    Both temporaries are computed before either slot is written, so the
    update uses the pre-step values consistently.
    """
    next_x = M[0] * Z[xn] + M[1] * Z[yn] + M[4] * a + M[5] * b
    next_y = M[2] * Z[xn] + M[3] * Z[yn] + M[6] * a + M[7] * b
    Z[xn] = next_x
    Z[yn] = next_y
@numba.njit
def apply_BLA_deriv_BS(M, Z, a, b, dxnda, dxndb, dynda, dyndb):
    """Apply the BLA step to the four derivative slots of Z in place.

    Only the linear part M[0..3] is used; the derivative terms do not pick
    up the (a, b) contribution, so those parameters are unused here.
    All four temporaries are computed from the pre-step values before any
    slot is overwritten.
    """
    d_xa = M[0] * Z[dxnda] + M[1] * Z[dynda]
    d_xb = M[0] * Z[dxndb] + M[1] * Z[dyndb]
    d_ya = M[2] * Z[dxnda] + M[3] * Z[dynda]
    d_yb = M[2] * Z[dxndb] + M[3] * Z[dyndb]
    Z[dxnda] = d_xa
    Z[dxndb] = d_xb
    Z[dynda] = d_ya
    Z[dyndb] = d_yb
#------------------------------------------------------------------------------
# Series approximations
@numba.njit
def numba_SA_run(
    SA_loop,
    Zn_path, has_xr, ref_index_xr, ref_xr, ref_div_iter, ref_order,
    kc, SA_cutdeg, SA_err_sq, SA_stop
):
    """
    Iterate the series approximation (SA) until it stops being valid.

    Parameters
    ----------
    SA_loop : compiled function with signature (P, n_iter, ref_zn_xr, kcX);
        advances the SA polynomial by one iteration.
    Zn_path : reference orbit values
    has_xr, ref_index_xr, ref_xr : extended-range (Xrange) backing data for
        orbit points that underflow standard doubles
    ref_div_iter : iteration at which the reference point diverges
    ref_order : cycle order of the reference orbit (iteration index wraps
        at this value)
    kc : Xrange float
    SA_cutdeg : int, polynomial truncation degree
    SA_err_sq : float, squared relative-error tolerance
    SA_stop : int, user-provided max SA iter. If -1, defaults to the
        reference divergence iteration.

    Returns
    -------
    (P_ret, n_real_iter, err) : the last valid polynomial, the number of
        iterations it remains valid for, and the final error term.
    """
    # Note : SA 23064 23063 for order 23063
    # ref_path[ref_order-1] ** 2 == -c OK
    print("SA", ref_div_iter, ref_order)#, ref_path[ref_order-1])
    if SA_stop == -1:
        SA_stop = ref_div_iter
    else:
        SA_stop = min(ref_div_iter, SA_stop)
    print_freq = max(5, int(SA_stop / 100000.))
    print_freq *= 1000
    # print("numba_SA_cycles - output every", print_freq)
    SA_valid = True
    n_real_iter = 0
    n_iter = 0
    # P starts as the zero polynomial; kcX is the degree-1 monomial kc * X.
    P0_arr = Xr_template.repeat(1)
    P0_err = Xr_float_template.repeat(1)
    P = fsx.Xrange_SA(P0_arr, cutdeg=SA_cutdeg, err=P0_err) # P0
    kcX_arr = Xr_template.repeat(2)
    kcX_arr[1] = kc[0]
    kcX_err = Xr_float_template.repeat(1)
    kcX = fsx.Xrange_SA(kcX_arr, cutdeg=SA_cutdeg, err=kcX_err)
    # refpath_ptr = [prev_idx, curr_xr]
    refpath_ptr = np.zeros((2,), dtype=numba.int32)
    out_is_xr = np.zeros((1,), dtype=numba.bool_)
    out_xr = Xr_template.repeat(1)
    while SA_valid:
        # keep a copy in case this iter is invalidated
        P_old = P.coeffs.copy()
        # Load reference point value
        # refpath_ptr = [prev_idx, curr_xr]
        ref_zn = ref_path_get(
            Zn_path, n_iter,
            has_xr, ref_index_xr, ref_xr, refpath_ptr,
            out_is_xr, out_xr, 0
        )
        # incr iter
        n_real_iter +=1
        n_iter += 1
        # wraps to 0 when reaching cycle order
        if n_iter >= ref_order:
            n_iter -= ref_order
        # Advance the polynomial by one SA iteration.
        ref_zn_xr = ensure_xr(ref_zn, out_xr[0], out_is_xr[0])
        P = SA_loop(P, n_iter, ref_zn_xr, kcX)
        # Validity test: relative error small enough, coefficients bounded,
        # and iteration budget not exhausted.
        coeffs_sum = fsxn.Xrange_scalar(0., numba.int32(0))
        for i in range(len(P.coeffs)):
            coeffs_sum = coeffs_sum + fsxn.extended_abs2(P.coeffs[i])
        err_abs2 = P.err[0] * P.err[0]
        SA_valid = (
            (err_abs2 <= SA_err_sq * coeffs_sum) # relative err
            and (coeffs_sum <= 1.e6) # 1e6 to allow 'low zoom'
            and (n_iter < SA_stop)
        )
        if not(SA_valid):
            # Roll back to the last valid polynomial (copied before SA_loop).
            P_ret = fsx.Xrange_polynomial(P_old, P.cutdeg)
            n_real_iter -= 1
        if n_iter % print_freq == 0 and SA_valid:
            ssum = np.sqrt(coeffs_sum)
            print(
                "SA running", n_real_iter,
                "err: ", fsxn.to_Xrange_scalar(P.err[0]),
                "<< ", ssum
            )
    return P_ret, n_real_iter, P.err
#------------------------------------------------------------------------------
# Bilinear approximation
# Note: the bilinear arrays being cheap, they will not be stored but
# re-computed if needed
@numba.njit
def numba_make_BLA(Zn_path, dfdz, kc, eps):
    """
    Allocate and initialise the BLA tree for a holomorphic iteration.

    z_n+2**stg = f**stg(z_n, c) with |c| < r_stg_n is approximated by
    z_n+2**stg = A_stg_n * z_n + B_stg_n * c

    Returns (M_bla, r_bla, bla_len, stages): the bilinear coefficients,
    their validity radii, the table length and the number of stages
    (the number of stages needed is (ref_orbit_len).bit_length()).
    """
    scale = fsxn.to_standard(kc[0])
    orbit_len = Zn_path.shape[0]
    print("ref_orbit_len", orbit_len)
    # Holomorphic case: 2 coefficients (A, B) per tree node.
    coefs = np.zeros((2 * orbit_len, 2), dtype=numba.complex128)
    radii = np.zeros((2 * orbit_len,), dtype=numba.float64)
    return init_BLA(coefs, radii, Zn_path, dfdz, scale, eps)
@numba.njit
def numba_make_BLA_BS(Zn_path, dfxdx, dfxdy, dfydx, dfydy, kc, eps):
"""
Generates a BVA tree for non-holomorphic functions with
- bilinear approximation coefficients A and B
- validaty radius
z_n+2**stg = f**stg(z_n, c) with |c| < r_stg_n is approximated by
z_n+2**stg = A_stg_n * z_n + B_stg_n * c
"""
# number of needed "stages" is (ref_orbit_len).bit_length()
kc_std = fsxn.to_standard(kc[0])
ref_orbit_len = Zn_path.shape[0]
print("ref_orbit_len in BLA", ref_orbit_len)
bla_dim = 8
M_bla = np.zeros((2 * ref_orbit_len, bla_dim), dtype=numba.float64)
r_bla | |
'''
Created on Oct 10, 2020
@author: Bill
'''
import csv
import datetime
import os
import shapefile
from distro import linux_distribution
from pip._internal.vcs import git
from OWI_sqlfile import execute_statements_from_file
from OWI_sqlite import c4db
from OWI_config import OWI_version_40 as C
from OWI_config import SWUDS_version_0 as S
"""
Simple type checking, type converting, and text cleaning functions.
These are used for importing .csv files, and assume that the data in the .csv
files will conform to the anticipated data types. No warnings or error messages
will be generated.
"""
def safeint(x):
    """Convert x to int, returning None on any conversion failure.

    Fix: the original bare `except:` also swallowed KeyboardInterrupt and
    SystemExit; only conversion errors are caught now.
    """
    try:
        return int(x)
    except (TypeError, ValueError):
        return None
def safefloat(x):
    """Convert x to float, returning None on any conversion failure.

    Fix: the original bare `except:` also swallowed KeyboardInterrupt and
    SystemExit; only conversion errors are caught now.
    """
    try:
        return float(x)
    except (TypeError, ValueError):
        return None
def safetext(x):
    """Strip x and return it, or None if x is not string-like or is blank.

    Fix: the original bare `except:` also swallowed KeyboardInterrupt and
    SystemExit; only the expected failure (no .strip method) is caught now.
    """
    try:
        stripped = x.strip()
    except AttributeError:
        # x is not a string-like object (e.g. None, int)
        return None
    return stripped if stripped else None
class safedate():
    """Date parser that remembers the last successful format string.

    Fix: the two bare `except:` clauses also swallowed KeyboardInterrupt
    and SystemExit; only strptime's conversion errors (ValueError for a
    non-matching string, TypeError for a non-string) are caught now.
    """
    def __init__(self, default='%m/%d/%Y'):
        # self.fmt caches the most recently successful format; it is tried
        # first on each call to date().
        self.fmt = default
        self.fmts = ( default,
                      '%m-%d-%Y',
                      '%Y/%m/%d',
                      '%Y-%m-%d',
                      '%Y%m%d')
    def date(self, x):
        """
        Try to convert string x to a date using format self.fmt
        If that fails, try the other formats in self.fmts. If one succeeds,
        store the successful format in self.fmt for use on the next call.
        Returns None if no format matches.
        """
        try:
            return datetime.datetime.strptime(x, self.fmt).date()
        except (TypeError, ValueError):
            pass
        for fmt in self.fmts:
            try:
                d = datetime.datetime.strptime(x, fmt).date()
            except (TypeError, ValueError):
                continue
            self.fmt = fmt
            return d
        return None
def get_col_names_and_converters(db, table_name, csv_cols):
    """
    Return a list of column names, and a dict of type converter functions.

    Only columns present in BOTH the table definition and csv_cols are
    included.  SQL is case-insensitive about column names while the csv
    DictReader is not, so matching is done on upper-cased names but the
    returned names keep the exact case used in csv_cols.

    Raises NotImplementedError for a column type with no converter.
    """
    pragma_rows = db.cur.execute(f'PRAGMA TABLE_INFO({table_name})').fetchall()
    table_names_upper = [row[1].upper() for row in pragma_rows]
    table_types_upper = [row[2].upper() for row in pragma_rows]
    csv_cols_upper = [c.upper() for c in csv_cols]
    # Converter lookup for the simple column types; DATE needs a stateful
    # safedate instance and is handled separately below.
    simple_converters = {
        'INTEGER': safeint,
        'REAL': safefloat,
        'TEXT': safetext,
        'CHAR': safetext,
    }
    col_names, dcol_func = [], {}
    for uname, utype in zip(table_names_upper, table_types_upper):
        if uname not in csv_cols_upper:
            continue
        # Recover the csv file's exact spelling of this column name.
        name = csv_cols[csv_cols_upper.index(uname)]
        col_names.append(name)
        if utype in simple_converters:
            dcol_func[name] = simple_converters[utype]
        elif utype == 'DATE':
            dcol_func[name] = safedate().date
        else:
            raise NotImplementedError(f'type {utype} is not implemented for table {table_name} in column {name}')
    return col_names, dcol_func
def csv_generator(csvname, col_names, colfunc):
    """
    Yield each line of a csv file as a tuple of type-converted values.

    Arguments:
        csvname   : Filename of an existing csv file to be read.
        col_names : Column names as entered in the csv header (may be a
                    subset, and may be reordered).
        colfunc   : Dictionary of type conversion functions keyed by
                    column name.

    The yielded values follow the order of col_names; both col_names and
    the colfunc keys must match the csv header exactly, including case.
    """
    with open(csvname, 'r') as datafile:
        for record in csv.DictReader(datafile):
            yield tuple(colfunc[name](record[name]) for name in col_names)
def csv_wellid_generator(csvname, col_names, colfunc, MNUcol='RELATEID'):
    """
    Yield each csv line as a tuple, with a leading wellid value.

    Identical to csv_generator except that the first element of every
    yielded tuple is the MNUcol value converted to int (None when the
    value is not a valid integer).

    Arguments:
        csvname   : Filename of an existing csv file to be read.
        col_names : Column names as entered in the csv header.
        colfunc   : Dictionary of type conversion functions.
        MNUcol    : Header name of the column holding the well id.
    """
    with open(csvname, 'r') as datafile:
        for record in csv.DictReader(datafile):
            converted = [colfunc[name](record[name]) for name in col_names]
            yield tuple([safeint(record[MNUcol])] + converted)
def shp_locs_generator(shpname):
    """
    Yield each row of a shapefile's attribute table as a tuple.

    The first shapefile column is an internal flag of no interest; it is
    replaced by the text 'unloc' when 'unloc' appears in shpname, else
    'loc'.  The order and values of all other columns are preserved.
    """
    cwi_loc = 'unloc' if 'unloc' in shpname else 'loc'
    assert os.path.exists(shpname), f"Shape file not found {shpname}."
    with shapefile.Reader(shpname) as shpf:
        # fields[0] is the internal deletion flag; skip it.
        field_names = tuple(f[0] for f in shpf.fields[1:])
        for srec in shpf:
            yield tuple([cwi_loc] + [srec.record[name] for name in field_names])
class cwi_csvupdate():
""" Methods for importing csv files into OWI database tables. """
def __init__(self,
cwidatacsvdir,
locsdir):
self.cwidatacsvdir = cwidatacsvdir
self.locsdir = locsdir
self.data_table_suffixes = 'ix id ad an c1 c2 pl rm st wl'.split()
self.data_table_names = [f'c4{x}' for x in self.data_table_suffixes]
self.locs_table_name = 'c4locs'
assert os.path.exists(self.cwidatacsvdir), f"Missing {self.cwidatacsvdir}"
assert os.path.exists(self.locsdir), f"Missing {self.locsdir}"
    def delete_table_data(self, db,
                          tables=None):
        """
        Delete all from the c4* data and locs tables. Preparing to import.
        Arguments
        ---------
        db : an open database instance
        tables : either None, or a string including 'data' and/or 'locs'

        Notes
        -----
        A data table is only emptied when its replacement csv file exists;
        likewise c4locs rows ('loc'/'unloc') are only deleted when the
        corresponding shapefile exists, so a missing source never destroys
        data that cannot be re-imported.
        """
        # None means "both"; otherwise the string selects which groups to do.
        dodata = tables is None or 'data' in tables
        dolocs = tables is None or 'locs' in tables
        if dodata:
            for t in self.data_table_names:
                csvname = os.path.join(self.cwidatacsvdir, f'{t}.csv')
                if not os.path.exists(csvname):
                    print(f'Missing {csvname}, Table {t} not refreshed')
                    continue
                db.query(f"DELETE FROM {t};")
            print ('data files emptied')
        if dolocs:
            # c4locs holds both located and unlocated wells, distinguished
            # by cwi_loc; each group is cleared only if its shapefile exists.
            for fname, val in(('wells.shp', 'loc' ),
                              ('unloc_wells.shp', 'unloc')):
                shpname = os.path.join(self.locsdir, fname)
                if os.path.exists(shpname):
                    db.query (f"DELETE FROM c4locs where cwi_loc = '{val}';",)
                    print (f"DELETE FROM c4locs where cwi_loc = '{val}';")
                else:
                    print(f'Missing {shpname}, {val} records not refreshed')
        # Commit the deletions and reclaim the freed pages.
        db.commit_db()
        db.vacuum()
    def import_data_from_csv(self, db, schema_has_constraints,
                             table_names=None):
        """
        Create c4 tables in an sqlite db, and read in data from csv files
        Notes
        -----
        Assumes that the csv files have already been downloaded and extracted.
            fullset/cwi_CSV.zip
        Assumes that data tables have been created.
        Skips any table that already has at least 1 record in it.
        Some details and steps will depend on the c4version selected, described
        below.
        schema_versions and constraints
        -------------------------------
        c4.0.# & c4.1.# define the tables exactly as MGS (except c4locs).
        c4.2.# & up add column wellid to every table.
        c4.3.# & up may put foreign key and unique constraints on the wellid
                 column, so the row generator must supply the wellid at the
                 time that a record is created.
        """
        if table_names is None:
            table_names = self.data_table_names
        existing_tables = db.get_tablenames()
        for table_name in table_names:
            assert table_name in existing_tables, f'{table_name} missing from db'
            print (f'OK {table_name}')
            # Skip tables that already contain data (idempotent re-runs).
            n = db.cur.execute(f"select count(*) from {table_name};").fetchone()[0]
            if n>0:
                print (f"skipping {table_name}, {n} records already in db.")
                continue
            csvname = os.path.join(self.cwidatacsvdir, f'{table_name}.csv')
            assert os.path.exists(csvname), csvname
            # NOTE(review): force_to_ascii is defined elsewhere in this class;
            # its return value `ok` is never checked here -- confirm intended.
            ok = self.force_to_ascii(csvname)
            # Read only the header line to learn which columns the csv has.
            with open(csvname, 'r') as f:
                headers = f.readline()
            csv_cols = headers.replace('"',' ').replace(',',' ').split()
            col_names, col_convert = get_col_names_and_converters(db, table_name, csv_cols)
            # When the schema enforces wellid constraints and the csv lacks a
            # WELLID column, the row generator must synthesize wellid per row.
            if schema_has_constraints and not 'WELLID' in headers.upper():
                insert = (f"INSERT INTO {table_name}\n"
                          f" (wellid, {', '.join(col_names)})\n"
                          f" VALUES ({db.qmarks( len(col_names) + 1 )});")
                csvgen = csv_wellid_generator
            else:
                insert = (f"INSERT INTO {table_name}\n"
                          f" ({', '.join(col_names)})\n"
                          f" VALUES ({db.qmarks(col_names)});")
                csvgen = csv_generator
            print ('begin: ',insert)
            # executemany streams rows straight from the csv generator.
            db.cur.executemany(insert, csvgen(csvname, col_names, col_convert))
            print (f"Completed table {table_name}")
def import_locs_from_csv(self, db, schema_has_constraints):
"""
If locs is supplied as a csv file rather than shapefile(s), then read it
in like the other data tables.
"""
csvname = os.path.join(self.cwidatacsvdir, 'c4locs.csv')
if os.path.exists(csvname):
self.import_data_from_csv( db, schema_has_constraints,
table_names=('c4locs',) )
print (f"c4locs table was imported from csv file: {csvname}")
return True
return False
def import_cwi_locs(self, db):
"""
Import the shapefiles into table c4locs.
There is one shapefile for located wells (wells.shp) and another for
unlocated wells (unloc_wells.shp). Both are read into a single table,
c4locs. Their origin is distinguished by the value in a newly added
column 'cwi_loc' with values of either 'loc' or 'unloc'.
All columns defined in the shapefiles should have matching named columns
in c4locs.
The shapefiles have many columns that either reproduce data in c4ix
or other tables, or summarize | |
# Repository: Del9fina/robel
# Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base environment API for robotics tasks."""
import abc
import collections
from typing import Any, Dict, Optional, Sequence, Union, Tuple
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from robel.components.builder import ComponentBuilder
from robel.simulation.sim_scene import SimScene, SimBackend
from robel.simulation.renderer import RenderMode
DEFAULT_RENDER_SIZE = 480
# The simulation backend to use by default.
DEFAULT_SIM_BACKEND = SimBackend.MUJOCO_PY
def make_box_space(low: Union[float, Sequence[float]],
                   high: Union[float, Sequence[float]],
                   shape: Optional[Tuple[int]] = None) -> gym.spaces.Box:
    """Returns a Box gym space, tolerating the older gym 0.9.x API."""
    box_args = (low, high, shape)
    try:
        # Modern gym requires an explicit dtype argument.
        return spaces.Box(*box_args, dtype=np.float32)
    except TypeError:
        # HACK: Fallback for gym 0.9.x, whose Box() takes no dtype.
        # TODO(michaelahn): Consider whether we still need to support 0.9.x
        return spaces.Box(*box_args)
class RobotEnv(gym.Env, metaclass=abc.ABCMeta):
"""Base Gym environment for robotics tasks."""
    def __init__(self,
                 sim_model: Any,
                 observation_keys: Optional[Sequence[str]] = None,
                 reward_keys: Optional[Sequence[str]] = None,
                 use_dict_obs: bool = False,
                 frame_skip: int = 1,
                 camera_settings: Optional[Dict] = None,
                 sim_backend: SimBackend = DEFAULT_SIM_BACKEND,
                 sticky_action_probability: float = 0.):
        """Initializes a robotics environment.
        Args:
            sim_model: The path to the simulation to load.
            observation_keys: The keys of `get_obs_dict` to extract and flatten
                for the default implementation of `_get_obs`. If this is not
                set, `get_obs_dict` must return an OrderedDict.
            reward_keys: The keys of `get_reward_dict` to extract and sum for
                the default implementation of `_get_total_reward`. If this is
                not set, `_get_total_reward` will sum all of the values.
            use_dict_obs: If True, the observations will be returned as
                dictionaries rather than as a flattened array. The observation
                space of this environment will be a dictionary space.
            frame_skip: The number of simulation steps per environment step.
                This multiplied by the timestep defined in the model file is the
                step duration.
            camera_settings: Settings to apply to the free camera in simulation.
            sim_backend: The simulation backend to use.
            sticky_action_probability: Repeat previous action with this
                probability. Default is 0 (no sticky actions).
        """
        self._observation_keys = observation_keys
        self._reward_keys = reward_keys
        self._use_dict_obs = use_dict_obs
        self._sticky_action_probability = sticky_action_probability
        self._components = []
        # The following spaces are initialized by their respective `initialize`
        # methods, e.g. `_initialize_observation_space`, and cached by the
        # corresponding properties.
        self._observation_space = None
        self._action_space = None
        self._state_space = None
        # The following are populated by step() and/or reset().
        self.last_action = None
        self.last_obs_dict = None
        self.last_reward_dict = None
        self.last_score_dict = None
        self.is_done = False
        self.step_count = 0
        # Load the simulation.
        self.sim_scene = SimScene.create(
            sim_model, backend=sim_backend, frame_skip=frame_skip)
        # Convenience aliases into the loaded simulation scene.
        self.sim = self.sim_scene.sim
        self.model = self.sim_scene.model
        self.data = self.sim_scene.data
        if camera_settings:
            self.sim_scene.renderer.set_free_camera_settings(**camera_settings)
        # Set common metadata for Gym environments.
        self.metadata = {
            'render.modes': ['human', 'rgb_array', 'depth_array'],
            'video.frames_per_second': int(
                np.round(1.0 / self.sim_scene.step_duration))
        }
        # Ensure gym does not try to patch `_step` and `_reset`.
        self._gym_disable_underscore_compat = True
        self.seed()
#===========================================================================
# Environment API.
# These methods should not be overridden by subclasses.
#===========================================================================
@property
def observation_space(self) -> gym.Space:
"""Returns the observation space of the environment.
The observation space is the return specification for `reset`,
`_get_obs`, and the first element of the returned tuple from `step`.
Subclasses should override `_initialize_observation_space` to customize
the observation space.
"""
# Initialize and cache the observation space on the first call.
if self._observation_space is None:
self._observation_space = self._initialize_observation_space()
assert self._observation_space is not None
return self._observation_space
@property
def action_space(self) -> gym.Space:
"""Returns the action space of the environment.
The action space is the argument specifiction for `step`.
Subclasses should override `_initialize_action_space` to customize the
action space.
"""
# Initialize and cache the action space on the first call.
if self._action_space is None:
self._action_space = self._initialize_action_space()
assert self._action_space is not None
return self._action_space
@property
def state_space(self) -> gym.Space:
"""Returns the state space of the environment.
The state space is the return specification for `get_state` and is the
argument specification for `set_state`.
Subclasses should override `_initialize_state_space` to customize the
state space.
"""
# Initialize and cache the state space on the first call.
if self._state_space is None:
self._state_space = self._initialize_state_space()
assert self._state_space is not None
return self._state_space
@property
def dt(self) -> float:
"""Returns the step duration of each step, in seconds."""
return self.sim_scene.step_duration
@property
def obs_dim(self) -> int:
"""Returns the size of the observation space.
NOTE: This is for compatibility with gym.MujocoEnv.
"""
if not isinstance(self.observation_space, spaces.Box):
raise NotImplementedError('`obs_dim` only supports Box spaces.')
return np.prod(self.observation_space.shape).item()
@property
def action_dim(self) -> int:
"""Returns the size of the action space."""
if not isinstance(self.action_space, spaces.Box):
raise NotImplementedError('`action_dim` only supports Box spaces.')
return np.prod(self.action_space.shape).item()
def seed(self, seed: Optional[int] = None) -> Sequence[int]:
"""Seeds the environment.
Args:
seed: The value to seed the random number generator with. If None,
uses a random seed.
"""
self.np_random, seed = seeding.np_random(seed)
return [seed]
    def reset(self) -> Any:
        """Resets the environment.
        Returns:
            The initial observation of the environment after resetting.
        """
        # Forget the previous episode's action before resetting the sim.
        self.last_action = None
        # Reset the simulation, propagate the state, then run the
        # task-specific reset hook.
        self.sim.reset()
        self.sim.forward()
        self._reset()
        # Capture the first observation and clear per-episode bookkeeping.
        obs_dict = self.get_obs_dict()
        self.last_obs_dict = obs_dict
        self.last_reward_dict = None
        self.last_score_dict = None
        self.is_done = False
        self.step_count = 0
        return self._get_obs(obs_dict)
    def step(self, action: Any) -> Tuple[Any, float, bool, Dict]:
        """Runs one timestep of the environment with the given action.
        Subclasses must override 4 subcomponents of step:
        - `_step`: Applies an action to the robot
        - `get_obs_dict`: Returns the current observation of the robot.
        - `get_reward_dict`: Calculates the reward for the step.
        - `get_done`: Returns whether the episode should terminate.
        Args:
            action: An action to control the environment.
        Returns:
            observation: The observation of the environment after the timestep.
            reward: The amount of reward obtained during the timestep.
            done: Whether the episode has ended. `env.reset()` should be called
                if this is True.
            info: Auxiliary information about the timestep.
        """
        # Perform the step.
        action = self._preprocess_action(action)
        self._step(action)
        self.last_action = action
        # Get the observation after the step.
        obs_dict = self.get_obs_dict()
        self.last_obs_dict = obs_dict
        flattened_obs = self._get_obs(obs_dict)
        # Get the rewards for the observation.
        # The reward/score/done hooks take batched inputs, so add a leading
        # batch axis of size 1 here and strip it with `.item()` below.
        batched_action = np.expand_dims(np.atleast_1d(action), axis=0)
        batched_obs_dict = {
            k: np.expand_dims(np.atleast_1d(v), axis=0)
            for k, v in obs_dict.items()
        }
        batched_reward_dict = self.get_reward_dict(batched_action,
                                                   batched_obs_dict)
        # Calculate the total reward.
        reward_dict = {k: v.item() for k, v in batched_reward_dict.items()}
        self.last_reward_dict = reward_dict
        reward = self._get_total_reward(reward_dict)
        # Calculate the score.
        batched_score_dict = self.get_score_dict(batched_obs_dict,
                                                 batched_reward_dict)
        score_dict = {k: v.item() for k, v in batched_score_dict.items()}
        self.last_score_dict = score_dict
        # Get whether the episode should end.
        dones = self.get_done(batched_obs_dict, batched_reward_dict)
        done = dones.item()
        self.is_done = done
        # Combine the dictionaries as the auxiliary information; keys are
        # namespaced as obs/, reward/ and score/ to mark their origin.
        info = collections.OrderedDict()
        info.update(('obs/' + key, val) for key, val in obs_dict.items())
        info.update(('reward/' + key, val) for key, val in reward_dict.items())
        info['reward/total'] = reward
        info.update(('score/' + key, val) for key, val in score_dict.items())
        self.step_count += 1
        return flattened_obs, reward, done, info
def render(
self,
mode: str = 'human',
width: int = DEFAULT_RENDER_SIZE,
height: int = DEFAULT_RENDER_SIZE,
camera_id: int = -1,
) -> Optional[np.ndarray]:
"""Renders the environment.
Args:
mode: The type of rendering to use.
- 'human': Renders to a graphical window.
- 'rgb_array': Returns the RGB image as an np.ndarray.
- 'depth_array': Returns the depth image as an np.ndarray.
width: The width of the rendered image. This only affects offscreen
rendering.
height: The height of the rendered image. This only affects
offscreen rendering.
camera_id: The ID of the camera to use. By default, this is the free
camera. If specified, only affects offscreen rendering.
Returns:
If mode is `rgb_array` or `depth_array`, a Numpy array of the
rendered pixels. Otherwise, returns None.
"""
if mode == 'human':
self.sim_scene.renderer.render_to_window()
elif mode == 'rgb_array':
return self.sim_scene.renderer.render_offscreen(
width, height, mode=RenderMode.RGB, camera_id=camera_id)
elif mode == 'depth_array':
return self.sim_scene.renderer.render_offscreen(
width, height, mode=RenderMode.DEPTH, camera_id=camera_id)
else:
raise NotImplementedError(mode)
return None
def close(self):
"""Cleans up any resources used by the environment."""
self._close()
for component | |
' * alignment
tags = get_tag(addr)
prefix = line_prefix(addr)
else:
alignment = 0
for i in range(length):
if ((i + alignment) % bpl) == 0:
flush()
prefix = line_prefix(addr + i)
line = chars = ""
tags = get_tag(addr + i)
if ((i + alignment) & 1) == 0:
line += ' '
val = source.get_byte(addr + i)
ch = "."
if isinstance(val, str):
line += val
else:
line += "%02x" % val
# Until we know how the terminal will display characters > 0x7e,
# don't print them (see bug 1177)
if 0x20 <= val < 0x7f:
ch = chr(val)
chars += ch
flush()
source.finish()
class physmem_source:
    # Byte/tag source that reads directly from a physical memory object.
    # Read failures return placeholder marker strings and set flags so
    # finish() can print one explanatory footnote per failure kind.
    def __init__(self, obj):
        self.obj = obj
        # Failure flags: inquiry unhandled / outside memory / tag unavailable.
        self.unhandled = self.outside = self.tag_unavailable = 0
    def addr_prefix(self):
        # Prefix shown before addresses: "p" = physical.
        return "p:"
    def get_byte(self, addr):
        # Return the byte at addr, or a 2-char marker on failure:
        # "??" = inquiry unhandled, "**" = outside physical memory.
        try:
            [byte] = self.obj.memory[addr]
        except SimExc_InquiryUnhandled, msg:
            self.unhandled = 1
            return "??"
        except SimExc_Memory, msg:
            self.outside = 1
            return "**"
        return byte
    def have_tag(self, addr):
        # Nonzero when a memory tag can be read at addr; any failure
        # is treated as "no tag".
        try:
            return VT_read_phys_memory_tags_mask(self.obj, addr, 1)
        except SimExc_InquiryUnhandled, msg:
            return 0
        except SimExc_Memory, msg:
            return 0
    def get_tag(self, addr):
        # Return the tag at addr as a string, or a 1-char marker:
        # "/" = no tag, "?" = inquiry unhandled, "*" = memory error.
        try:
            if VT_read_phys_memory_tags_mask(self.obj, addr, 1):
                return "%d" % SIM_read_phys_memory_tags(self.obj, addr, 1)
            else:
                self.tag_unavailable = 1
                return '/'
        except SimExc_InquiryUnhandled, msg:
            return '?'
        except SimExc_Memory, msg:
            return '*'
    def finish(self):
        # Print one footnote per kind of failure seen during the dump.
        if self.outside:
            pr("addresses marked \"**\" are outside physical memory\n")
        if self.unhandled:
            pr("addresses marked \"??\" do not support inquiry\n")
        if self.tag_unavailable:
            pr("addresses marked \"/\" do not have tags\n")
class virtmem_source(physmem_source):
    # Byte/tag source that first translates a virtual address through the
    # given CPU, then delegates the physical access to physmem_source.
    def __init__(self, obj, cpu, kind):
        self.obj = obj
        self.cpu = cpu
        # kind is the address-type prefix ("" selects the CPU default).
        self.kind = kind
        self.unhandled = self.outside = self.no_translation = 0
        self.tag_unavailable = 0
    def addr_prefix(self):
        # Use the CPU's own prefix when no explicit kind was given.
        if self.kind == "":
            return get_address_prefix(self.cpu) + ":"
        else:
            return self.kind + ":"
    def get_byte(self, addr):
        # "--" marks addresses with no virtual-to-physical translation.
        try:
            paddr = translate_to_physical(self.cpu, (self.kind, addr))
        except SimExc_Memory, msg:
            self.no_translation = 1
            return "--"
        return physmem_source.get_byte(self, paddr)
    def have_tag(self, addr):
        # Untranslatable addresses count as having no tag.
        try:
            paddr = translate_to_physical(self.cpu, (self.kind, addr))
        except SimExc_Memory, msg:
            self.no_translation = 1
            return 0
        return physmem_source.have_tag(self, paddr)
    def get_tag(self, addr):
        # "-" marks tags whose address could not be translated.
        try:
            paddr = translate_to_physical(self.cpu, (self.kind, addr))
        except SimExc_Memory, msg:
            return "-"
        return physmem_source.get_tag(self, paddr)
    def finish(self):
        # Print the physical-source footnotes plus the translation one.
        physmem_source.finish(self)
        if self.no_translation:
            pr("addresses marked \"--\" have no translation\n")
new_command("x", x_cmd,
[arg(obj_t('processor', 'processor'), "cpu-name", "?"),
arg(addr_t, "address"),
arg(int_t, "size", "?", 16)],
type = ["Memory", "Inspecting Simulated State"],
short = "examine raw memory contents",
namespace_copy = ("processor", cpu_x_cmd),
repeat = x_cmd_repeat,
see_also = ["disassemble", "get", "set"],
doc = """
Display the contents of a memory space starting at <arg>address</arg>. Either
the memory space is explicitly specified as in
<cmd><memory-space>.x</cmd> or the CPU connected to the memory space can be
specified; e.g., <cmd><processor>.x</cmd>. By itself, <cmd>x</cmd> operates
on the memory connected to the current frontend processor.
If the memory is accessed via a CPU, the type of <arg>address</arg> is
specified by a prefix. For physical addresses use
<cmd>p:<var>address</var></cmd>; for virtual addresses,
<cmd>v:<var>address</var></cmd> on non-x86 targets. On x86, use
<cmd><var>segment-register</var>:<var>offset</var></cmd> or
<cmd>l:<var>address</var></cmd> for x86 linear addresses.
If no prefix is given it will be interpreted as a virtual address. On x86 the
default is <cmd>ds:<var>address</var></cmd> (data segment addressing).
The <arg>size</arg> argument specifies the number of bytes to examine. When
examining virtual memory, only addresses which can be found in the TLB or
hardware page tables (if any) are shown. Unmapped addresses are shown
as "<tt>--</tt>", undefined physical addresses as "<tt>**</tt>".""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="2812")
new_command("x", memory_space_x_cmd,
[arg(int_t, "address"), arg(int_t, "size", "?", 16)],
short = "examine raw memory contents",
namespace = "memory-space",
repeat = memory_space_x_cmd_repeat,
doc_with = "x", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="2843")
def sum_mem(obj, cpu, type, addr, length, w8, w16, w32):
    # Compute and print 8/16/32-bit checksums over a memory range.
    # obj is the memory space; cpu is used for virtual address translation;
    # type is the address-type prefix ("", "p", "v", ...); the w* flags
    # select which checksum widths to report.
    sum8 = 0
    sum16 = 0
    sum32 = 0
    buff = []
    # NOTE(review): buff and line below appear unused in this function.
    if cpu and type in ["", "v"]:
        type = get_address_prefix(cpu)
    line = ""
    # last_byte keeps the 4 most recent bytes so the final (complete)
    # 16/32-bit words can be folded in after the loop.
    last_byte = [0, 0, 0, 0]
    for i in range(length):
        outside = unhandled = 0
        try:
            if type == "" or type == "p":
                phys_addr = addr + i;
            else:
                phys_addr = translate_to_physical(cpu, (type, addr + i))
            [byte] = obj.memory[[phys_addr,phys_addr]]
        except SimExc_InquiryUnhandled, msg:
            print "Inquiry not handled"
            return
        except SimExc_Memory, msg:
            print "Got exception reading memory"
            return
        # Fold completed 32-bit / 16-bit words into the running sums.
        if i != 0 and (i % 4) == 0:
            sum32 = sum32 + ((last_byte[0] << 0) | (last_byte[1] << 8) | (last_byte[2] << 16) | (last_byte[3] << 24))
            sum32 = sum32 & 0xFFFFFFFF
        if i != 0 and (i % 2) == 0:
            sum16 = sum16 + ((last_byte[(i - 2) % 4] << 0) | (last_byte[(i - 1) % 4] << 8))
            sum16 = sum16 & 0xFFFF
        last_byte[i % 4] = byte
        sum8 = sum8 + byte
        sum8 = sum8 & 0xFF
    # Default to the 8-bit checksum when no width flag was given.
    if w8 + w16 + w32 == 0:
        w8 = 1
    if w8:
        print "Checksum 8-bit: %02x" % sum8
    if w16:
        if length % 2:
            print "Range not a multiple of 2 bytes, %d bytes skipped" % (length % 2)
        else:
            # Add the final 16-bit word, which the loop has not folded yet.
            last16 = (last_byte[(length - 2) % 4] << 0) | (last_byte[(length - 1) % 4] << 8)
            sum16 = (sum16 + last16) & 0xFFFF
            print "Last 16-bit: %04x" % last16
            print "Checksum 16-bit: %04x" % sum16
    if w32:
        if length % 4:
            print "Range not a multiple of 4 bytes, %d bytes skipped" % (length % 4)
        else:
            # Add the final 32-bit word, which the loop has not folded yet.
            last32 = (last_byte[0] << 0) | (last_byte[1] << 8) | (last_byte[2] << 16) | (last_byte[3] << 24)
            sum32 = (sum32 + last32) & 0x0FFFFFFFF
            print "Checksum 32-bit: %08x" % sum32
def sum_cmd(cpu, addr_spec, length, w8, w16, w32):
    # CLI handler for "sum": defaults to the current frontend processor
    # and prints (rather than propagates) any error message.
    if not cpu:
        (cpu, _) = get_cpu()
    try:
        # addr_spec is a (prefix, address) pair from the addr_t argument.
        sum_mem(cpu.physical_memory, cpu, addr_spec[0], addr_spec[1], length, w8, w16, w32)
    except Exception, msg:
        print msg
def obj_sum_cmd(obj, address, size, w8, w16, w32):
    # Namespace (<processor>.sum) variant of the "sum" command.
    # NOTE(review): this passes obj.name (a string) where sum_cmd reads
    # cpu.physical_memory from its cpu argument -- verify whether the CLI
    # machinery resolves the name to an object before use.
    sum_cmd(obj.name, address, size, w8, w16, w32)
new_command("sum", sum_cmd,
[arg(obj_t('processor', 'processor'), "cpu-name", "?"),
arg(addr_t, "address"),
arg(int_t, "size"),
arg(flag_t, "-w8"),
arg(flag_t, "-w16"),
arg(flag_t, "-w32")],
type = "inspect/change",
short = "sum a memory range",
deprecated = "python",
namespace_copy = ("processor", obj_sum_cmd),
doc = """
Sum a memory range. The width of the running sum is specified with the
<arg>-w8</arg> (default), <arg>-w16</arg>, or <arg>-w32</arg> flag, standing
for 8, 16, and 32 bits respectively.
""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="2925")
#
# -------------------- disassemble --------------------
#
# Address to continue from when the "disassemble" command is repeated
# (<enter> at the prompt); updated after every disassembly.
_last_disassemble_addr = 0
def disassemble_cmd_sub(cpu, addr, t, count):
    # Print `count` instructions starting at addr.
    # t selects the address type: 0 = physical, 1 = virtual.
    global _last_disassemble_addr
    for i in range(count):
        instr_len = local_print_disassemble_line(cpu, addr, t, 0)
        if instr_len == 0:
            # Could not disassemble further (e.g. unmapped address).
            break
        addr = addr + instr_len
    # save the last address if repeat
    _last_disassemble_addr = addr
def disassemble_cmd_rep(cpu, address, count):
    # Repeat handler: continue disassembling from where the previous
    # "disassemble" invocation stopped.
    global _last_disassemble_addr
    if not cpu:
        (cpu, _) = get_cpu()
    if address[0] == "p":
        t = 0
    elif address[0] in ["v", "cs", ""]:
        t = 1
    else:
        raise CliError, "Illegal address prefix '%s'." % address[0]
    # NOTE(review): t is recomputed below, so the if/elif above only
    # matters for rejecting unknown prefixes via CliError.
    t = iff(address[0] == "p", 0, 1)
    addr = _last_disassemble_addr
    disassemble_cmd_sub(cpu, addr, t, count)
def disassemble_cmd(cpu, address, count):
    # CLI handler for "disassemble": validate the address prefix and the
    # instruction count, resolve the start address, then print the lines.
    if not cpu:
        (cpu, _) = get_cpu()
    if address[0] == "p":
        t = 0
    elif address[0] in ["v", "cs", ""]:
        t = 1
    else:
        raise CliError, "Illegal address prefix '%s'." % address[0]
    if count <= 0:
        raise CliError, "Illegal instruction count."
    # NOTE(review): t is recomputed here, so the if/elif above only
    # matters for rejecting unknown prefixes via CliError.
    t = iff(address[0] == "p", 0, 1)
    if address[1] == -1:
        # No explicit address: disassemble from the current program counter
        # (translated to physical when a "p:" prefix was given).
        if address[0] == "p":
            addr = SIM_logical_to_physical(cpu, Sim_DI_Instruction, SIM_get_program_counter(cpu))
        else:
            addr = SIM_get_program_counter(cpu)
    else:
        addr = address[1]
    disassemble_cmd_sub(cpu, addr, t, count)
new_command("disassemble", disassemble_cmd,
[arg(obj_t('processor', 'processor'), "cpu-name", "?"),
arg(addr_t, "address", "?", ("v",-1)),
arg(int_t, "count", "?", 1)],
alias = "da",
repeat = disassemble_cmd_rep,
type = ["Memory", "Inspecting Simulated State", "Execution"],
short = "disassemble instructions",
namespace_copy = ("processor", disassemble_cmd),
see_also = ["x", "disassemble-settings", "<processor>.aprof-views"],
doc = """
Disassembles <arg>count</arg> instructions starting at
<arg>address</arg> for processor <arg>cpu-name</arg>. If the processor
is not given the current frontend processor will be used. The method
variant can also be used to select a processor; e.g.,
<cmd>cpu0.disassemble</cmd>.
On some architectures, <arg>address</arg> must be word aligned. A
physical address is given by prefixing the address with <cmd>p:</cmd>
(e.g., <cmd>p:0xf000</cmd>). With no prefix, a virtual address will be
assumed. If the address is omitted the current program counter will be
used. <arg>count</arg> defaults to 1 instruction.
Global disassembly settings, such as whether to print the raw opcodes,
can be set by the <cmd>disassemble-settings</cmd> command.
This command will also include various profiling statistics for the
address of each instruction, one column for each profiler view listed
in the processor attribute <tt>aprof-views</tt>. For descriptions of
the columns, use the <cmd><processor>.aprof-views</cmd> command.""", filename="/mp/simics-3.0/src/core/common/commands.py", linenumber="3002")
#
# -------------------- set-pc --------------------
#
def cpu_set_pc_cmd(cpu_obj, address):
try:
SIM_set_program_counter(cpu_obj, address)
except Exception, msg:
print "Failed setting | |
include extra space flag is set,
# the command line is not empty or invalid
# and the last element in the command line is a
# space character
if include_extra_space and command_line and command_line[-1] == " ":
# adds an empty element to the line split
# representing the extra space
line_split.append("")
# returns the line split
return line_split
def write(self, text, new_line = True):
"""
Writes the given text to the standard output,
may use a newline or not.
:type text: String
:param text: The text to be written to the standard output.
:type new_line: bool
:param new_line: If the text should be suffixed with a newline.
"""
# writes the text contents
sys.stdout.write(text)
# in case a newline should be appended
# writes it
new_line and sys.stdout.write("\n")
# flushes the standard output value
sys.stdout.flush()
def _validate_command(self, command, arguments, output_method):
# retrieves the command information
command_information = self.commands_map.get(command, None)
# in case no command information is found
# (command not found)
if not command_information:
# print the invalid command message
output_method(INVALID_COMMAND_MESSAGE)
# returns none (invalid)
return (
None,
None
)
# retrieves the command arguments
command_arguments = command_information.get("arguments", [])
# retrieves the command mandatory arguments from the
# the command information
command_mandatory_arguments = self.__get_command_mandatory_arguments(command_arguments)
# retrieves the command mandatory arguments length
command_mandatory_arguments_length = len(command_mandatory_arguments)
# retrieves the arguments length
arguments_length = len(arguments)
# in case the arguments length is smaller than the
# command mandatory arguments length
if arguments_length < command_mandatory_arguments_length:
# retrieves the missing arguments count, by subtracting the arguments
# length from the command mandatory arguments length
missing_arguments_count = command_mandatory_arguments_length - arguments_length
# retrieves the missing arguments list
missing_arguments = command_mandatory_arguments[missing_arguments_count * -1:]
# creates the missing argument names list
missing_argument_names = [value.get("name", "undefined") for value in missing_arguments]
# joins the missing argument names to create the missing
# argument names line
missing_argument_names_line = ", ".join(missing_argument_names)
# print the missing mandatory arguments message
output_method(MISSING_MANDATORY_ARGUMENTS_MESSAGE + ": " + missing_argument_names_line)
# returns none (invalid)
return (
None,
None
)
# retrieves the command handler
command_handler = command_information.get("handler", None)
# in case no command handler is defined
if not command_handler:
# print the internal configuration problem message
output_method(INTERNAL_CONFIGURATION_PROBLEM_MESSAGE)
# returns none (invalid)
return (
None,
None
)
# retrieves the received arguments list
received_arguments = command_arguments[:arguments_length]
# retrieves the received argument names
received_argument_names = [value.get("name", "undefined") for value in received_arguments]
# zip the received argument names and the arguments list
received_arguments_tuple = zip(received_argument_names, arguments)
# creates the arguments map from the received
# arguments tuple
arguments_map = dict(received_arguments_tuple)
# creates the command tuple from the command handler
# and the arguments map
command_tuple = (
command_handler,
arguments_map
)
# returns the command tuple
return command_tuple
def _get_command_alternatives(self, command):
# creates the alternatives list
alternatives_list = []
# iterates over all the commands in the
# commands map
for _command in self.commands_map:
# retrieves the command information
command_information = self.commands_map[_command]
# retrieves the command arguments
command_arguments = command_information.get("arguments", [])
# recreates the command value base on either the command
# contains arguments or not
_command = command_arguments and _command + " " or _command
# in case the command starts with the
# value in the command
_command.startswith(command) and alternatives_list.append(_command)
# returns the alternatives list
return alternatives_list
def _get_argument_alternatives(self, command, arguments, console_context):
    """
    Retrieves the completion alternatives for the argument
    currently being typed for the given command.

    :type command: String
    :param command: The complete command name whose argument is
    being completed.
    :type arguments: List
    :param arguments: The arguments typed so far; the last one is
    the (partial) argument to be completed.
    :type console_context: ConsoleContext
    :param console_context: The context passed to dynamic value
    generator methods.
    :rtype: List
    :return: The list of argument alternatives.
    """

    alternatives = []

    # unknown commands have no argument alternatives
    command_information = self.commands_map.get(command, None)
    if not command_information:
        return alternatives

    # the argument under completion is always the last one typed
    target_index = len(arguments) - 1
    target = arguments[target_index]

    # commands without a matching argument definition at this
    # position cannot be completed
    command_arguments = command_information.get("arguments", [])
    if len(command_arguments) <= target_index:
        return alternatives

    command_argument = command_arguments[target_index]
    values = command_argument.get("values", None)

    # the possible values may be declared statically (a sequence)
    # or generated on demand (a bound method)
    if type(values) in SEQUENCE_TYPES:
        base_values = values
    elif type(values) == types.MethodType:
        base_values = values(target, console_context)
    else:
        base_values = []

    # when more arguments follow, a trailing space is appended so
    # completion flows into the next argument
    has_more_arguments = len(command_arguments) > target_index + 1
    for base_value in base_values:
        candidate = base_value + " " if has_more_arguments else base_value
        if candidate.startswith(target):
            alternatives.append(candidate)

    return alternatives
def _get_best_match(self, alternatives_list):
# in case the alternatives list is not set
if not alternatives_list:
# returns empty string (invalid)
return ""
# retrieves the first alternative
first_alternative = alternatives_list[0]
# retrieves the first alternative length
first_alternative_length = len(first_alternative)
# creates the best match list
best_match_list = []
# iterates over the range of the first
# alternative length
for index in colony.legacy.xrange(first_alternative_length):
# retrieves the base character from the first
# alternative (for the current index)
base_character = first_alternative[index]
# sets the valid flag
valid = True
# iterates over all the alternatives in the
# alternatives list
for alternative in alternatives_list:
# retrieves the alternative length
alternative_length = len(alternative)
# retrieves the (current) alternative
# character (in case the alternative length is valid)
alternative_character = alternative_length > index and alternative[index] or None
# in case the base character and the alternative
# character are not the same
if not base_character == alternative_character:
# unsets the valid flag
valid = False
# breaks the loop
break
# in case the valid flag
# is not set
if not valid:
# breaks the (outer) loop
break
# adds the base character to the best
# match list
best_match_list.append(base_character)
# joins the best match list to retrieve
# the best match
best_match = "".join(best_match_list)
# returns the best match
return best_match
def __get_command_mandatory_arguments(self, command_arguments):
# creates the command mandatory arguments (list)
command_mandatory_arguments = []
# iterates over the command arguments
for command_argument in command_arguments:
# retrieves the command argument mandatory
command_argument_mandatory = command_argument.get("mandatory", False)
# in case the command argument adds it
# to the command mandatory arguments
command_argument_mandatory and command_mandatory_arguments.append(command_argument)
# returns the command mandatory arguments
return command_mandatory_arguments
class ConsoleContext(colony.Protected):
"""
The console context class.
This class defines the context for the current
console execution.
It also contains some useful functions for extra
information retrieval.
"""
console = None
""" The console reference """
path = None
""" The current console path """
user = None
""" The current console user """
authentication_information = None
""" The current authentication information/result """
_get_line = None
""" The reference to the console's method to retrieve a line """
_get_size = None
""" The reference to the console's method to retrieve the console's size """
def __init__(self, console):
"""
Constructor of the class.
:type console: Console
:param console: The console reference.
"""
self.console = console
# sets the current path in the context
self.path = os.getcwd()
@colony.public
def authenticate_user(self, username, password):
try:
# tries to authenticate the user retrieving the result, this call
# should returns the authentication map, then uses this map to
# retrieve the boolean result from it (represents the validation result)
authentication_result = self.console.authenticate_user(username, password, | |
# Source: joshua-gould/anndata (anndata/_io/specs/methods.py)
from __future__ import annotations
from os import PathLike
from collections.abc import Mapping
from functools import partial
from typing import Union
from types import MappingProxyType
from warnings import warn
import h5py
import numpy as np
import pandas as pd
from scipy import sparse
import anndata as ad
from anndata import AnnData, Raw
from anndata._core.index import _normalize_indices
from anndata._core.merge import intersect_keys
from anndata._core.sparse_dataset import SparseDataset
from anndata._core import views
from anndata.compat import (
Literal,
OverloadedDict,
ZarrArray,
ZarrGroup,
_read_attr,
_from_fixed_length_strings,
_decode_structured_array,
)
from anndata._io.utils import report_write_key_on_error, check_key, H5PY_V3
from anndata._warnings import OldFormatWarning
from .registry import (
_REGISTRY,
IOSpec,
get_spec,
read_elem,
read_elem_partial,
write_elem,
)
H5Array = h5py.Dataset
H5Group = h5py.Group
####################
# Dispatch methods #
####################
# def is_full_slice(idx):
# if isinstance(idx, tuple)len(idx) == 1:
# if isinstance(idx, type(None)):
# return True
# elif idx is Ellipsis:
# return True
# elif isinstance(idx, tuple):
# for el in idx:
# if isinstance(el, type(None)):
# pass
# elif isinstance(el, slice):
# if el != slice(None):
# return False
# else:
# return False
# return True
# return False
################################
# Fallbacks / backwards compat #
################################
# Note: there is no need for writing in a backwards compatible format, maybe
@_REGISTRY.register_read(H5Group, IOSpec("", ""))
@_REGISTRY.register_read(H5Array, IOSpec("", ""))
def read_basic(elem):
    """Fallback reader for h5py elements written without encoding metadata."""
    from anndata._io import h5ad

    warn(
        f"Element '{elem.name}' was written without encoding metadata.",
        OldFormatWarning,
        stacklevel=3,
    )

    if isinstance(elem, Mapping):
        # legacy h5sparse sparse matrices are recognised by this attribute
        if "h5sparse_format" in elem.attrs:
            return SparseDataset(elem).to_memory()
        return {key: read_elem(value) for key, value in elem.items()}
    if isinstance(elem, h5py.Dataset):
        return h5ad.read_dataset(elem)  # TODO: Handle legacy
@_REGISTRY.register_read(ZarrGroup, IOSpec("", ""))
@_REGISTRY.register_read(ZarrArray, IOSpec("", ""))
def read_basic_zarr(elem):
    """Fallback reader for zarr elements written without encoding metadata."""
    from anndata._io import zarr

    warn(
        f"Element '{elem.name}' was written without encoding metadata.",
        OldFormatWarning,
        stacklevel=3,
    )

    if isinstance(elem, Mapping):
        # legacy h5sparse sparse matrices are recognised by this attribute
        if "h5sparse_format" in elem.attrs:
            return SparseDataset(elem).to_memory()
        return {key: read_elem(value) for key, value in elem.items()}
    if isinstance(elem, ZarrArray):
        return zarr.read_dataset(elem)  # TODO: Handle legacy
# @_REGISTRY.register_read_partial(IOSpec("", ""))
# def read_basic_partial(elem, *, items=None, indices=(slice(None), slice(None))):
# if isinstance(elem, Mapping):
# return _read_partial(elem, items=items, indices=indices)
# elif indices != (slice(None), slice(None)):
# return elem[indices]
# else:
# return elem[()]
###########
# AnnData #
###########
def read_indices(group):
    """Read the obs and var index arrays from an AnnData-formatted group."""
    indices = []
    # both axes store the name of their index column in the "_index" attribute
    for axis in ("obs", "var"):
        axis_group = group[axis]
        index_key = _read_attr(axis_group.attrs, "_index")
        indices.append(read_elem(axis_group[index_key]))
    return tuple(indices)
def read_partial(
    pth: PathLike,
    *,
    obs_idx=slice(None),
    var_idx=slice(None),
    X=True,
    obs=None,
    var=None,
    obsm=None,
    varm=None,
    obsp=None,
    varp=None,
    layers=None,
    uns=None,
) -> ad.AnnData:
    """Read a subset of an h5ad file into memory.

    ``obs_idx``/``var_idx`` select rows/columns; the per-slot keyword
    arguments (``obs``, ``obsm``, ...) optionally filter which items of
    each slot are read. When ``X`` is falsy an empty sparse matrix of the
    correct shape is substituted so dimensions still line up.
    """
    result = {}
    with h5py.File(pth, "r") as f:
        obs_idx, var_idx = _normalize_indices((obs_idx, var_idx), *read_indices(f))
        result["obs"] = read_elem_partial(
            f["obs"], items=obs, indices=(obs_idx, slice(None))
        )
        result["var"] = read_elem_partial(
            f["var"], items=var, indices=(var_idx, slice(None))
        )
        if X:
            result["X"] = read_elem_partial(f["X"], indices=(obs_idx, var_idx))
        else:
            # placeholder with the right shape so AnnData dimensions match
            result["X"] = sparse.csr_matrix((len(result["obs"]), len(result["var"])))
        # aligned mappings: slot name -> (items filter, indices for its axes)
        aligned = {
            "obsm": (obsm, (obs_idx, slice(None))),
            "varm": (varm, (var_idx, slice(None))),
            "obsp": (obsp, (obs_idx, obs_idx)),
            "varp": (varp, (var_idx, var_idx)),
            "layers": (layers, (obs_idx, var_idx)),
        }
        for key, (key_items, key_indices) in aligned.items():
            if key in f:
                result[key] = _read_partial(f[key], items=key_items, indices=key_indices)
        if "uns" in f:
            result["uns"] = _read_partial(f["uns"], items=uns)
    return ad.AnnData(**result)
def _read_partial(group, *, items=None, indices=(slice(None), slice(None))):
    """Recursively read the selected keys and indices from a group."""
    if group is None:
        return None
    # restrict to the keys present both on disk and in the items filter
    keys = intersect_keys((group,)) if items is None else intersect_keys((group, items))
    result = {}
    for key in keys:
        # a nested mapping in items narrows the selection one level down
        sub_items = items.get(key, None) if isinstance(items, Mapping) else None
        result[key] = read_elem_partial(group[key], items=sub_items, indices=indices)
    return result
@_REGISTRY.register_write(ZarrGroup, AnnData, IOSpec("anndata", "0.1.0"))
@_REGISTRY.register_write(H5Group, AnnData, IOSpec("anndata", "0.1.0"))
def write_anndata(f, k, adata, dataset_kwargs=MappingProxyType({})):
    """Write a complete AnnData object as a group named ``k`` under ``f``."""
    g = f.require_group(k)
    write_elem(g, "X", adata.X, dataset_kwargs=dataset_kwargs)
    write_elem(g, "obs", adata.obs, dataset_kwargs=dataset_kwargs)
    write_elem(g, "var", adata.var, dataset_kwargs=dataset_kwargs)
    # aligned mappings and uns are written as plain dicts
    for slot in ("obsm", "varm", "obsp", "varp", "layers", "uns"):
        write_elem(g, slot, dict(getattr(adata, slot)), dataset_kwargs=dataset_kwargs)
    write_elem(g, "raw", adata.raw, dataset_kwargs=dataset_kwargs)
@_REGISTRY.register_read(H5Group, IOSpec("anndata", "0.1.0"))
@_REGISTRY.register_read(H5Group, IOSpec("raw", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("anndata", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("raw", "0.1.0"))
def read_anndata(elem):
    """Read an "anndata" (or "raw") group back into an AnnData object."""
    slots = (
        "X",
        "obs",
        "var",
        "obsm",
        "varm",
        "obsp",
        "varp",
        "layers",
        "uns",
        "raw",
    )
    d = {slot: read_elem(elem[slot]) for slot in slots if slot in elem}
    # pass the on-disk dtype of X explicitly to the constructor
    if "X" in d:
        d["dtype"] = d["X"].dtype
    return AnnData(**d)
@_REGISTRY.register_write(H5Group, Raw, IOSpec("raw", "0.1.0"))
@_REGISTRY.register_write(ZarrGroup, Raw, IOSpec("raw", "0.1.0"))
def write_raw(f, k, raw, dataset_kwargs=MappingProxyType({})):
    """Write a Raw object (only X, var and varm) as a group named ``k``."""
    group = f.create_group(k)
    # a Raw holds no obs-aligned data, so only these three slots exist
    for slot, value in (("X", raw.X), ("var", raw.var), ("varm", dict(raw.varm))):
        write_elem(group, slot, value, dataset_kwargs=dataset_kwargs)
############
# Mappings #
############
@_REGISTRY.register_read(H5Group, IOSpec("dict", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("dict", "0.1.0"))
def read_mapping(elem):
    """Read a "dict"-encoded group into a plain dict, reading children recursively."""
    return {k: read_elem(v) for k, v in elem.items()}
@_REGISTRY.register_write(H5Group, OverloadedDict, IOSpec("dict", "0.1.0"))
@_REGISTRY.register_write(H5Group, dict, IOSpec("dict", "0.1.0"))
@_REGISTRY.register_write(ZarrGroup, OverloadedDict, IOSpec("dict", "0.1.0"))
@_REGISTRY.register_write(ZarrGroup, dict, IOSpec("dict", "0.1.0"))
def write_mapping(f, k, v, dataset_kwargs=MappingProxyType({})):
    """Write a mapping as a "dict"-encoded group with one child per key."""
    group = f.create_group(k)
    for key, value in v.items():
        write_elem(group, key, value, dataset_kwargs=dataset_kwargs)
##############
# np.ndarray #
##############
@_REGISTRY.register_write(H5Group, list, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, list, IOSpec("array", "0.2.0"))
def write_list(f, k, elem, dataset_kwargs=MappingProxyType({})):
    """Write a Python list by first converting it to a numpy array."""
    as_array = np.array(elem)
    write_elem(f, k, as_array, dataset_kwargs=dataset_kwargs)
# TODO: Is this the right behaviour for MaskedArrays?
# It's in the `AnnData.concatenate` docstring, but should we keep it?
@_REGISTRY.register_write(H5Group, views.ArrayView, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(H5Group, np.ndarray, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(H5Group, h5py.Dataset, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(H5Group, np.ma.MaskedArray, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, views.ArrayView, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, np.ndarray, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, h5py.Dataset, IOSpec("array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, np.ma.MaskedArray, IOSpec("array", "0.2.0"))
def write_basic(f, k, elem, dataset_kwargs=MappingProxyType({})):
    """Write array values that the underlying library handles natively."""
    f.create_dataset(k, data=elem, **dataset_kwargs)
@_REGISTRY.register_read(H5Array, IOSpec("array", "0.2.0"))
@_REGISTRY.register_read(ZarrArray, IOSpec("array", "0.2.0"))
@_REGISTRY.register_read(ZarrArray, IOSpec("string-array", "0.2.0"))
def read_array(elem):
    """Read a whole dense array dataset into memory."""
    return elem[()]
@_REGISTRY.register_read_partial(H5Array, IOSpec("array", "0.2.0"))
@_REGISTRY.register_read_partial(ZarrArray, IOSpec("array", "0.2.0"))
@_REGISTRY.register_read_partial(ZarrArray, IOSpec("string-array", "0.2.0"))
def read_array_partial(elem, *, items=None, indices=(slice(None, None))):
    """Read only ``indices`` from an array dataset; ``items`` is unused.

    NOTE(review): the ``indices`` default is a single slice, not a tuple —
    the parentheses do not create a tuple here. Confirm this is intended.
    """
    return elem[indices]
# arrays of strings
@_REGISTRY.register_read(H5Array, IOSpec("string-array", "0.2.0"))
def read_string_array(d):
    """Read an h5py variable-length string array, decoding bytes to str."""
    return read_array(d.asstr())
@_REGISTRY.register_read_partial(H5Array, IOSpec("string-array", "0.2.0"))
def read_string_array_partial(d, items=None, indices=slice(None)):
    """Partially read an h5py string array, decoding bytes to str.

    Delegates to the generic ``read_array_partial`` on the ``asstr()``
    view of the dataset.
    """
    # BUG FIX: this function was previously (re)defined under the name
    # ``read_array_partial``, which shadowed the generic partial reader
    # above and made the body call itself, recursing infinitely when
    # invoked. Renaming it (as upstream does) restores the intended
    # delegation; registration happens via the decorator, so no caller
    # depends on the old name.
    return read_array_partial(d.asstr(), items=items, indices=indices)
@_REGISTRY.register_write(
    H5Group, (views.ArrayView, "U"), IOSpec("string-array", "0.2.0")
)
@_REGISTRY.register_write(
    H5Group, (views.ArrayView, "O"), IOSpec("string-array", "0.2.0")
)
@_REGISTRY.register_write(H5Group, (np.ndarray, "U"), IOSpec("string-array", "0.2.0"))
@_REGISTRY.register_write(H5Group, (np.ndarray, "O"), IOSpec("string-array", "0.2.0"))
def write_vlen_string_array(f, k, elem, dataset_kwargs=MappingProxyType({})):
    """Write a unicode/object array as an HDF5 variable-length string dataset."""
    # h5py represents variable-length strings through a special dtype
    vlen_str = h5py.special_dtype(vlen=str)
    f.create_dataset(k, data=elem.astype(vlen_str), dtype=vlen_str, **dataset_kwargs)
@_REGISTRY.register_write(
    ZarrGroup, (views.ArrayView, "U"), IOSpec("string-array", "0.2.0")
)
@_REGISTRY.register_write(
    ZarrGroup, (views.ArrayView, "O"), IOSpec("string-array", "0.2.0")
)
@_REGISTRY.register_write(ZarrGroup, (np.ndarray, "U"), IOSpec("string-array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, (np.ndarray, "O"), IOSpec("string-array", "0.2.0"))
def write_vlen_string_array_zarr(f, k, elem, dataset_kwargs=MappingProxyType({})):
    """Write a unicode/object array as a zarr dataset using the VLenUTF8 codec."""
    import numcodecs

    # the dataset is created empty with an explicit object codec for
    # variable-length strings, then filled in a second step
    f.create_dataset(
        k,
        shape=elem.shape,
        dtype=object,
        object_codec=numcodecs.VLenUTF8(),
        **dataset_kwargs,
    )
    f[k][:] = elem
###############
# np.recarray #
###############
def _to_hdf5_vlen_strings(value: np.ndarray) -> np.ndarray:
    """Correct compound dtypes to work with hdf5 files.

    String fields (kind "U" or "O") are replaced with h5py's
    variable-length string dtype; other fields are kept as-is.
    """
    vlen_str = h5py.special_dtype(vlen=str)
    new_dtype = [
        (name, vlen_str if dtype.kind in ("U", "O") else dtype)
        for name, (dtype, _) in value.dtype.fields.items()
    ]
    return value.astype(new_dtype)
@_REGISTRY.register_read(H5Array, IOSpec("rec-array", "0.2.0"))
@_REGISTRY.register_read(ZarrArray, IOSpec("rec-array", "0.2.0"))
def read_recarray(d):
    """Read a record array, converting its string fields for in-memory use."""
    raw = d[()]
    # keep the on-disk dtype around: h5py>=3 needs it for decoding below
    original_dtype = raw.dtype
    value = _from_fixed_length_strings(raw)
    if H5PY_V3:
        value = _decode_structured_array(value, dtype=original_dtype)
    return value
@_REGISTRY.register_write(H5Group, (np.ndarray, "V"), IOSpec("rec-array", "0.2.0"))
@_REGISTRY.register_write(H5Group, np.recarray, IOSpec("rec-array", "0.2.0"))
def write_recarray(f, k, elem, dataset_kwargs=MappingProxyType({})):
    """Write a structured/record array to HDF5, converting string fields
    to variable-length strings so h5py can store them."""
    f.create_dataset(k, data=_to_hdf5_vlen_strings(elem), **dataset_kwargs)
@_REGISTRY.register_write(ZarrGroup, (np.ndarray, "V"), IOSpec("rec-array", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, np.recarray, IOSpec("rec-array", "0.2.0"))
def write_recarray_zarr(f, k, elem, dataset_kwargs=MappingProxyType({})):
    """Write a structured/record array to zarr, converting string fields
    to fixed-length strings first."""
    from anndata.compat import _to_fixed_length_strings
    f.create_dataset(k, data=_to_fixed_length_strings(elem), **dataset_kwargs)
#################
# Sparse arrays #
#################
def write_sparse_compressed(
    f, key, value, fmt: Literal["csr", "csc"], dataset_kwargs=MappingProxyType({})
):
    """Write a compressed sparse matrix as a group of three 1d datasets.

    ``fmt`` is bound via ``functools.partial`` at registration time and is
    not used by the body itself.
    """
    group = f.create_group(key)
    group.attrs["shape"] = value.shape

    # allow resizing of the underlying datasets
    if "maxshape" not in dataset_kwargs:
        dataset_kwargs = dict(maxshape=(None,), **dataset_kwargs)

    for attr in ("data", "indices", "indptr"):
        group.create_dataset(attr, data=getattr(value, attr), **dataset_kwargs)
# The csr/csc writers share the implementation above; the format is bound here.
write_csr = partial(write_sparse_compressed, fmt="csr")
write_csc = partial(write_sparse_compressed, fmt="csc")
# Register the bound writers for both the concrete scipy matrix classes and
# their anndata view wrappers, for both the h5py and the zarr backends.
_REGISTRY.register_write(H5Group, sparse.csr_matrix, IOSpec("csr_matrix", "0.1.0"))(
    write_csr
)
_REGISTRY.register_write(H5Group, views.SparseCSRView, IOSpec("csr_matrix", "0.1.0"))(
    write_csr
)
_REGISTRY.register_write(H5Group, sparse.csc_matrix, IOSpec("csc_matrix", "0.1.0"))(
    write_csc
)
_REGISTRY.register_write(H5Group, views.SparseCSCView, IOSpec("csc_matrix", "0.1.0"))(
    write_csc
)
_REGISTRY.register_write(ZarrGroup, sparse.csr_matrix, IOSpec("csr_matrix", "0.1.0"))(
    write_csr
)
_REGISTRY.register_write(ZarrGroup, views.SparseCSRView, IOSpec("csr_matrix", "0.1.0"))(
    write_csr
)
_REGISTRY.register_write(ZarrGroup, sparse.csc_matrix, IOSpec("csc_matrix", "0.1.0"))(
    write_csc
)
_REGISTRY.register_write(ZarrGroup, views.SparseCSCView, IOSpec("csc_matrix", "0.1.0"))(
    write_csc
)
@_REGISTRY.register_write(H5Group, SparseDataset, IOSpec("", "0.1.0"))
@_REGISTRY.register_write(ZarrGroup, SparseDataset, IOSpec("", "0.1.0"))
def write_sparse_dataset(f, k, elem, dataset_kwargs=MappingProxyType({})):
    """Write a backed sparse dataset through the compressed sparse writer."""
    write_sparse_compressed(
        f, k, elem.to_backed(), fmt=elem.format_str, dataset_kwargs=dataset_kwargs
    )
    # TODO: Cleaner way to do this — the registered spec is empty, so the
    # encoding metadata is patched onto the written group manually
    attrs = f[k].attrs
    attrs["encoding-type"] = f"{elem.format_str}_matrix"
    attrs["encoding-version"] = "0.1.0"
@_REGISTRY.register_read(H5Group, IOSpec("csc_matrix", "0.1.0"))
@_REGISTRY.register_read(H5Group, IOSpec("csr_matrix", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("csc_matrix", "0.1.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("csr_matrix", "0.1.0"))
def read_sparse(elem):
    """Read a compressed sparse matrix group fully into memory."""
    return SparseDataset(elem).to_memory()
@_REGISTRY.register_read_partial(H5Group, IOSpec("csc_matrix", "0.1.0"))
@_REGISTRY.register_read_partial(H5Group, IOSpec("csr_matrix", "0.1.0"))
def read_sparse_partial(elem, *, items=None, indices=(slice(None), slice(None))):
    """Read only the requested indices of a sparse matrix group.

    ``items`` is accepted for interface uniformity but unused here.
    """
    return SparseDataset(elem)[indices]
##############
# DataFrames #
##############
@_REGISTRY.register_write(H5Group, views.DataFrameView, IOSpec("dataframe", "0.2.0"))
@_REGISTRY.register_write(H5Group, pd.DataFrame, IOSpec("dataframe", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, views.DataFrameView, IOSpec("dataframe", "0.2.0"))
@_REGISTRY.register_write(ZarrGroup, pd.DataFrame, IOSpec("dataframe", "0.2.0"))
def write_dataframe(f, key, df, dataset_kwargs=MappingProxyType({})):
    """Write a DataFrame as a group with one element per column plus the index."""
    # reject column names that collide with on-disk bookkeeping attributes
    for reserved in ("_index",):
        if reserved in df.columns:
            raise ValueError(f"{reserved!r} is a reserved name for dataframe columns.")

    group = f.create_group(key)
    group.attrs["column-order"] = [check_key(c) for c in df.columns]
    # "_index" is the fallback key used for unnamed indices
    index_name = df.index.name if df.index.name is not None else "_index"
    group.attrs["_index"] = check_key(index_name)

    # ._values is "the best" array representation: the true array backing the
    # object, where `.values` is always a np.ndarray and .array is always a
    # pandas array
    write_elem(group, index_name, df.index._values, dataset_kwargs=dataset_kwargs)
    for colname, series in df.items():
        # TODO: this should write the "true" representation of the series
        # (i.e. the underlying array or ndarray depending)
        write_elem(group, colname, series._values, dataset_kwargs=dataset_kwargs)
@_REGISTRY.register_read(H5Group, IOSpec("dataframe", "0.2.0"))
@_REGISTRY.register_read(ZarrGroup, IOSpec("dataframe", "0.2.0"))
def read_dataframe(elem):
    """Read a "dataframe"-encoded group back into a pandas DataFrame."""
    column_order = list(_read_attr(elem.attrs, "column-order"))
    index_key = _read_attr(elem.attrs, "_index")
    data = {col: read_elem(elem[col]) for col in column_order}
    df = pd.DataFrame(
        data,
        index=read_elem(elem[index_key]),
        columns=list(column_order),
    )
    # "_index" marks an unnamed index; any other key is the real index name
    if index_key != "_index":
        df.index.name = index_key
    return df
# TODO: Figure out what indices is allowed to be at each element
@_REGISTRY.register_read_partial(H5Group, IOSpec("dataframe", "0.2.0"))
@_REGISTRY.register_read_partial(ZarrGroup, IOSpec("dataframe", "0.2.0"))
def read_dataframe_partial(
    elem, *, items=None, indices=(slice(None, None), slice(None, None))
):
    """Read a row subset (and optionally a column subset) of a dataframe group."""
    all_columns = _read_attr(elem.attrs, "column-order")
    if items is None:
        columns = list(all_columns)
    else:
        # preserve the on-disk column order while filtering to the items
        columns = [col for col in all_columns if col in items]
    index_key = _read_attr(elem.attrs, "_index")
    # only the first (row) axis applies to the individual column arrays
    row_idx = indices[0]
    df = pd.DataFrame(
        {col: read_elem_partial(elem[col], indices=row_idx) for col in columns},
        index=read_elem_partial(elem[index_key], indices=row_idx),
        columns=list(columns),
    )
    if index_key != "_index":
        df.index.name = index_key
    return df
# Backwards compat dataframe reading
@_REGISTRY.register_read(H5Group, | |
"""Generator of dynamically typed draft stubs for arbitrary modules.
Basic usage:
$ mkdir out
$ stubgen urllib.parse
=> Generate out/urllib/parse.pyi.
For Python 2 mode, use --py2:
$ stubgen --py2 textwrap
For C modules, you can get more precise function signatures by parsing .rst (Sphinx)
documentation for extra information. For this, use the --docpath option:
$ scripts/stubgen --docpath <DIR>/Python-3.4.2/Doc/library curses
=> Generate out/curses.py.
Use "stubgen -h" for more help.
Note: You should verify the generated stubs manually.
TODO:
- support stubs for C modules in Python 2 mode
- support non-default Python interpreters in Python 3 mode
- if using --no-import, look for __all__ in the AST
- infer some return types, such as no return statement with value -> None
- detect 'if PY2 / is_py2' etc. and either preserve those or only include Python 2 or 3 case
- maybe export more imported names if there is no __all__ (this affects ssl.SSLError, for example)
- a quick and dirty heuristic would be to turn this on if a module has something like
'from x import y as _y'
- we don't seem to always detect properties ('closed' in 'io', for example)
"""
import glob
import imp
import importlib
import json
import os.path
import pkgutil
import subprocess
import sys
import textwrap
from typing import (
Any, List, Dict, Tuple, Iterable, Iterator, Optional, NamedTuple, Set, Union, cast
)
import mypy.build
import mypy.parse
import mypy.errors
import mypy.traverser
from mypy import defaults
from mypy.nodes import (
Expression, IntExpr, UnaryExpr, StrExpr, BytesExpr, NameExpr, FloatExpr, MemberExpr, TupleExpr,
ListExpr, ComparisonExpr, CallExpr, ClassDef, MypyFile, Decorator, AssignmentStmt,
IfStmt, ImportAll, ImportFrom, Import, FuncDef, FuncBase,
ARG_STAR, ARG_STAR2, ARG_NAMED, ARG_NAMED_OPT,
)
from mypy.stubgenc import parse_all_signatures, find_unique_signatures, generate_stub_for_c_module
from mypy.stubutil import is_c_module, write_header
from mypy.options import Options as MypyOptions
# Parsed command-line options for stub generation, collected into an
# immutable record that is threaded through the generation functions.
Options = NamedTuple('Options', [('pyversion', Tuple[int, int]),
                                 ('no_import', bool),
                                 ('doc_dir', str),
                                 ('search_path', List[str]),
                                 ('interpreter', str),
                                 ('modules', List[str]),
                                 ('ignore_errors', bool),
                                 ('recursive', bool),
                                 ('fast_parser', bool),
                                 ])
def generate_stub_for_module(module: str, output_dir: str, quiet: bool = False,
                             add_header: bool = False,
                             sigs: Optional[Dict[str, str]] = None,
                             class_sigs: Optional[Dict[str, str]] = None,
                             pyversion: Tuple[int, int] = defaults.PYTHON3_VERSION,
                             fast_parser: bool = False,
                             no_import: bool = False,
                             search_path: Optional[List[str]] = None,
                             interpreter: str = sys.executable) -> None:
    """Generate a draft stub (.pyi) for a single module under output_dir.

    Dispatches to the C-module generator when no Python source can be
    found for the module; otherwise parses the source and writes a stub
    mirroring the package layout.

    Note: the mutable default arguments ({} and []) were replaced with
    None sentinels so that default state can never be shared or mutated
    across calls; the observable behavior for callers is unchanged.
    """
    if sigs is None:
        sigs = {}
    if class_sigs is None:
        class_sigs = {}
    if search_path is None:
        search_path = []
    target = module.replace('.', '/')
    result = find_module_path_and_all(module=module,
                                      pyversion=pyversion,
                                      no_import=no_import,
                                      search_path=search_path,
                                      interpreter=interpreter)
    if not result:
        # C module: no Python source, use the introspection/.rst generator
        target = os.path.join(output_dir, target + '.pyi')
        generate_stub_for_c_module(module_name=module,
                                   target=target,
                                   add_header=add_header,
                                   sigs=sigs,
                                   class_sigs=class_sigs)
    else:
        # Python module: packages get an __init__.pyi, plain modules a .pyi
        module_path, module_all = result
        if os.path.basename(module_path) == '__init__.py':
            target += '/__init__.pyi'
        else:
            target += '.pyi'
        target = os.path.join(output_dir, target)
        generate_stub(module_path, output_dir, module_all,
                      target=target, add_header=add_header, module=module,
                      pyversion=pyversion, fast_parser=fast_parser)
    if not quiet:
        print('Created %s' % target)
def find_module_path_and_all(module: str, pyversion: Tuple[int, int],
                             no_import: bool,
                             search_path: List[str],
                             interpreter: str) -> Optional[Tuple[str,
                                                                 Optional[List[str]]]]:
    """Find module and determine __all__.

    Return None if the module is a C module. Return (module_path, __all__) if
    Python module. Raise an exception or exit if failed.
    """
    if no_import:
        # locate the module on the search path without importing it;
        # __all__ cannot be determined in this mode
        module_path = mypy.build.find_module(module, ['.'] + search_path)
        if not module_path:
            raise SystemExit(
                "Can't find module '{}' (consider using --search-path)".format(module))
        return module_path, None

    if pyversion[0] == 2:
        # Python 2 modules are inspected in a subprocess interpreter
        return load_python_module_info(module, interpreter)

    # TODO: Support custom interpreters.
    mod = importlib.import_module(module)
    imp.reload(mod)
    if is_c_module(mod):
        return None
    return mod.__file__, getattr(mod, '__all__', None)
def load_python_module_info(module: str, interpreter: str) -> Tuple[str, Optional[List[str]]]:
    """Return tuple (module path, module __all__) for a Python 2 module.
    The path refers to the .py/.py[co] file. The second tuple item is
    None if the module doesn't define __all__.
    Exit if the module can't be imported or if it's a C extension module.
    """
    # NOTE(review): the code string is interpolated into a shell command and
    # executed with shell=True. Both values come from the local command line,
    # so this is not an injection risk in normal use, but interpreter paths
    # containing spaces or quotes will break — consider subprocess.run with
    # an argument list instead of a shell string.
    cmd_template = '{interpreter} -c "%s"'.format(interpreter=interpreter)
    code = ("import importlib, json; mod = importlib.import_module('%s'); "
            "print(mod.__file__); print(json.dumps(getattr(mod, '__all__', None)))") % module
    try:
        output_bytes = subprocess.check_output(cmd_template % code, shell=True)
    except subprocess.CalledProcessError:
        print("Can't import module %s" % module, file=sys.stderr)
        sys.exit(1)
    # first line: module __file__; second line: JSON-encoded __all__ (or null)
    output = output_bytes.decode('ascii').strip().splitlines()
    module_path = output[0]
    if not module_path.endswith(('.py', '.pyc', '.pyo')):
        raise SystemExit('%s looks like a C module; they are not supported for Python 2' %
                         module)
    if module_path.endswith(('.pyc', '.pyo')):
        # strip the trailing 'c'/'o' so the source file path is returned
        module_path = module_path[:-1]
    module_all = json.loads(output[1])
    return module_path, module_all
def generate_stub(path: str, output_dir: str, _all_: Optional[List[str]] = None,
                  target: Optional[str] = None, add_header: bool = False,
                  module: Optional[str] = None,
                  pyversion: Tuple[int, int] = defaults.PYTHON3_VERSION,
                  fast_parser: bool = False) -> None:
    """Parse the Python source at ``path`` and write a stub for it.

    ``target`` defaults to output_dir/<basename of path>; ``_all_`` is the
    module's __all__ (if known) and controls which names are exported.
    Exits with status 1 on a syntax error in the source.
    """
    with open(path, 'rb') as f:
        source = f.read()
    options = MypyOptions()
    options.python_version = pyversion
    options.fast_parser = fast_parser
    try:
        ast = mypy.parse.parse(source, fnam=path, errors=None, options=options)
    except mypy.errors.CompileError as e:
        # Syntax error!
        for m in e.messages:
            sys.stderr.write('%s\n' % m)
        sys.exit(1)
    # walk the AST collecting stub output lines
    gen = StubGenerator(_all_, pyversion=pyversion)
    ast.accept(gen)
    if not target:
        target = os.path.join(output_dir, os.path.basename(path))
    subdir = os.path.dirname(target)
    if subdir and not os.path.isdir(subdir):
        os.makedirs(subdir)
    with open(target, 'w') as file:
        if add_header:
            write_header(file, module, pyversion=pyversion)
        file.write(''.join(gen.output()))
# What was generated previously in the stub file. We keep track of these to generate
# nicely formatted output (add empty line between non-empty classes, for example).
EMPTY = 'EMPTY'              # nothing generated yet
FUNC = 'FUNC'                # a function stub
CLASS = 'CLASS'              # a non-empty class stub
EMPTY_CLASS = 'EMPTY_CLASS'  # a class stub with an empty body ("...")
VAR = 'VAR'                  # a variable declaration
NOT_IN_ALL = 'NOT_IN_ALL'    # a name skipped because it is not in __all__
class StubGenerator(mypy.traverser.TraverserVisitor):
def __init__(self, _all_: Optional[List[str]], pyversion: Tuple[int, int]) -> None:
    """Initialize generator state.

    :param _all_: contents of the module's __all__, or None if undefined
    :param pyversion: target Python version as a (major, minor) tuple
    """
    self._all_ = _all_
    self._output = []  # type: List[str]
    self._import_lines = []  # type: List[str]
    self._imports = []  # type: List[str]
    self._indent = ''
    self._vars = [[]]  # type: List[List[str]]
    # what kind of element was emitted last (see the state constants above)
    self._state = EMPTY
    self._toplevel_names = []  # type: List[str]
    self._classes = set()  # type: Set[str]
    self._base_classes = []  # type: List[str]
    self._pyversion = pyversion
def visit_mypy_file(self, o: MypyFile) -> None:
    """Visit the whole module: collect classes and base classes first,
    then generate stubs for all definitions."""
    self._classes = find_classes(o)
    for node in o.defs:
        if isinstance(node, ClassDef):
            self._base_classes.extend(self.get_base_types(node))
    super().visit_mypy_file(o)

    # names exported via __all__ but never defined are emitted as a
    # trailing comment section for the user to fill in manually
    undefined = sorted(name for name in self._all_ or []
                       if name not in self._toplevel_names)
    if undefined:
        if self._state != EMPTY:
            self.add('\n')
        self.add('# Names in __all__ with no definition:\n')
        for name in undefined:
            self.add('# %s\n' % name)
def visit_func_def(self, o: FuncDef) -> None:
    """Emit a stub line for a function definition.

    Skips private names, names excluded by __all__, and names already
    emitted; otherwise writes "def name(args)[ -> ret]: ..." and, for
    methods, any self.attr initializers found in the body.
    """
    if self.is_private_name(o.name()):
        return
    if self.is_not_in_all(o.name()):
        return
    if self.is_recorded_name(o.name()):
        return
    # separate top-level functions from preceding non-function output
    if not self._indent and self._state not in (EMPTY, FUNC):
        self.add('\n')
    if not self.is_top_level():
        # for methods: emit declarations for attributes assigned via
        # "self.x = ..." inside this function's body
        self_inits = find_self_initializers(o)
        for init, value in self_inits:
            init_code = self.get_init(init, value)
            if init_code:
                self.add(init_code)
    self.add("%sdef %s(" % (self._indent, o.name()))
    self.record_name(o.name())
    args = []  # type: List[str]
    for i, arg_ in enumerate(o.arguments):
        var = arg_.variable
        kind = arg_.kind
        name = var.name()
        init_stmt = arg_.initialization_statement
        if init_stmt:
            # argument with a default: infer its type from the default
            # value and render it as "name: type = ..."
            if kind in (ARG_NAMED, ARG_NAMED_OPT) and '*' not in args:
                # keyword-only arguments need a preceding bare "*"
                args.append('*')
            typename = self.get_str_type_of_node(init_stmt.rvalue, True)
            arg = '{}: {} = ...'.format(name, typename)
        elif kind == ARG_STAR:
            arg = '*%s' % name
        elif kind == ARG_STAR2:
            arg = '**%s' % name
        else:
            arg = name
        args.append(arg)
    # only __init__ gets an explicit return annotation (-> None)
    retname = None
    if o.name() == '__init__':
        retname = 'None'
    retfield = ''
    if retname is not None:
        retfield = ' -> ' + retname
    self.add(', '.join(args))
    self.add("){}: ...\n".format(retfield))
    self._state = FUNC
    def visit_decorator(self, o: Decorator) -> None:
        """Emit type-relevant decorators, then visit the decorated function.

        Only @property, @staticmethod, @classmethod and ``@<name>.setter``
        are preserved; other decorators are dropped from the stub.
        """
        if self.is_private_name(o.func.name()):
            return
        for decorator in o.decorators:
            if isinstance(decorator, NameExpr) and decorator.name in ('property',
                                                                      'staticmethod',
                                                                      'classmethod'):
                self.add('%s@%s\n' % (self._indent, decorator.name))
            elif (isinstance(decorator, MemberExpr) and decorator.name == 'setter' and
                  isinstance(decorator.expr, NameExpr)):
                self.add('%s@%s.setter\n' % (self._indent, decorator.expr.name))
        super().visit_decorator(o)
    def visit_class_def(self, o: ClassDef) -> None:
        """Emit a class stub, recursing into its body.

        Records the output length before visiting the body so an empty
        class can be collapsed to ``class X: ...`` on a single line.
        """
        if not self._indent and self._state != EMPTY:
            # Remember where the separating blank line went so it can be
            # removed again if this class turns out to be empty.
            sep = len(self._output)
            self.add('\n')
        else:
            sep = None
        self.add('%sclass %s' % (self._indent, o.name))
        self.record_name(o.name)
        base_types = self.get_base_types(o)
        if base_types:
            self.add('(%s)' % ', '.join(base_types))
        self.add(':\n')
        n = len(self._output)
        self._indent += '    '
        self._vars.append([])
        super().visit_class_def(o)
        self._indent = self._indent[:-4]
        self._vars.pop()
        if len(self._output) == n:
            # Nothing was generated for the body: collapse to one line.
            if self._state == EMPTY_CLASS and sep is not None:
                self._output[sep] = ''
            self._output[-1] = self._output[-1][:-1] + ' ...\n'
            self._state = EMPTY_CLASS
        else:
            self._state = CLASS
def get_base_types(self, cdef: ClassDef) -> List[str]:
base_types = [] # type: List[str]
for base in cdef.base_type_exprs:
if isinstance(base, NameExpr):
if base.name != 'object':
base_types.append(base.name)
elif isinstance(base, MemberExpr):
modname = get_qualified_name(base.expr)
base_types.append('%s.%s' % (modname, base.name))
self.add_import_line('import %s\n' % modname)
return base_types
def visit_assignment_stmt(self, o: AssignmentStmt) -> None:
foundl = []
for lvalue in o.lvalues:
if isinstance(lvalue, NameExpr) and self.is_namedtuple(o.rvalue):
assert isinstance(o.rvalue, CallExpr)
self.process_namedtuple(lvalue, o.rvalue)
continue
if isinstance(lvalue, TupleExpr):
items = lvalue.items
elif isinstance(lvalue, ListExpr):
items = lvalue.items
else:
items = [lvalue]
sep = False
found = False
for item in items:
if isinstance(item, NameExpr):
init = self.get_init(item.name, o.rvalue)
if init:
found = True
if not sep and not self._indent and \
self._state not in (EMPTY, VAR):
init = '\n' + init
sep = True
| |
# Source: migleankstutyte/kaavapino (dataset marker converted to comment)
import copy
import datetime
import numpy as np
from typing import List, NamedTuple, Type
from actstream import action
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.core.serializers.json import DjangoJSONEncoder, json
from django.db import transaction
from django.db.models import Prefetch, Q
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from rest_framework.exceptions import ValidationError, NotFound, ParseError
from rest_framework.serializers import Serializer
from rest_framework_gis.fields import GeometryField
from projects.actions import verbs
from projects.models import (
Project,
ProjectSubtype,
ProjectPhase,
ProjectPhaseLog,
ProjectPhaseSection,
ProjectPhaseDeadlineSection,
ProjectFloorAreaSection,
ProjectAttributeFile,
ProjectDeadline,
Attribute,
ProjectPhaseSectionAttribute,
ProjectComment,
Deadline,
DeadlineDateCalculation,
ProjectAttributeFileFieldsetPathLocation,
)
from projects.models.project import ProjectAttributeMultipolygonGeometry
from projects.permissions.media_file_permissions import (
has_project_attribute_file_permissions,
)
from projects.serializers.utils import _set_fieldset_path
from projects.serializers.fields import AttributeDataField
from projects.serializers.section import create_section_serializer
from projects.serializers.deadline import DeadlineSerializer
from sitecontent.models import ListViewAttributeColumn
from users.models import User
from users.serializers import UserSerializer
class SectionData(NamedTuple):
    """Pairs a phase section with the serializer class generated for it."""
    section: ProjectPhaseSection
    serializer_class: Type[Serializer]
class ProjectDeadlineSerializer(serializers.Serializer):
    """Read serializer for one project deadline entry.

    Besides the raw date, exposes derived flags: whether the deadline is
    past due, out of sync with the project's subtype, or closer to a
    neighboring deadline than the configured minimum distance allows.
    """
    past_due = serializers.SerializerMethodField()
    out_of_sync = serializers.SerializerMethodField()
    is_under_min_distance_previous = serializers.SerializerMethodField()
    is_under_min_distance_next = serializers.SerializerMethodField()
    date = serializers.DateField()
    abbreviation = serializers.CharField(source="deadline.abbreviation")
    deadline = serializers.SerializerMethodField()
    generated = serializers.BooleanField()
    def get_deadline(self, projectdeadline):
        """Nested representation of the underlying Deadline definition."""
        return DeadlineSerializer(
            projectdeadline.deadline
        ).data
    def _resolve_distance_conditions(self, distance, project):
        """True when a distance rule applies to this project.

        A rule with no conditions always applies; otherwise at least one
        condition attribute must be truthy in the project's attribute data.
        """
        if distance.conditions.count() == 0:
            return True
        for attribute in distance.conditions.all():
            if project.attribute_data.get(attribute.identifier):
                return True
        return False
    def get_is_under_min_distance_next(self, projectdeadline):
        """True if some following deadline is nearer than its minimum distance."""
        if not projectdeadline.date:
            return False
        next_deadlines = projectdeadline.deadline.distances_to_next.all()
        for next_distance in next_deadlines:
            # Ignore if distance conditions are not met
            if not self._resolve_distance_conditions(
                next_distance,
                projectdeadline.project,
            ):
                continue
            # Ignore if next deadline does not exist for project
            try:
                next_date = projectdeadline.project.deadlines.get(
                    deadline=next_distance.deadline
                ).date
            except ProjectDeadline.DoesNotExist:
                continue
            # Ignore if next date is not set
            if not next_date:
                continue
            if next_distance.date_type:
                # Date type present: count only days valid for that type.
                distance_to_next = next_distance.date_type.valid_days_to(
                    projectdeadline.date, next_date
                )
            else:
                distance_to_next = (next_date - projectdeadline.date).days
            if distance_to_next < next_distance.distance_from_previous:
                return True
        return False
    def get_is_under_min_distance_previous(self, projectdeadline):
        """True if some preceding deadline is nearer than its minimum distance."""
        if not projectdeadline.date:
            return False
        prev_deadlines = projectdeadline.deadline.distances_to_previous.all()
        for prev_distance in prev_deadlines:
            # Ignore if distance conditions are not met
            if not self._resolve_distance_conditions(
                prev_distance,
                projectdeadline.project,
            ):
                continue
            # Ignore if previous deadline does not exist for project
            try:
                prev_date = projectdeadline.project.deadlines.get(
                    deadline=prev_distance.previous_deadline
                ).date
            except ProjectDeadline.DoesNotExist:
                continue
            # Ignore if previous date is not set
            if not prev_date:
                continue
            if prev_distance.date_type:
                # Date type present: count only days valid for that type.
                distance_from_prev = prev_distance.date_type.valid_days_to(
                    prev_date, projectdeadline.date
                )
            else:
                distance_from_prev = (projectdeadline.date - prev_date).days
            if distance_from_prev < prev_distance.distance_from_previous:
                return True
        return False
    def get_past_due(self, projectdeadline):
        """True when any unconfirmed deadline up to this one is in the past."""
        return len([
            dl for dl in projectdeadline.project.deadlines.filter(
                deadline__index__lte=projectdeadline.deadline.index,
                date__lt=datetime.date.today(),
            )
            if not dl.confirmed
        ]) > 0
    def get_out_of_sync(self, projectdeadline):
        """True when this deadline's phase belongs to a different subtype
        than the project currently has."""
        return projectdeadline.project.subtype != \
            projectdeadline.deadline.phase.project_subtype
    class Meta:
        # NOTE(review): Meta.model/fields only have effect on a
        # ModelSerializer; on a plain Serializer this appears unused --
        # confirm before relying on the field list below.
        model = ProjectDeadline
        fields = [
            "date",
            "abbreviation",
            "deadline_id",
            "past_due",
            "is_under_min_distance_previous",
            "is_under_min_distance_next",
            "out_of_sync",
            "distance_reference_deadline_id",
        ]
class ProjectListSerializer(serializers.ModelSerializer):
    """Lightweight project serializer used by list views."""
    user = serializers.SlugRelatedField(
        read_only=False, slug_field="uuid", queryset=get_user_model().objects.all()
    )
    attribute_data = AttributeDataField(allow_null=True, required=False)
    type = serializers.SerializerMethodField()
    phase_start_date = serializers.SerializerMethodField()
    class Meta:
        model = Project
        fields = [
            "user",
            "created_at",
            "modified_at",
            "name",
            "identifier",
            "pino_number",
            "type",
            "subtype",
            "attribute_data",
            "phase",
            "id",
            "public",
            "owner_edit_override",
            "archived",
            "onhold",
            "create_principles",
            "create_draft",
            "phase_start_date",
        ]
    def get_type(self, project):
        """Primary key of the project's related type."""
        return project.type.pk
    def get_phase_start_date(self, project):
        """Date of the first deadline in the project's current phase, or None."""
        try:
            return project.deadlines \
                .filter(deadline__phase=project.phase) \
                .order_by("deadline__index").first().date
        except AttributeError:
            # .first() returned None: no deadline exists for this phase.
            return None
    def get_attribute_data(self, project):
        """Attribute data limited to the configured list-view columns.

        Static model properties (name, user, ...) are read from the model
        itself; other values come from the stored attribute_data dict.
        NOTE(review): attribute_data is declared as AttributeDataField
        above, so this method only runs if that field delegates to it.
        """
        static_properties = [
            "user",
            "name",
            "public",
            "pino_number",
            "create_principles",
            "create_draft",
        ]
        return_data = {}
        attrs = ListViewAttributeColumn.objects.all().select_related("attribute")
        attribute_data = getattr(project, "attribute_data", {})
        for attr in attrs:
            identifier = attr.attribute.identifier
            value = attribute_data.get(identifier)
            if attr.attribute.static_property in static_properties:
                return_data[identifier] = getattr(
                    project, attr.attribute.static_property
                )
            elif value:
                return_data[identifier] = value
        return_data['kaavaprosessin_kokoluokka'] = project.phase.project_subtype.name
        return return_data
class ProjectSerializer(serializers.ModelSerializer):
    """Full project serializer used by detail endpoints.

    Supports historical reads via the ``snapshot`` query parameter and
    exposes deadlines plus per-attribute update metadata.
    """
    user = serializers.SlugRelatedField(
        read_only=False, slug_field="uuid", queryset=get_user_model().objects.all()
    )
    attribute_data = AttributeDataField(allow_null=True, required=False)
    type = serializers.SerializerMethodField()
    deadlines = serializers.SerializerMethodField()
    # Read-only by default; get_fields() makes public/onhold writable for
    # the project owner.
    public = serializers.NullBooleanField(required=False, read_only=True)
    owner_edit_override = serializers.NullBooleanField(required=False, read_only=True)
    archived = serializers.NullBooleanField(required=False, read_only=True)
    onhold = serializers.NullBooleanField(required=False, read_only=True)
    generated_deadline_attributes = serializers.SerializerMethodField()
    _metadata = serializers.SerializerMethodField()
    class Meta:
        model = Project
        fields = [
            "user",
            "created_at",
            "modified_at",
            "name",
            "identifier",
            "pino_number",
            "type",
            "subtype",
            "attribute_data",
            "phase",
            "id",
            "public",
            "owner_edit_override",
            "archived",
            "onhold",
            "deadlines",
            "create_principles",
            "create_draft",
            "generated_deadline_attributes",
            "_metadata",
        ]
        read_only_fields = ["type", "created_at", "modified_at"]
    def _get_snapshot_date(self, project):
        """Resolve the ``snapshot`` query parameter to a datetime, or None.

        The parameter may be a phase id (snapshot at that phase's start)
        or a datetime string in one of two ISO-like formats.

        Raises:
            NotFound: phase was never entered, or date precedes the project.
            ParseError: the value matches no supported format.
        """
        query_params = getattr(self.context["request"], "GET", {})
        snapshot_param = query_params.get("snapshot")
        snapshot = None
        if snapshot_param:
            # First interpretation: an integer phase id.
            try:
                return ProjectPhaseLog.objects.filter(
                    phase__id=int(snapshot_param),
                    project=project,
                ).order_by("-created_at").first().created_at
            except AttributeError:
                # .first() returned None: the phase has no log entry.
                raise NotFound(detail=_("Project data at selected phase start cannot be found"))
            except ValueError:
                # Not an integer: fall through to datetime parsing.
                pass
            try:
                snapshot = datetime.datetime.strptime(
                    snapshot_param,
                    "%Y-%m-%dT%H:%M:%S.%fZ%z",
                )
            except ValueError:
                try:
                    # Fallback format; the slice drops the colon from the UTC
                    # offset ("+02:00" -> "+0200") so %z can parse it.
                    snapshot = datetime.datetime.strptime(
                        snapshot_param[:-3]+snapshot_param[-2:],
                        "%Y-%m-%dT%H:%M:%S%z",
                    )
                except ValueError:
                    raise ParseError(detail=_("Incorrect snapshot datetime format, use one of the following:\n%Y-%m-%dT%H:%M:%S.%fZ%z\n\nphase id"))
            if snapshot < project.created_at:
                raise NotFound(detail=_("Project data at selected date cannot be found"))
            return snapshot
        return None
def get_fields(self):
fields = super(ProjectSerializer, self).get_fields()
request = self.context.get('request', None)
try:
if request.user.uuid == self.instance.user.uuid:
fields["public"] = serializers.NullBooleanField(required=False)
fields["onhold"] = serializers.NullBooleanField(required=False)
except AttributeError:
pass
return fields
    def get_attribute_data(self, project):
        """Assemble the project's attribute data for serialization.

        Honors a requested snapshot (reconstructing values from the update
        log), then layers in file links, geometries, the subtype name, and
        values mirrored from static model fields.
        """
        snapshot = self._get_snapshot_date(project)
        if snapshot:
            # Reconstruct historical values from logged update events.
            attribute_data = {
                k: v["new_value"]
                for k, v in self._get_updates(project, cutoff=snapshot).items()
            }
        else:
            attribute_data = getattr(project, "attribute_data", {})
        self._set_file_attributes(attribute_data, project, snapshot)
        # TODO handle snapshot case
        self._set_geometry_attributes(attribute_data, project)
        if snapshot:
            # Subtype as of the snapshot; fall back to the current subtype
            # if no phase log entry exists at or before that time.
            try:
                subtype = ProjectPhaseLog.objects.filter(
                    created_at__lte=self._get_snapshot_date(project),
                    project=project
                ).order_by("-created_at").first().phase.project_subtype
            except AttributeError:
                subtype = project.phase.project_subtype
            attribute_data['kaavaprosessin_kokoluokka'] = subtype.name
        else:
            attribute_data['kaavaprosessin_kokoluokka'] = \
                project.phase.project_subtype.name
        # Model fields mirrored into attribute data via Attribute.static_property.
        static_properties = [
            "user",
            "name",
            "public",
            "pino_number",
            "create_principles",
            "create_draft",
        ]
        if not snapshot:
            for static_property in static_properties:
                try:
                    identifier = \
                        Attribute.objects.get(static_property=static_property).identifier
                    attribute_data[identifier] = getattr(project, static_property)
                except Attribute.DoesNotExist:
                    continue
        return attribute_data
    def get_type(self, project):
        """Primary key of the project's related type."""
        return project.type.pk
def get_deadlines(self, project):
deadlines = project.deadlines.filter(deadline__subtype=project.subtype)
return ProjectDeadlineSerializer(
deadlines,
many=True,
allow_null=True,
required=False,
).data
def get_generated_deadline_attributes(self, project):
return [
dl.deadline.attribute.identifier
for dl in project.deadlines.filter(generated=True)
if dl.deadline.attribute
]
    def _set_file_attributes(self, attribute_data, project, snapshot):
        """Merge file-attribute links into ``attribute_data`` in place.

        Only the newest non-archived file per (fieldset path, attribute,
        project) is used; with ``snapshot`` set, creation/archival times
        are evaluated as of that moment. Each value becomes a dict with an
        absolute ``link`` URL and a ``description``.
        """
        request = self.context["request"]
        if snapshot:
            # Files that existed -- and were not yet archived -- at the snapshot.
            attribute_files = ProjectAttributeFile.objects \
                .filter(project=project, created_at__lte=snapshot) \
                .exclude(archived_at__lte=snapshot) \
                .order_by(
                    "fieldset_path_str",
                    "attribute__pk",
                    "project__pk",
                    "-created_at",
                ) \
                .distinct("fieldset_path_str", "attribute__pk", "project__pk")
        else:
            # Live view: only currently unarchived files.
            attribute_files = ProjectAttributeFile.objects \
                .filter(project=project, archived_at=None) \
                .order_by(
                    "fieldset_path_str",
                    "attribute__pk",
                    "project__pk",
                    "-created_at",
                ) \
                .distinct("fieldset_path_str", "attribute__pk", "project__pk")
        # Add file attributes to the attribute data
        # File values are represented as absolute URLs
        file_attributes = {}
        for attribute_file in attribute_files:
            if has_project_attribute_file_permissions(attribute_file, request):
                if not attribute_file.fieldset_path:
                    file_attributes[attribute_file.attribute.identifier] = {
                        "link": request.build_absolute_uri(attribute_file.file.url),
                        "description": attribute_file.description,
                    }
                else:
                    # File belongs inside a fieldset entry: splice the link
                    # into the matching entry via _set_fieldset_path.
                    try:
                        fieldset_content = self.instance.attribute_data.get(
                            attribute_file.fieldset_path[0]["parent"].identifier, []
                        )[attribute_file.fieldset_path[0]["index"]]
                    except (KeyError, IndexError, TypeError):
                        fieldset_content = {}
                    _set_fieldset_path(
                        fieldset_content,
                        attribute_file.fieldset_path,
                        file_attributes,
                        0,
                        attribute_file.attribute.identifier,
                        {
                            "link": request.build_absolute_uri(attribute_file.file.url),
                            "description": attribute_file.description,
                        }
                    )
        attribute_data.update(file_attributes)
@staticmethod
def _set_geometry_attributes(attribute_data, project):
attribute_geometries = ProjectAttributeMultipolygonGeometry.objects.filter(
project=project
)
geometry_attributes = {
attribute_geometry.attribute.identifier: GeometryField().to_representation(
value=attribute_geometry.geometry
)
for attribute_geometry in attribute_geometries
}
attribute_data.update(geometry_attributes)
    def get__metadata(self, project):
        """Build the ``_metadata`` field: related users plus, for live
        detail views only, per-attribute update history."""
        list_view = self.context.get("action", None) == "list"
        metadata = {"users": self._get_users(project, list_view=list_view)}
        query_params = getattr(self.context["request"], "GET", {})
        snapshot_param = query_params.get("snapshot")
        # Update history is expensive; skip it for lists and snapshot reads.
        if not list_view and not snapshot_param:
            metadata["updates"] = self._get_updates(project)
        return metadata
    @staticmethod
    def _get_users(project, list_view=False):
        """Serialize the project owner plus users referenced in attributes.

        List views only return the owner. Otherwise every user-typed and
        fieldset-typed attribute is scanned for user ids stored in the
        project's attribute data.
        """
        users = [project.user]
        if not list_view:
            attributes = Attribute.objects.filter(
                value_type__in=[Attribute.TYPE_USER, Attribute.TYPE_FIELDSET]
            ).prefetch_related(
                Prefetch(
                    "fieldset_attributes",
                    queryset=Attribute.objects.filter(value_type=Attribute.TYPE_USER),
                )
            )
            user_attribute_ids = set()
            for attribute in attributes:
                if attribute.value_type == Attribute.TYPE_FIELDSET:
                    # Fieldsets may nest user-typed attributes; collect their
                    # values from each fieldset entry.
                    fieldset_user_identifiers = attribute.fieldset_attributes.all().values_list(
                        "identifier", flat=True
                    )
                    if attribute.identifier in project.attribute_data:
                        user_attribute_ids |= ProjectSerializer._get_fieldset_attribute_values(
                            project, attribute, fieldset_user_identifiers
                        )
                else:
                    user_id = project.attribute_data.get(attribute.identifier, None)
                    if user_id:
                        user_attribute_ids.add(user_id)
            # Do not include the user of the project
            if str(project.user.uuid) in user_attribute_ids:
                user_attribute_ids.remove(str(project.user.uuid))
            users += list(User.objects.filter(uuid__in=user_attribute_ids))
        return UserSerializer(users, many=True).data
@staticmethod
def _get_fieldset_attribute_values(
project, fieldset_attribute, fieldset_identifiers
):
values = set()
for entry in project.attribute_data[fieldset_attribute.identifier]:
for identifier in fieldset_identifiers:
value = entry.get(identifier, None)
if value:
values.add(value)
return values
@staticmethod
def _get_updates(project, cutoff=None):
# Get the latest attribute updates for distinct attributes
if cutoff:
actions = (
project.target_actions.filter(
verb=verbs.UPDATED_ATTRIBUTE,
timestamp__lte=cutoff,
)
.order_by(
"data__attribute_identifier",
"action_object_content_type",
"action_object_object_id",
"-timestamp",
)
.distinct(
"data__attribute_identifier",
"action_object_content_type",
"action_object_object_id",
)
.prefetch_related("actor")
)
else:
actions = (
project.target_actions.filter(verb=verbs.UPDATED_ATTRIBUTE)
.order_by(
"data__attribute_identifier",
"action_object_content_type",
"action_object_object_id",
"-timestamp",
)
.distinct(
"data__attribute_identifier",
"action_object_content_type",
"action_object_object_id",
)
.prefetch_related("actor")
)
updates = {}
for _action in actions:
attribute_identifier = (
_action.data.get("attribute_identifier", None)
or _action.action_object.identifier
)
updates[attribute_identifier] = {
"user": _action.actor.uuid,
"user_name": _action.actor.get_display_name(),
"timestamp": _action.timestamp,
"new_value": _action.data.get("new_value", None),
"old_value": _action.data.get("old_value", None),
}
return updates
def should_validate_attributes(self):
validate_field_data = self.context["request"].data.get(
"validate_attribute_data", False
)
return serializers.BooleanField().to_internal_value(validate_field_data)
def generate_sections_data(
self,
phase: ProjectPhase,
preview,
validation: bool = True,
) -> List[SectionData]:
sections = []
for section in phase.sections.order_by("index"):
serializer_class = create_section_serializer(
section,
context=self.context,
project=self.instance,
validation=validation,
preview=preview,
)
section_data = SectionData(section, serializer_class)
sections.append(section_data)
return sections
def generate_floor_area_sections_data(
self, floor_area_sections, preview, validation: bool = True
) -> List[SectionData]:
sections = []
for section in floor_area_sections.order_by("index"):
serializer_class = create_section_serializer(
section,
context=self.context,
project=self.instance,
validation=validation,
preview=preview,
)
section_data = SectionData(section, serializer_class)
sections.append(section_data)
return sections
def generate_schedule_sections_data(self, phase, preview, validation=True):
sections = []
deadline_sections = phase.deadline_sections.all()
for section in deadline_sections:
serializer_class = create_section_serializer(
section,
context=self.context,
project=self.instance,
validation=validation,
preview=preview,
)
section_data = SectionData(section, serializer_class)
sections.append(section_data)
return sections
def validate(self, attrs):
archived = attrs.get('archived')
was_archived = self.instance and self.instance.archived
if archived is not False and was_archived:
raise ValidationError(
{"phase": _("Archived projects | |
# (dataset artifact removed: GitHub stars marker)
# Standard Library
import copy
import enum
import json
import re
from typing import Any, List, Mapping, Optional, Tuple, Union
# Third Party
from loguru import logger
from pydantic import BaseModel, Field, root_validator
# Local
import bel.core.settings as settings
import bel.db.arangodb
import bel.terms.orthologs
import bel.terms.terms
from bel.core.utils import namespace_quoting, split_key_label
from bel.resources.namespace import get_namespace_metadata
from bel.schemas.constants import AnnotationTypesEnum, EntityTypesEnum
from bel.schemas.terms import Term
# Module-level aliases shared by the span/entity classes below.
Key = str # Type alias for NsVal Key values
NamespacePattern = r"[\w\.]+" # Regex for Entity Namespace
class SpanTypesEnum(str, enum.Enum):
    """Kinds of string spans recognized while parsing a BEL assertion."""
    function = "function"
    function_name = "function_name"
    function_args = "function_args"
    relation = "relation"
    ns_arg = "ns_arg"
    namespace = "namespace"
    ns_id = "ns_id"
    ns_label = "ns_label"
    string_arg = "string_arg"
    string = "string"
    start_paren = "start_paren"
    end_paren = "end_paren"
class Span(BaseModel):
    """Used for collecting string spans

    A span is recorded as the index of its first character and the
    non-inclusive index one past its last character.
    For example:
        'cat' with a span of [0, 2] results in 'ca'
    You can use -1 as the span end for 1 beyond the last character of the string.
    """
    start: int = Field(..., title="Span Start")
    end: int = Field(..., title="Span End")
    # The text covered by the span, when available (defaults to empty).
    span_str: str = ""
    type: Optional[SpanTypesEnum]
class NsArgSpan(Span):
    """Namespace Arg Span

    Sub-spans of a namespace argument: namespace and id, with an
    optional label span.
    """
    namespace: Span
    id: Span
    label: Optional[Span]
class FunctionSpan(Span):
    """Span of a BEL function, with sub-spans for its name and arguments."""
    name: Span # function name span
    args: Optional[Span] # parentheses span
class Pair(BaseModel):
    """Paired characters

    Used for collecting matched quotes and parentheses; the Optional int
    type allows recording a missing partner character.
    """
    start: Union[int, None] = Field(..., description="index of first in paired chars")
    end: Union[int, None] = Field(..., description="Index of second of paired chars")
class ErrorLevelEnum(str, enum.Enum):
    """Severity/status levels used in validation results."""
    Good = "Good"
    Error = "Error"
    Warning = "Warning"
    Processing = "Processing"
class ValidationErrorType(str, enum.Enum):
    """Kind of object a validation error applies to."""
    Nanopub = "Nanopub"
    Assertion = "Assertion"
    Annotation = "Annotation"
class ValidationError(BaseModel):
    """A single validation finding for a nanopub, assertion or annotation."""
    type: ValidationErrorType
    severity: ErrorLevelEnum
    label: str = Field(
        "",
        description="Label used in search - combination of type and severity, e.g. Assertion-Warning",
    )
    msg: str
    visual: Optional[str] = Field(
        None,
        description="Visualization of the location of the error in the Assertion string or Annotation using html span tags",
    )
    visual_pairs: Optional[List[Tuple[int, int]]] = Field(
        None,
        description="Used when the Assertion string isn't available. You can then post-process these pairs to create the visual field.",
    )
    index: int = Field(
        0,
        description="Index to sort validation errors - e.g. for multiple errors in Assertions - start at the beginning of the string.",
    )
    @root_validator(pre=True)
    def set_label(cls, values):
        """Default ``label`` to "<type>-<severity>" when not provided."""
        label, type_, severity = (values.get("label"), values.get("type"), values.get("severity"))
        if not label:
            label = f"{type_}-{severity}"
        values["label"] = label.strip()
        return values
class ValidationErrors(BaseModel):
    """Aggregated validation outcome: overall status plus individual errors."""
    status: Optional[ErrorLevelEnum] = "Good"
    errors: Optional[List[ValidationError]]
    validation_target: Optional[str]
class AssertionStr(BaseModel):
    """Assertion string object - to handle either SRO format or simple string of full assertion"""
    entire: str = Field(
        "",
        description="Will be dynamically created from the SRO fields if null/empty when initialized.",
    )
    subject: str = ""
    relation: str = ""
    object: str = ""
    @root_validator(pre=True)
    def set_entire(cls, values):
        """Build ``entire`` from subject/relation/object when not supplied."""
        entire, subject, relation, object_ = (
            values.get("entire"),
            values.get("subject"),
            values.get("relation"),
            values.get("object"),
        )
        # Normalize missing parts to "" so the join below cannot fail.
        if subject is None:
            subject = ""
        if relation is None:
            relation = ""
        if object_ is None:
            object_ = ""
        if not entire:
            entire = f"{subject} {relation} {object_}"
        values["entire"] = entire.strip()
        return values
class NsVal(object):
    """Namespaced value: ``namespace:id`` with an optional ``!label``."""

    def __init__(
        self, key_label: str = "", namespace: str = "", key: str = "", id: str = "", label: str = ""
    ):
        """Preferentially use key_label to extract namespace:id!Optional[label]"""
        self.key = key
        if key_label:
            (namespace, id, label) = split_key_label(key_label)
        elif key:
            (namespace, id) = key.split(":", 1)
        self.namespace: str = namespace
        # Quote id/label when they contain characters that require quoting.
        self.id: str = namespace_quoting(id)
        if not self.key:
            self.key = f"{self.namespace}:{self.id}"
        self.label = ""
        if label:
            self.label: str = namespace_quoting(label)
        # Add key_label to NsVal (may look the label up in the term store)
        self.update_key_label()

    def add_label(self):
        """Fill in the label from the term store if it is missing."""
        if not self.label:
            self.update_label()
        return self

    def update_label(self):
        """Look up this term and refresh the label from it, if found."""
        term = bel.terms.terms.get_term(self.key)
        if term and term.label:
            self.label = namespace_quoting(term.label)
        return self

    def db_key(self):
        """Used for arangodb key"""
        return bel.db.arangodb.arango_id_to_key(self.key)

    def update_key_label(self):
        """Return key with label if available"""
        self.add_label()
        if self.label:
            self.key_label = f"{self.namespace}:{self.id}!{self.label}"
        else:
            self.key_label = f"{self.namespace}:{self.id}"
        return self.key_label

    def to_string(self):
        # BUG FIX: the original called the undefined module-level name
        # ``__str__(self)`` (NameError at runtime); delegate to str().
        return str(self)

    def to_json(self):
        """JSON representation is the ``ns:id[!label]`` string."""
        return self.key_label

    def __str__(self):
        if self.label:
            return f"{self.namespace}:{self.id}!{self.label}"
        else:
            return f"{self.namespace}:{self.id}"

    __repr__ = __str__

    def __len__(self):
        return len(self.__str__())
class BelEntity(object):
"""BEL Term - supports original NsVal ns:id!label plus (de)canonicalization and orthologs"""
    def __init__(self, term_key: Key = "", nsval: Optional[NsVal] = None):
        """Create BelEntity via a term_key or a NsVal object

        You cannot provide a term_key_label string (e.g. NS:ID:LABEL) as a term_key
        """
        self.term: Optional[Term] = None
        self.canonical: Optional[NsVal] = None
        self.decanonical: Optional[NsVal] = None
        self.species_key: Key = None
        self.entity_types = []
        # species_key -> {"canonical": NsVal, "decanonical": NsVal}
        self.orthologs: Mapping[Key, dict] = {}
        self.orthologized: bool = False
        self.original_species_key: Optional[Key] = None
        # NOTE - self.nsval is overridden when orthologized
        if term_key:
            self.original_term_key = term_key
            self.term = bel.terms.terms.get_term(term_key)
            if self.term:
                self.species_key = self.term.species_key
                self.original_species_key = self.species_key
                self.nsval: NsVal = NsVal(
                    namespace=self.term.namespace, id=self.term.id, label=self.term.label
                )
                self.original_nsval = self.nsval
        elif nsval:
            self.nsval = nsval
            self.original_nsval = nsval
        else:
            self.nsval = None
        # NOTE(review): when nsval stays None (no args, or an unresolvable
        # term_key) the attribute access below raises AttributeError -- confirm
        # callers always supply one of the two.
        self.namespace_metadata = get_namespace_metadata().get(self.nsval.namespace, None)
        if self.namespace_metadata is not None and self.namespace_metadata.entity_types:
            self.entity_types = self.namespace_metadata.entity_types
        self.add_term()
    def add_term(self):
        """Add term info"""
        # Only "complete" namespaces have their terms in the term store.
        if self.namespace_metadata and self.namespace_metadata.namespace_type == "complete":
            self.term = bel.terms.terms.get_term(self.nsval.key)
            # Replace nsval if the store returned a different preferred key.
            if self.term and self.nsval.key != self.term.key:
                self.nsval = NsVal(
                    namespace=self.term.namespace, id=self.term.id, label=self.term.label
                )
        if self.term and self.term.entity_types:
            self.entity_types = self.term.entity_types
        if self.term and self.term.species_key:
            self.species_key = self.term.species_key
        return self
def add_species(self):
"""Add species if not already set"""
if self.species_key:
return self
if not self.term:
self.add_term()
if self.term and self.term.species_key:
self.species_key = self.term.species_key
elif self.namespace_metadata and self.namespace_metadata.species_key:
self.species_key = self.namespace_metadata.species_key
return self
    def add_entity_types(self):
        """get entity_types to BEL Entity"""
        if self.entity_types:
            return self
        entity_types = []
        if self.term:
            entity_types = self.term.entity_types
        elif self.namespace_metadata and self.namespace_metadata.namespace_type == "complete":
            # Term not loaded yet but the namespace is complete: fetch it.
            self.term = bel.terms.terms.get_term(self.nsval.key)
            if self.term:
                entity_types = self.term.entity_types
        elif self.namespace_metadata and self.namespace_metadata.entity_types:
            entity_types = self.namespace_metadata.entity_types
        # Store plain type names; later checks compare against strings like
        # "Gene"/"Protein". NOTE(review): assumes each collected entity type
        # object exposes ``.name`` -- confirm against the entity-type schema.
        self.entity_types = [et.name for et in entity_types]
        return self
def get_entity_types(self):
if not self.entity_types:
self.add_entity_types()
return self.entity_types
    def normalize(
        self,
        canonical_targets: Mapping[str, List[str]] = settings.BEL_CANONICALIZE,
        decanonical_targets: Mapping[str, List[str]] = settings.BEL_DECANONICALIZE,
    ):
        """Collect (de)canonical forms"""
        # Already computed -- nothing to do.
        if self.canonical and self.decanonical:
            return self
        # Namespaces without a complete term store cannot be normalized;
        # the entity is its own canonical and decanonical form.
        if self.namespace_metadata and self.namespace_metadata.namespace_type != "complete":
            self.canonical = self.nsval
            self.decanonical = self.nsval
            return self
        normalized = bel.terms.terms.get_normalized_terms(
            self.nsval.key,
            canonical_targets=canonical_targets,
            decanonical_targets=decanonical_targets,
            term=self.term,
        )
        if normalized["original"] != normalized["normalized"]:
            self.nsval = NsVal(key_label=normalized["normalized"])
            # Preserve the label originally supplied by the caller.
            if self.original_nsval.label:
                self.nsval.label = self.original_nsval.label
        self.canonical = copy.deepcopy(self.nsval)
        if normalized["canonical"]:
            self.canonical = NsVal(key_label=normalized["canonical"])
            # Canonical form is represented without a label.
            self.canonical.label = ""
            self.canonical.key_label = self.canonical.key
        self.decanonical = self.nsval
        if normalized["decanonical"]:
            self.decanonical = NsVal(key_label=normalized["decanonical"])
        return self
def canonicalize(
self,
canonical_targets: Mapping[str, List[str]] = settings.BEL_CANONICALIZE,
decanonical_targets: Mapping[str, List[str]] = settings.BEL_DECANONICALIZE,
):
"""Canonicalize BEL Entity
Must set both targets if not using defaults as the underlying normalization handles
both canonical and decanonical forms in the same query
"""
if self.orthologized:
self.nsval = self.orthologs[self.species_key]["canonical"]
else:
self.normalize(
canonical_targets=settings.BEL_CANONICALIZE,
decanonical_targets=settings.BEL_DECANONICALIZE,
)
self.nsval = self.canonical
return self
def decanonicalize(
self,
canonical_targets: Mapping[str, List[str]] = settings.BEL_CANONICALIZE,
decanonical_targets: Mapping[str, List[str]] = settings.BEL_DECANONICALIZE,
):
"""Decanonicalize BEL Entity
Must set both targets if not using defaults as the underlying normalization handles
both canonical and decanonical forms in the same query
"""
if self.orthologized:
self.nsval = self.orthologs[self.species_key]["decanonical"]
else:
self.normalize(
canonical_targets=settings.BEL_CANONICALIZE,
decanonical_targets=settings.BEL_DECANONICALIZE,
)
self.nsval = self.decanonical
return self
    def collect_orthologs(self, species_keys: List[Key] = settings.BEL_ORTHOLOGIZE_TARGETS):
        """Collect orthologs for this BelEntity, if it is orthologizable."""
        self.add_entity_types()
        self.normalize()
        self.add_species()
        # Do not run if no species or already exists
        if not self.species_key or self.orthologs:
            return self
        # Only collect orthologs if it's the right entity type
        self.add_entity_types()
        if not list(set(self.entity_types) & set(["Gene", "RNA", "Micro_RNA", "Protein", "all"])):
            return self
        orthologs = bel.terms.orthologs.get_orthologs(self.canonical.key, species_keys=species_keys)
        for ortholog_species_key in orthologs:
            ortholog_key = orthologs[ortholog_species_key]
            # Record both canonical and decanonical forms per species.
            normalized = bel.terms.terms.get_normalized_terms(ortholog_key)
            ortholog_dict = {}
            if normalized["canonical"]:
                ortholog_dict["canonical"] = NsVal(
                    key=normalized["canonical"], label=normalized["label"]
                )
            if normalized["decanonical"]:
                ortholog_dict["decanonical"] = NsVal(
                    key=normalized["decanonical"], label=normalized["label"]
                )
            self.orthologs[ortholog_species_key] = copy.copy(ortholog_dict)
        return self
    def orthologize(self, species_key: Key):
        """Orthologize BEL entity - results in canonical form"""
        self.add_entity_types()
        self.normalize()
        self.add_species()
        # Do not run if no species or already exists
        if not self.species_key:
            return self
        # Only collect orthologs if it's the right entity type
        self.add_entity_types()
        if not list(set(self.entity_types) & set(["Gene", "RNA", "Micro_RNA", "Protein", "all"])):
            return self
        if not self.orthologs:
            self.collect_orthologs(species_keys=[species_key])
        # No ortholog found in the target species: fall back to the
        # canonical form of the original entity.
        if species_key not in self.orthologs:
            self.orthologized = False
            self.nsval = self.canonical
            return self
        self.orthologized = True
        self.species_key = species_key
        self.nsval = self.orthologs[species_key]["canonical"]
        return self
def orthologizable(self, species_key: Key) -> bool:
"""Is this BEL Entity/NSArg orthologizable?"""
self.add_entity_types()
self.normalize()
| |
<reponame>dotupNET/ghcloneall<filename>tests.py
import os
import re
import subprocess
import sys
try:
from cStringIO import StringIO
except ImportError: # pragma: PY3
from io import StringIO
import pytest
import requests
import requests_cache
import ghcloneall
class MockResponse:
    """Minimal stand-in for a requests.Response used by the mocked HTTP layer.

    Parameters
    ----------
    status_code : int
        The canned HTTP status code.
    json : object
        The canned JSON payload (required).
    links : dict or None
        Mapping of rel -> url, mimicking parsed Link headers.
    """

    def __init__(self, status_code=200, json=None, links=None):
        # BUG FIX: the mutable default `links={}` was replaced with None to
        # avoid the shared-mutable-default-argument pitfall; behavior for
        # all existing callers is unchanged.
        assert json is not None
        self.status_code = status_code
        self.links = {
            rel: dict(rel=rel, url=url)
            for rel, url in (links or {}).items()
        }
        self._json = json

    def json(self):
        """Return the canned JSON payload."""
        return self._json

    def raise_for_status(self):
        """Mimic requests.Response.raise_for_status() for error codes."""
        if self.status_code >= 400:
            raise requests.HTTPError()
class MockRequestGet:
    """Callable stand-in for requests.get that serves canned responses by URL."""

    def __init__(self):
        # url -> MockResponse mapping of registered canned responses.
        self.responses = {}
        # Fallback response for any URL that has not been registered.
        self.not_found = MockResponse(
            status_code=404, json={'message': 'not found'},
        )

    def update(self, responses):
        """Register additional url -> MockResponse mappings."""
        self.responses.update(responses)

    def __call__(self, url, headers=None):
        # `headers` is accepted (and ignored) to match requests.get's API.
        return self.responses.get(url, self.not_found)
@pytest.fixture(autouse=True)
def mock_requests_get(monkeypatch):
    """Replace requests.get (module- and Session-level) with a MockRequestGet."""
    mock_get = MockRequestGet()
    monkeypatch.setattr(requests, 'get', mock_get)
    monkeypatch.setattr(requests.Session, 'get', mock_get)
    return mock_get
@pytest.fixture(autouse=True)
def mock_requests_cache(monkeypatch):
    """Disable requests_cache so tests never touch an on-disk HTTP cache."""
    monkeypatch.setattr(requests_cache, 'install_cache', lambda *a, **kw: None)
class MockPopen:
    """Stand-in for subprocess.Popen that returns canned output and exit code."""

    def __init__(self, stdout=b'', stderr=b'', rc=0):
        self.stdout = stdout
        self.stderr = stderr
        self.rc = rc

    def __call__(self, args, stdout=None, stderr=None, cwd=None):
        """Simulate launching a process, honouring PIPE/STDOUT redirections."""
        out, err = self.stdout, self.stderr
        if stderr == subprocess.STDOUT:
            # Merge stderr into stdout, like the real Popen would.
            out += err
            err = None
        elif stderr != subprocess.PIPE:
            err = None
        if stdout != subprocess.PIPE:
            out = None
        return MockPopen(out, err, self.rc)

    def communicate(self):
        """Return the canned (stdout, stderr) pair."""
        return self.stdout, self.stderr

    def wait(self):
        """Return the canned exit code."""
        return self.rc
@pytest.fixture(autouse=True)
def mock_subprocess_Popen(monkeypatch):
    """Replace subprocess.Popen with a MockPopen so no real commands run."""
    mock_Popen = MockPopen()
    monkeypatch.setattr(subprocess, 'Popen', mock_Popen)
    return mock_Popen
@pytest.fixture(autouse=True)
def mock_config_filename(monkeypatch):
    """Point ghcloneall at /dev/null so no real config file is ever read."""
    monkeypatch.setattr(ghcloneall, 'CONFIG_FILE', '/dev/null')
def make_page_url(url, page, extra):
    """Build a GitHub API page URL matching ghcloneall's pagination format."""
    if page == 1:
        # The first page carries no explicit page number.
        query = 'per_page=100'
    else:
        query = 'page=%d&per_page=100' % page
    return '%s?%s%s' % (url, extra, query)
def mock_multi_page_api_responses(url, pages, extra='sort=full_name&'):
    """Build a url -> MockResponse mapping for a paginated API endpoint.

    Every page except the last advertises the following page via a
    'next' link, mirroring GitHub's pagination headers.
    """
    assert len(pages) > 0
    responses = {}
    last_page = len(pages)
    for page_number, payload in enumerate(pages, 1):
        links = {}
        if page_number != last_page:
            links['next'] = make_page_url(url, page_number + 1, extra)
        page_url = make_page_url(url, page_number, extra)
        responses[page_url] = MockResponse(json=payload, links=links)
    return responses
class Terminal:
    """A tiny in-memory terminal emulator for checking ANSI cursor output."""

    def __init__(self, width=80, height=24):
        self.width = width
        self.height = height
        self.x = 0
        self.y = 0
        self.rows = [[' '] * width for _ in range(height)]

    def __str__(self):
        rendered = (''.join(row).rstrip() for row in self.rows)
        return '\n'.join(rendered).rstrip()

    def output(self, text):
        """Feed *text* (possibly containing ANSI escape sequences) in."""
        for token in re.split(r'(\033\[\d*[a-zA-Z]|.)', text):
            if token == '\r':
                self.x = 0
            elif token == '\n':
                self.newline()
            elif len(token) == 1:
                self.put_char(token)
            elif token.startswith('\033['):
                command = token[-1:]
                digits = token[2:-1]
                self.control_seq(command, int(digits) if digits else 0)

    def put_char(self, c):
        self.rows[self.y][self.x] = c
        self.x += 1
        if self.x == self.width:
            self.newline()

    def newline(self):
        self.x = 0
        self.y += 1
        if self.y == self.height:
            # Scroll: drop the top line and stay on the bottom row.
            self.y -= 1
            self.delete_line(0)

    def insert_line(self, y):
        self.rows.insert(y, [' '] * self.width)
        del self.rows[-1]

    def delete_line(self, y):
        del self.rows[y]
        self.rows.append([' '] * self.width)

    def control_seq(self, command, param):
        if command == 'A':
            # Move cursor up, clamped at the top row.
            self.y = max(self.y - param, 0)
        elif command == 'B':
            # Move cursor down, clamped at the bottom row.
            self.y = min(self.y + param, self.height - 1)
        elif command == 'L':
            for _ in range(param):
                self.insert_line(self.y)
        elif command == 'M':
            for _ in range(param):
                self.delete_line(self.y)
def show_ansi(text):
    """Make ANSI control sequences visible."""
    replacements = {
        '\033[1A': '{up1}',
        '\033[2A': '{up2}',
        '\033[1B': '{down1}',
        '\033[1L': '{ins1}',
        '\033[2L': '{ins2}',
        '\033[1M': '{del1}',
        '\033[31m': '{red}',
        '\033[32m': '{green}',
        '\033[33m': '{brown}',
        '\033[m': '{reset}',
        '\033': '{esc}',
        '\r': '{cr}',
    }
    # Longest sequences first so e.g. '\033[1A' wins over the bare '\033'.
    ordered = sorted(replacements, key=len, reverse=True)
    pattern = '|'.join(map(re.escape, ordered))
    return re.sub(pattern, lambda match: replacements[match.group(0)], text)
def show_ansi_result(text, width=80, height=24):
    """Render *text* through the Terminal emulator and return the screen."""
    screen = Terminal(width, height)
    screen.output(text)
    return str(screen)
def compare(actual, expected):
    """Assert equality, with ANSI escapes made readable in failure output."""
    assert show_ansi(actual) == show_ansi(expected)
def test_get_json_and_links(mock_requests_get):
    """get_json_and_links() returns the JSON body and the parsed Link headers."""
    url = 'https://github.example.com/api'
    mock_requests_get.update({
        url: MockResponse(
            json={'json': 'data'},
            links={'next': 'https://github.example.com/api?page=2'},
        ),
    })
    data, links = ghcloneall.get_json_and_links(url)
    assert data == {'json': 'data'}
    assert links == {
        'next': {
            'rel': 'next',
            'url': 'https://github.example.com/api?page=2',
        },
    }


def test_get_json_and_links_failure(mock_requests_get):
    """HTTP error responses are surfaced as ghcloneall.Error."""
    url = 'https://github.example.com/api'
    mock_requests_get.update({
        url: MockResponse(
            status_code=400,
            json={'message': 'this request is baaad'},
        ),
    })
    with pytest.raises(ghcloneall.Error):
        ghcloneall.get_json_and_links(url)


def test_get_github_list(mock_requests_get):
    """get_github_list() follows 'next' links, concatenating all pages."""
    mock_requests_get.update({
        'https://github.example.com/api?per_page=100': MockResponse(
            json=[{'item': 1}, {'item': 2}],
            links={
                'next': 'https://github.example.com/api?page=2&per_page=100',
            }),
        'https://github.example.com/api?page=2&per_page=100': MockResponse(
            json=[{'item': 3}, {'item': 4}],
            links={
                'next': 'https://github.example.com/api?page=3&per_page=100',
            }),
        'https://github.example.com/api?page=3&per_page=100': MockResponse(
            json=[{'item': 5}]),
    })
    url = 'https://github.example.com/api'
    progress = []
    res = ghcloneall.get_github_list(url, progress_callback=progress.append)
    assert res == [
        {'item': 1},
        {'item': 2},
        {'item': 3},
        {'item': 4},
        {'item': 5},
    ]
    # The callback saw the cumulative item count after the non-final pages.
    assert progress == [2, 4]
def test_Progress(capsys):
    """status() lines overwrite each other; finish() output is permanent."""
    buf = StringIO()
    progress = ghcloneall.Progress(stream=buf)
    progress.status("hello")
    progress.status("world")
    progress.finish("bye")
    assert buf.getvalue() == (
        '\rhello\r'
        '\r \r'
        '\rworld\r'
        '\r \r'
        'bye\n'
    )
    assert show_ansi_result(buf.getvalue()) == (
        'bye'
    )


def test_Progress_no_output_after_finish(capsys):
    """After finish(), further status/item/hide calls produce no output."""
    buf = StringIO()
    progress = ghcloneall.Progress(stream=buf)
    progress.status("hi")
    progress.finish()
    # these are all ignored
    progress.status("ho")
    progress.clear()
    item = progress.item("boo")
    item.hide()
    item.extra_info("hooo")
    assert buf.getvalue() == (
        '\rhi\r'
        '\r \r'
    )
    assert show_ansi_result(buf.getvalue()) == ''


def test_Progress_progress(capsys):
    """The progress bar redraws when the limit or the item count changes."""
    buf = StringIO()
    progress = ghcloneall.Progress(stream=buf)
    progress.progress()
    assert buf.getvalue() == (
        '\r[....................] 0/0\r'
    )
    progress.set_limit(5)
    assert buf.getvalue() == (
        '\r[....................] 0/0\r'
        '\r \r'
        '\r[....................] 0/5\r'
    )
    progress.item()
    assert buf.getvalue() == (
        '\r[....................] 0/0\r'
        '\r \r'
        '\r[....................] 0/5\r'
        '\r \r'
        '\r[####................] 1/5\r'
    )
    assert show_ansi_result(buf.getvalue()) == (
        '[####................] 1/5'
    )


def test_Progress_context_manager(capsys):
    """A KeyboardInterrupt inside the context prints 'Interrupted' and re-raises."""
    buf = StringIO()
    with pytest.raises(KeyboardInterrupt):
        with ghcloneall.Progress(stream=buf) as progress:
            progress.item()
            raise KeyboardInterrupt()
    assert buf.getvalue() == (
        '\r[####################] 1/0\r'
        '\r \r'
        'Interrupted\n'
    )
    assert show_ansi_result(buf.getvalue()) == (
        'Interrupted'
    )
def test_Progress_item_details(capsys):
    """item.update() repaints the item's line with the detail text in green."""
    buf = StringIO()
    progress = ghcloneall.Progress(stream=buf)
    item = progress.item("first repo")
    compare(
        buf.getvalue(),
        '{brown}first repo{reset}\n'
        '\r[####################] 1/0\r'
    )
    item.update(" - all good")
    compare(
        buf.getvalue(),
        '{brown}first repo{reset}\n'
        '\r[####################] 1/0\r'
        '\r{up1}{green}first repo - all good{reset}\r{down1}'
    )
    assert show_ansi_result(buf.getvalue()) == (
        'first repo - all good\n'
        '[####################] 1/0'
    )


def test_Progress_item_failure(capsys):
    """item.update(failed=True) repaints the item's line in red."""
    buf = StringIO()
    progress = ghcloneall.Progress(stream=buf)
    item = progress.item("first repo")
    compare(
        buf.getvalue(),
        '{brown}first repo{reset}\n'
        '\r[####################] 1/0\r'
    )
    item.update(" - all bad", failed=True)
    compare(
        buf.getvalue(),
        '{brown}first repo{reset}\n'
        '\r[####################] 1/0\r'
        '\r{up1}{red}first repo - all bad{reset}\r{down1}'
    )
    assert show_ansi_result(buf.getvalue()) == (
        'first repo - all bad\n'
        '[####################] 1/0'
    )


def test_Progress_item_finished(capsys):
    """item.finished() repaints the item's line without highlighting."""
    buf = StringIO()
    progress = ghcloneall.Progress(stream=buf)
    item = progress.item("first repo")
    compare(
        buf.getvalue(),
        '{brown}first repo{reset}\n'
        '\r[####################] 1/0\r'
    )
    item.finished()
    compare(
        buf.getvalue(),
        '{brown}first repo{reset}\n'
        '\r[####################] 1/0\r'
        '\r{up1}first repo\r{down1}'
    )
    assert show_ansi_result(buf.getvalue()) == (
        'first repo\n'
        '[####################] 1/0'
    )


def test_Progress_item_finished_and_hidden(capsys):
    """item.finished(hide=True) deletes the item's line from the screen."""
    buf = StringIO()
    progress = ghcloneall.Progress(stream=buf)
    item = progress.item("first repo")
    compare(
        buf.getvalue(),
        '{brown}first repo{reset}\n'
        '\r[####################] 1/0\r'
    )
    item.finished(hide=True)
    compare(
        buf.getvalue(),
        '{brown}first repo{reset}\n'
        '\r[####################] 1/0\r'
        '{up1}{del1}'
    )
    assert show_ansi_result(buf.getvalue()) == (
        '[####################] 1/0'
    )


def test_Progress_item_once_hidden_stays_hidden(capsys):
    """update()/hide() on an already-hidden item produce no further output."""
    buf = StringIO()
    progress = ghcloneall.Progress(stream=buf)
    item = progress.item("first repo")
    compare(
        buf.getvalue(),
        '{brown}first repo{reset}\n'
        '\r[####################] 1/0\r'
    )
    item.finished(hide=True)
    item.update("ha ha")
    item.hide()
    compare(
        buf.getvalue(),
        '{brown}first repo{reset}\n'
        '\r[####################] 1/0\r'
        '{up1}{del1}'
    )
    assert show_ansi_result(buf.getvalue()) == (
        '[####################] 1/0'
    )
def test_Progress_extra_info(capsys):
    """extra_info() inserts an indented detail line under the item."""
    buf = StringIO()
    progress = ghcloneall.Progress(stream=buf)
    item = progress.item("first repo")
    compare(
        buf.getvalue(),
        '{brown}first repo{reset}\n'
        '\r[####################] 1/0\r'
    )
    item.extra_info("this is a very good repo btw")
    compare(
        buf.getvalue(),
        '{brown}first repo{reset}\n'
        '\r[####################] 1/0\r'
        '{ins1} this is a very good repo btw\n'
        # plus a redraw in case the insertion pushed the progress bar offscreen
        '\r \r'
        '\r[####################] 1/0\r'
    )
    assert show_ansi_result(buf.getvalue()) == (
        'first repo\n'
        ' this is a very good repo btw\n'
        '[####################] 1/0'
    )


def test_Progress_error_info(capsys):
    """error_info() inserts an indented detail line rendered in red."""
    buf = StringIO()
    progress = ghcloneall.Progress(stream=buf)
    item = progress.item("first repo")
    compare(
        buf.getvalue(),
        '{brown}first repo{reset}\n'
        '\r[####################] 1/0\r'
    )
    item.error_info("oopsies")
    compare(
        buf.getvalue(),
        '{brown}first repo{reset}\n'
        '\r[####################] 1/0\r'
        # new output
        '{ins1} {red}oopsies{reset}\n'
        # plus a redraw in case the insertion pushed the progress bar offscreen
        '\r \r'
        '\r[####################] 1/0\r'
    )
    assert show_ansi_result(buf.getvalue()) == (
        'first repo\n'
        ' oopsies\n'
        '[####################] 1/0'
    )


def test_Progress_extra_info_but_not_really(capsys):
    """extra_info('') is a no-op and emits no output at all."""
    buf = StringIO()
    progress = ghcloneall.Progress(stream=buf)
    item = progress.item("first repo")
    compare(
        buf.getvalue(),
        '{brown}first repo{reset}\n'
        '\r[####################] 1/0\r'
    )
    item.extra_info("")
    compare(
        buf.getvalue(),
        '{brown}first repo{reset}\n'
        '\r[####################] 1/0\r'
    )
    assert show_ansi_result(buf.getvalue()) == (
        'first repo\n'
        '[####################] 1/0'
    )


def test_Progress_extra_info_multiple_lines(capsys):
    """Multi-line extra_info() inserts one terminal line per text line."""
    buf = StringIO()
    progress = ghcloneall.Progress(stream=buf)
    item = progress.item("first repo")
    compare(
        buf.getvalue(),
        '{brown}first repo{reset}\n'
        '\r[####################] 1/0\r'
    )
    item.extra_info("hi\nho")
    compare(
        buf.getvalue(),
        '{brown}first repo{reset}\n'
        '\r[####################] 1/0\r'
        # new output
        '{ins2} hi\n'
        ' ho\n'
        '\r \r'
        '\r[####################] 1/0\r'
    )
    assert show_ansi_result(buf.getvalue()) == (
        'first repo\n'
        ' hi\n'
        ' ho\n'
        '[####################] 1/0'
    )


def test_Progress_extra_info_not_last_item(capsys):
    """extra_info() on an earlier item inserts above and redraws what follows."""
    buf = StringIO()
    progress = ghcloneall.Progress(stream=buf)
    item1 = progress.item("first repo")
    progress.item("second repo")
    compare(
        buf.getvalue(),
        '{brown}first repo{reset}\n'
        '\r[####################] 1/0\r'
        '\r \r'
        '{brown}second repo{reset}\n'
        '\r[####################] 2/0\r'
    )
    item1.extra_info("wow such magic")
    compare(
        buf.getvalue(),
        '{brown}first repo{reset}\n'
        '\r[####################] 1/0\r'
        '\r \r'
        '{brown}second repo{reset}\n'
        '\r[####################] 2/0\r'
        # new output
        '{up1}{ins1} wow such magic\n'
        # plus a redraw of everything below the updated item in case the
        # insertion pushed the progress bar offscreen
        '{brown}second repo{reset}\n'
        '\r \r'
        '\r[####################] 2/0\r'
    )
    assert show_ansi_result(buf.getvalue()) == (
        'first repo\n'
        ' wow such magic\n'
        'second repo\n'
        '[####################] 2/0'
    )
def test_Progress_extra_info_not_last_item_redraws_all_below(capsys):
buf = | |
univariate
# statistics and their confidence limits, but no multivariate
# statistic.
statistics = {"tests_passed": _passed,
"R": _R, # Univariate PSRFs
"confidence_limits": _Ru} # Univariate confidence limits
return _test, statistics
def _multivariate_R(self, X):
L, N, M = X.shape
fN = float(N)
fM = float(M)
if L > N:
import warnings
warnings.warn("There are too few samples relative to the number "
"of parameters.")
W = np.zeros((M, L, L))
for m in range(M):
# W = (1.0 / (fM * (fN - 1.0))) *
Xm = X[:, :, m]
W[m, :, :] = np.cov(Xm, ddof=1)
W = np.mean(W, axis=0)
B_n = np.mean(X, axis=1) # Sum over iterations
B_n = np.cov(B_n, ddof=1)
# B = B_n * fN
# No need to actually construct V.
# V = ((fN - 1) / fN) * W + (1.0 + 1.0 / fM) * B_n
W_B_n = np.linalg.solve(W, B_n) # dot(inv(W), B / n)
if L <= 2: # eigs doesn't work for 2x2 matrices
lambda_1 = np.linalg.eigvals(W_B_n)
lambda_1 = lambda_1.real[0]
else:
lambda_1 = linalg.eigs(W_B_n, k=1, return_eigenvectors=False)
lambda_1 = lambda_1.real[0]
# The multivariate potential scale reduction factor (MPSRF).
Rp = ((fN - 1.0) / fN) + ((fM + 1.0) / fM) * lambda_1
Rp = np.sqrt(Rp)
return Rp
def _univariate_R(self, X):
    """Compute univariate (corrected) PSRFs and their upper confidence limits.

    Arguments
    ---------
    X : numpy.array, shape (L, N, M)
        L parameters, N samples per chain, M chains.

    Returns
    -------
    _R : list, length L
        The corrected potential scale reduction factor per parameter.
    _Ru : list, length L
        The upper confidence limit per parameter (at level ``self.alpha``).
    """
    L, N, M = X.shape
    fN = float(N)
    fM = float(M)
    _R = [0] * L
    _Ru = [0] * L
    for l in range(L):  # TODO: Vectorise this loop!
        Xl = X[l, :, :]
        # Per-chain means and sample variances.
        mus = np.mean(Xl, axis=0)
        s2s = np.var(Xl, axis=0, ddof=1)
        mu = np.mean(mus)
        # Between-chain variance B and B / n.
        B = np.var(mus, ddof=1) * fN
        B_n = B / fN
        # Within-chain variance W.
        W = np.mean(s2s)
        if W < consts.TOLERANCE:
            raise ValueError("All entries in the matrix are equal, or "
                             "extremely similar.")
        # Pooled posterior variance estimate V.
        s2p = ((fN - 1.0) / fN) * W + B_n
        V = s2p + B_n / fM
        var_W = np.var(s2s, ddof=1) / fM
        R = V / W
        # Sampling (co)variances feeding the degrees-of-freedom correction.
        var_B = B * B * 2.0 / (fM - 1.0)
        cov_WB = (fN / fM) * (self._cov(s2s, mus**2.0) - 2.0 * mu * self._cov(s2s, mus))
        var_V = (((fN - 1.0) / fN)**2.0) * var_W \
            + (((fM + 1.0) / (fM * fN))**2.0) * var_B \
            + ((2.0 * (fM + 1.0) * (fN - 1.0)) / (fM * fN * fN)) * cov_WB
        # d.o.f. correction factor.
        d = (2.0 * V * V) / var_V
        cor = ((d + 3.0) / (d + 1.0))
        R = cor * R
        # The (corrected) potential scale reduction factor ([C]PSRF).
        R = np.sqrt(R)
        # Perform formal test (compute upper confidence limit)
        df_num = fM - 1.0
        df_den = 2.0 * W * W / var_W
        fcrit = stat.f.ppf(1.0 - self.alpha / 2.0, df_num, df_den)
        Ru = (((fN - 1.0) / fN) + ((fM + 1.0) / (fM * fN)) * (B / W) * fcrit) * cor
        Ru = np.sqrt(Ru)
        _R[l] = R
        _Ru[l] = Ru
    return _R, _Ru
def _transform(self, X):
"""Transform variables (log or logit) that are not normal.
Arguments
---------
X : numpy.array, shape (L, N, M)
The data matrix (three-dimensional). It is assumed that any sanity
checks have been performed already.
"""
# TODO: Other transformations?
L, N, M = X.shape
for l in range(L):
Xl = X[l, :, :]
min_Xl = np.min(Xl)
if min_Xl >= 0.0:
max_Xl = np.max(Xl)
if max_Xl <= 1.0: # Xl \in [0, 1]^{M \times N}
import scipy.special
scipy.special.logit(Xl, Xl)
else: # Xl \in [0, np.inf]^{M \times N}
np.log(X, out=Xl)
X[l, :, :] = Xl
return X
def _cov(self, a, b):
return np.cov(a, b, ddof=1)[0, 1]
class Geweke(ConvergenceTest):
    """Performs the Geweke test of convergence of a Markov chain.

    Arguments
    ---------
    window1 : float, optional
        A float in [0, 1] such that window1 + window2 < 1. The proportion of
        samples to include in the first window.
    window2 : float, optional
        A float in [0, 1] such that window1 + window2 < 1. The proportion of
        samples to include in the second window.
    discard_prop : float, optional
        A float in [0, 1]. Discards a fraction ``discard_prop`` of the first
        samples (burn-in). Note that it will always keep a number of samples so
        that there are at least two samples in each window, if ``discard_prop``
        is too large with respect to the number of samples. Default is 0.5,
        i.e. discards the first half of the samples.
    alpha : float, optional
        A float in [0, 1]. The confidence level to compute the confidence limit
        for. The test will not automatically correct for multiple comparisons;
        you must do this manually. Default is 0.05, which means it performs the
        test on the 5 % level.
    axis : int, optional
        The axis of the input array along which the samples lie. Default 0.

    References
    ----------
    Geweke, John (1992). "Evaluating the Accuracy of Sampling-Based Approaches
    to the Calculation of Posterior Moments". In Bayesian Statistics,
    <NAME>., <NAME>., <NAME>. and <NAME>. (eds.),
    pp. 169--193. Oxford University Press, Oxford, UK.
    Heidelberger, Philip and Welch, <NAME>. (1981). "A Spectral Method for
    Confidence Interval Generation and Run Length Control in Simulations".
    Communications of the ACM, 24(4): 233-245.
    Wikipedia contributors (2017), "Autoregressive model". Wikipedia: The Free
    Encyclopedia. Wikimedia Foundation, Inc.. Retrieved August 8, 2017, from:
    https://en.wikipedia.org/wiki/Autoregressive_model.

    Examples
    --------
    NOTE: example outputs below are skipped; they were generated before the
    window-2 autoregression fix (see ``test``) and need regeneration.

    >>> import parsimony.utils.mcmc as mcmc
    >>> import numpy as np
    >>> np.random.seed(1337)
    >>>
    >>> X = np.random.rand(2, 10000, 3)
    >>> test = mcmc.Geweke(alpha=0.05, axis=1)
    >>> passed, stats = test(X)  # doctest: +SKIP
    >>> passed  # doctest: +SKIP
    True
    """

    def __init__(self, window1=0.1, window2=0.5, discard_prop=0.5, alpha=0.05,
                 axis=0):
        super(Geweke, self).__init__(discard_prop=discard_prop, alpha=alpha)
        # Clamp window proportions into [0, 1].
        self.window1 = max(0.0, min(float(window1), 1.0))
        self.window2 = max(0.0, min(float(window2), 1.0))
        if self.window1 + self.window2 >= 1.0:
            raise ValueError("The sum window1 + window2 must be smaller than "
                             "1.0.")
        self.axis = int(axis)

    def test(self, X):
        """Performs the test and computes test statistics.

        Arguments
        ---------
        X : numpy.array
            The data to test. One of the dimensions (``axis``) corresponds to
            the samples from a Markov chain, and the other dimensions
            represents different chains (e.g. separate chains and/or different
            parameters).

        Returns
        -------
        test_result : bool
            Whether the test says the chain has converged or not. For multiple
            parameters, returns True only if all parameters' chains have
            converged.
        statistics : dict
            Test statistics. A dict with numpy arrays will be returned where
            each element of the array corresponds to the statistics for each
            different chain. If one-dimensional, the test statistics will be
            returned directly in the dict.
        """
        # Discard the first self.discard_prop fraction of the samples.
        N = X.shape[self.axis]
        if N > 2:
            start_N = int(np.floor(N * self.discard_prop) + 0.5)
            idx = [slice(None)] * X.ndim
            idx[self.axis] = slice(start_N, None)
            # Index with a tuple: indexing an ndarray with a list of slices
            # is invalid in modern NumPy.
            X = X[tuple(idx)]
            N = X.shape[self.axis]
        w1 = int(np.round(self.window1 * N) + 0.5)
        w2 = int(np.round(self.window2 * N) + 0.5)
        n1 = w1
        n2 = N - w2
        # NOTE(review): n2 = N - w2 equals w2 only when window2 == 0.5 (the
        # default); the classical Geweke statistic divides by the window-2
        # sample count (w2) -- confirm intent for other window sizes.
        if n1 < 2 or n2 < 2:
            raise ValueError("At least two samples must be computed for each "
                             "window.")
        idx = [slice(None)] * X.ndim
        idx[self.axis] = slice(None, w1)
        W1 = X[tuple(idx)]
        idx[self.axis] = slice(-w2, None)
        W2 = X[tuple(idx)]
        mu1 = np.mean(W1, axis=self.axis)
        mu2 = np.mean(W2, axis=self.axis)
        # Spectral density estimates at frequency zero from an AR(2) fit.
        # (Dead np.var stores that were immediately overwritten here have
        # been removed.)
        phi1, s21 = autoregression(W1, p=2, lag=1, axis=self.axis)
        # BUG FIX: the second window's AR model was fitted on W1 instead of
        # W2, so the window-2 spectral density was wrong.
        phi2, s22 = autoregression(W2, p=2, lag=1, axis=self.axis)
        s21 = np.divide(s21, (1.0 - np.sum(phi1))**2)  # PSD at f=0.
        s22 = np.divide(s22, (1.0 - np.sum(phi2))**2)
        # Z score for the difference of the two window means.
        Z = np.divide(mu1 - mu2,
                      np.sqrt((s21 / float(n1)) + (s22 / float(n2))))
        p = 2.0 * (1.0 - stat.norm.cdf(np.abs(Z)))
        _passed = p > self.alpha
        statistics = {"tests_passed": _passed,
                      "z": Z,  # Univariate z scores.
                      "p": p}  # p-values.
        return np.all(_passed), statistics
class RafteryLewis(ConvergenceTest):
"""Performs the Raftery and Lewis diagnosis test to determine chain length.
Arguments
---------
q : float, optional
A float in [0, 1]. The quantile to investigate. Default is 0.025.
r : float, optional
A float in [0, | |
<reponame>ameet-1997/Prioritized_Experience_Replay
#!/usr/bin/python
# -*- encoding=utf-8 -*-
# author: Ian
# e-mail: <EMAIL>
# description:
import sys
import math
import random
import numpy as np
from baselines.her.binary_heap import BinaryHeap
"""
Important point
self.learn_start > self.batch_size
and self.learn_start > size of partition, i.e., self.size/partition_num
This is because we need to take the first partition at least, as there
is a floor operation in sample
"""
class Experience(object):
def __init__(self, conf):
    """Create a rank-based prioritized replay buffer from a config dict."""
    self.size = conf['size']
    # Whether a lowest-priority transition is evicted once the heap fills.
    self.replace_flag = conf.get('replace_old', True)
    # Maximum capacity of the priority heap.
    self.priority_size = conf.get('priority_size', self.size)
    # Priority exponent alpha from Equation (1) of the PER paper.
    self.alpha = conf.get('alpha', 0.7)
    # Importance-sampling correction, annealed linearly from beta_zero
    # to 1 (Section 3.4 of the paper).
    self.beta_zero = conf.get('beta_zero', 0.5)
    self.batch_size = conf.get('batch_size', 32)
    self.learn_start = conf.get('learn_start', 32)
    self.total_steps = conf.get('steps', 100000)
    # Split the buffer into partition_num segments of partition_size.
    self.partition_num = conf.get('partition_num', 100)
    self.partition_size = conf.get(
        'partition_size', math.floor(self.size / self.partition_num))
    if 'partition_size' in conf:
        # An explicit partition size overrides the partition count.
        self.partition_num = math.floor(self.size / self.partition_size)
    self.index = 0
    self.record_size = 0
    self.isFull = False
    self._experience = {}
    self.priority_queue = BinaryHeap(self.priority_size)
    # Added in new code
    self.distribution = None
    self.dist_index = 1
    self.beta_grad = (1 - self.beta_zero) / (self.total_steps - self.learn_start)
    # Debug Code
    self.debug = {}
# Return the correct distribution, build if required
def return_distribution(self, dist_index):
    """Return the rank-based sampling distribution for a partition index.

    The distribution is cached; it is rebuilt lazily whenever the buffer
    has grown into a new partition (``dist_index`` increased).
    """
    if (dist_index == self.dist_index) and self.dist_index > 1:
        # Cached distribution is still valid.
        return self.distribution
    elif dist_index < self.dist_index:
        # print("Dist_index is: "+str(dist_index))
        # print("Self.dist_index is: "+str(self.dist_index))
        raise Exception('Elements have been illegally deleted from the priority_queue in rank_based')
    else:
        res = {}  # NOTE(review): unused local -- candidate for removal
        # Store the current dist_index
        self.dist_index = dist_index
        partition_num = dist_index
        # The procedure being followed here is that given on Page 13
        # last line. We divide the whole range into 'k' segments
        # This has the advantage that the same transition will not be
        # picked twice in the same batch (Stratified Sampling)
        n = partition_num * self.partition_size
        if self.batch_size <= n <= self.priority_size:
            distribution = {}
            # P(i) = (rank i) ^ (-alpha) / sum ((rank i) ^ (-alpha))
            pdf = list(
                map(lambda x: math.pow(x, -self.alpha), range(1, n + 1))
            )
            pdf_sum = math.fsum(pdf)
            distribution['pdf'] = list(map(lambda x: x / pdf_sum, pdf))
            # split to k segment, and than uniform sample in each k
            # set k = batch_size, each segment has total probability is 1 / batch_size
            # strata_ends keep each segment start pos and end pos
            # The following code creates strata_ends such that
            # strata_ends[i]-strata_ends[0] = (i-1)*1/batch_size probability
            cdf = np.cumsum(distribution['pdf'])
            strata_ends = {1: 0, self.batch_size + 1: n}
            step = 1 / self.batch_size
            index = 1
            for s in range(2, self.batch_size + 1):
                # Advance until the cumulative mass reaches this stratum's
                # boundary; each stratum then holds ~1/batch_size mass.
                while cdf[index] < step:
                    index += 1
                strata_ends[s] = index
                step += 1 / self.batch_size
            distribution['strata_ends'] = strata_ends
            # print("The strata is: "+str(distribution['strata_ends']))
            self.distribution = distribution
        return self.distribution
def fix_index(self):
"""
get next insert index
:return: index, int
"""
if self.record_size <= self.size:
# self.record_size increases till self.size and stays there after that
self.record_size += 1
# self.index is being monotonically increased and thus say self.size = 3
# When self.index = 3, the heap is full and when self.index = 6, three
# other elements have been added and hence it is still full
# This will happen because replace is True
# But self.index is being set to 1 when replace_flag is true. Hence I don't
# see why % operator was used
if self.index % self.size == 0:
# This condition because self.index = 0 initially, so control will always reach here
self.isFull = True if len(self._experience) == self.size else False
if self.replace_flag:
self.index = 1
# Doubt:: Won't the highest priority node be replaced?
return self.index
else:
sys.stderr.write('Experience replay buff is full and replace is set to FALSE!\n')
return -1
else:
self.index += 1
return self.index
def store(self, experience):
"""
store experience, suggest that experience is a tuple of (s1, a, r, s2, g, t)
so each experience is valid
:param experience: maybe a tuple, or list
:return: bool, indicate insert status
"""
# This function should ideally be called only if a new experience needs to
# be stored because it is given the highest priority
# Get the next position to be inserted in
insert_index = self.fix_index()
if insert_index > 0:
# Remove the previous experience with the same index
if insert_index in self._experience:
del self._experience[insert_index]
# Add the newest experience
self._experience[insert_index] = experience
######Debug
self._experience[insert_index]['new'] = True
######
# add to priority queue
# Add it with max priority so that it gets picked as soon as possible
priority = self.priority_queue.get_max_priority()
# Update the node where the new experience was inserted
self.priority_queue.update(priority, insert_index)
# print('The buffer size is: '+str(self.record_size))
return True
else:
# This happens if replace is set to false and elements
# are trying to be added
sys.stderr.write('Insert failed\n')
return False
def retrieve(self, indices):
"""
get experience from indices
:param indices: list of experience id
:return: experience replay sample
"""
# self.debug['old'] = 0
# self.debug['new'] = 0
# #######Debug
# for v in indices:
# if 'new' in self._experience[v].keys():
# self.debug['new'] += 1
# del self._experience[v]['new']
# else:
# self.debug['old'] += 1
# f = open('new_old_ratio.txt', 'a')
# f.write("The ratio is: "+str(float(self.debug['new'])/self.debug['old'])+'\n')
# #######
# Given a list of Experience_IDs, return the experience
# it represents
return [self._experience[v] for v in indices]
def rebalance(self):
    """
    rebalance priority queue
    :return: None
    """
    # Rebuild the heap from scratch so ranks reflect current priorities.
    self.priority_queue.balance_tree()
# Function called from rank_based_test.py
def update_priority(self, indices, delta):
"""
update priority according indices and deltas
:param indices: list of experience id
:param delta: list of delta, order correspond to indices
:return: None
"""
# Update the priority of the node. indices[i] should represent
# the experience_ID and delta should represent TD error (priority) --- Check this
# Update the priorities of multiple nodes, given their experience_ids
# and new priorities
for i in range(0, len(indices)):
self.priority_queue.update(math.fabs(delta[i]), indices[i])
# if batch_size argument is passed, use that, else use the one at __init__
def sample(self, global_step, uniform_priority=False, batch_size=32):
"""
sample a mini batch from experience replay
:param global_step: now training step
:return: experience, list, samples
:return: w, list, weights
:return: rank_e_id, list, samples id, used for update priority
"""
if self.record_size < self.learn_start:
# Store a minimum of self.learn_start number of experiences before starting
# any kind of learning. This is done to ensure there is not learning happening
# with very small number of examples, leading to unstable estimates
# Recollect: self.record_size increases till it reaches self.size and then stops there
sys.stderr.write('Record size less than learn start! Sample failed\n')
return False, False, False
# If the replay buffer is not full, find the right partition to use
# If only half the buffer is full, partition number 'self.partition_num/2'
# is used because there are only those many ranks assigned
# dist_index will always be the last partition after the replay
# buffer is full. If it is not full, it will represent some
# partition number less than that
# print("(In rank_based_new.py) Values are (record_size, size, partition_num)::"+str(self.record_size)+"::"+str(self.size)+"::"+str(self.partition_num))
dist_index = math.floor(self.record_size / self.size * self.partition_num)
# dist_index = max(math.floor(self.record_size / self.size * self.partition_num)+1, self.partition_num)
# issue 1 by @camigord
partition_size = math.floor(self.size / self.partition_num)
partition_max = dist_index * partition_size
############################
# distribution = self.distributions[dist_index]
# print("Dist Index is: "+str(dist_index))
distribution = self.return_distribution(dist_index)
############################
# print("Dist Index is: "+str(dist_index))
rank_list = []
# sample from k segments
if uniform_priority==True:
for i in range(1, self.batch_size + 1):
index = | |
x0, n_compounds, names, c0_from_df, single_point = _parse_c0(
c0, names, solvent_density
)
_check_c0(x0)
N, logK = _parse_N_input(N, logK, names, c0_from_df, solvent_density)
_check_N(N, n_compounds)
logK = _parse_K(K, logK, N, solvent_density)
_check_logK(N, logK)
A = _parse_A(A, names, c0_from_df)
_check_A(A, n_compounds)
G = _parse_G(G, names, c0_from_df, G_units, T)
_check_G(G, N, A)
return x0, N, logK, A, G, names, solvent_density, single_point
def to_df(c, c0=None, names=None, units=None):
    """Convert output to a Pandas DataFrame.

    It is preferred to use the `names` keyword argument when calling
    `eqtk.solve()` rather than this function.

    Parameters
    ----------
    c : Numpy array
        Equilibrium concentrations of all species. `c[i, j]` is the
        equilibrium concentration of species `j` for initial
        concentrations given by `c0[i, :]` in units given by `units`.
    c0 : Numpy array, Pandas Series, or Pandas DataFrame; or `None`
        The value of `c0` that was used in a call to `eqtk.solve()`,
        `eqtk.solveNK()`, `eqtk.solveNG()`, or `eqtk.solveAG()`. If
        `None`, then no information about initial concentrations will be
        added to the outputted data frame.
    names : list of strings
        Names of the chemical species. If `None`, the names are assumed
        to be 'species_1', 'species_2', etc.
    units : str
        The units of concentration. The column headings in the outputted
        data frame have `f' ({units})'` appended to them.

    Returns
    -------
    output : Pandas DataFrame or Pandas Series
        If a single calculation was done, the output is a Pandas Series,
        otherwise a Pandas DataFrame. The column headings are
        descriptive, e.g., for chemical species HA, with `units = 'mM'`,
        the heading is '[HA] (mM)'.

    Raises
    ------
    ValueError
        If the inputted `c` is not a Numpy array.
    """
    if type(c) in [pd.core.series.Series, pd.core.frame.DataFrame]:
        raise ValueError("Inputted `c` is already a DataFrame or Series.")
    if type(c) != np.ndarray:
        raise ValueError("Inputted `c` must be a Numpy array.")
    # Normalize a single result row to a 2-D array.
    if len(c.shape) == 1:
        c = c.reshape((1, len(c)))
    units_str = _units_str(units)
    if names is None:
        names = [f"species_{i + 1}" for i in range(c.shape[1])]
    else:
        _check_names_type(names)
    if c0 is None:
        # No initial concentrations: only equilibrium columns.
        cols = [name + units_str for name in names]
        if len(c) == 1:
            return pd.Series(data=c.flatten(), index=cols)
        return pd.DataFrame(data=c, columns=cols)
    c0, n_compounds, names, _, single_point = _parse_c0(c0, names, solvent_density=1.0)
    if c0.shape != c.shape:
        raise ValueError("`c0` and `c` have mismatched shapes.")
    # Initial-concentration columns are tagged with "__0".
    cols = ["[" + name + "]" + "__0" + units_str for name in names]
    cols += ["[" + name + "]" + units_str for name in names]
    if single_point:
        return pd.Series(index=cols, data=np.concatenate((c0.flatten(), c.flatten())))
    return pd.DataFrame(columns=cols, data=np.concatenate((c0, c), axis=1))
def _units_str(units):
return " (" + units + ")" if units is not None else ""
def _parse_output(logx, x0, names, solvent_density, single_point, units, return_log):
"""
"""
if (
not return_log
and np.logical_and(~np.isinf(logx), logx < constants._min_logx).any()
):
warnings.warn(
f"One or more natural log concentrations are less then {constants._min_logx}. You may want to run the calculation with the `return_log=True` keyword argument. If you do that, you must work in dimensionless units."
)
if return_log:
c = logx
c0 = x0
elif solvent_density is None:
c = np.exp(logx)
c0 = x0
else:
c = np.exp(logx) * solvent_density
c0 = x0 * solvent_density
if single_point:
c = c.flatten()
c0 = c0.flatten()
if names is None:
return c
if return_log:
units_str = ""
pre_str = "ln "
else:
pre_str = ""
units_str = _units_str(units)
# Names of columns for outputted data frames
cols = [f"[{name}]__0{units_str}" for name in names]
cols += [f"{pre_str}[{name}]{units_str}" for name in names]
if single_point:
return pd.Series(data=np.concatenate((c0, c)), index=cols)
else:
return pd.DataFrame(data=np.concatenate((c0, c), axis=1), columns=cols)
def _parse_fixed_c(fixed_c, x0, c0_from_df, names, solvent_density):
    """
    Parse user-supplied fixed concentrations into a dimensionless 2D array.

    Parameters
    ----------
    fixed_c : list, tuple, Numpy array, dict, Pandas Series or DataFrame
        Concentrations to hold fixed. NaN entries (and negative entries,
        which are converted to NaN below) mean "not fixed". The container
        style must match that of `c0`: if `c0` came from a dict/Series/
        DataFrame, `fixed_c` must too, so columns can be aligned by name.
    x0 : Numpy array, shape (n_points, n_compounds)
        Initial concentrations, one row per calculation.
    c0_from_df : bool
        True if `c0` was entered as a dict, Series or DataFrame.
    names : list of str or None
        Species names used to align named inputs.
    solvent_density : float
        Solvent number density used to nondimensionalize concentrations.

    Returns
    -------
    fixed_c : Numpy array
        Dimensionless, C-contiguous fixed concentrations, row-expanded to
        match `x0`.
    x0 : Numpy array
        C-contiguous `x0`, row-expanded to match `fixed_c`.
    single_point : bool
        True if only a single calculation (one row) is being done.

    Raises
    ------
    ValueError
        On type/shape mismatches or zero fixed concentrations.
    """
    # Plain sequences become float arrays and take the Numpy path below.
    if type(fixed_c) == list or type(fixed_c) == tuple:
        fixed_c = np.array(fixed_c, order="C", dtype=float)
    if type(fixed_c) == np.ndarray:
        if c0_from_df:
            raise ValueError(
                "If `c0` is entered as a dict, Series or DataFrame, so must `fixed_c`."
            )
        # Promote 1D input to a single row; reject anything above 2D.
        if len(fixed_c.shape) == 1:
            n_compounds = fixed_c.shape[0]
            fixed_c = np.expand_dims(fixed_c, axis=0)
        elif len(np.shape(fixed_c)) == 2:
            n_compounds = fixed_c.shape[1]
        else:
            raise ValueError("`fixed_c` is the wrong shape.")
        fixed_c = fixed_c.astype(float)
    else:
        # Named containers: align columns against `names` before converting.
        if type(fixed_c) == dict:
            fixed_c = _dict_to_df(fixed_c)
        if type(fixed_c) == pd.core.frame.DataFrame:
            names = _check_names_df(names, list(fixed_c.columns))
        elif type(fixed_c) == pd.core.series.Series:
            names = _check_names_df(names, list(fixed_c.index))
        else:
            raise ValueError(
                "`fixed_c` must be a Pandas series or data frame or Numpy array."
            )
        fixed_c = fixed_c[names].to_numpy(dtype=float, copy=True)
        if len(fixed_c.shape) == 1:
            fixed_c = np.expand_dims(fixed_c, axis=0)
    # Check for consistency with x0
    if x0.shape[1] != fixed_c.shape[1]:
        raise ValueError("`fixed_c` and `c0` must have the same number of columns.")
    # Convert negative concentrations to NaN ("not fixed").
    # NOTE(review): `np.less` with `where=` but no `out=` leaves the masked
    # entries of the result uninitialized; harmless here because masked
    # (NaN) entries would only be re-assigned NaN, but `fixed_c < 0` would
    # be clearer — confirm before changing.
    fixed_c[np.less(fixed_c, 0, where=~np.isnan(fixed_c))] = np.nan
    # Cannot have zero entries
    if np.any(fixed_c == 0):
        raise ValueError(
            "Cannot fix the concentration of any species to zero. If you want to remove a species from consideration, you need to specify the relevant entries in `c0` to be zero."
        )
    # Expand the shapes, as necessary: broadcast whichever of x0 / fixed_c
    # has a single row to match the other.
    if x0.shape[0] == 1 and fixed_c.shape[0] > 1:
        x0 = np.repeat(x0, fixed_c.shape[0], axis=0)
    if x0.shape[0] > 1 and fixed_c.shape[0] == 1:
        fixed_c = np.repeat(fixed_c, x0.shape[0], axis=0)
    return (
        np.ascontiguousarray(fixed_c / solvent_density),
        np.ascontiguousarray(x0),
        x0.shape[0] == 1,
    )
def _nondimensionalize_NK(c0, N, K, T, solvent_density, units):
# Convert K's and c0 to dimensionless
if K is not None:
K_nondim = K / solvent_density ** N.sum(axis=1)
else:
K_nondim = None
c0_nondim = c0 / solvent_density
return c0_nondim, K_nondim
def _nondimensionalize_AG(c0, G, T, solvent_density, units, G_units):
    """
    Nondimensionalize free energies and initial concentrations.

    The solvent density is resolved first (computed from T/units when not
    given explicitly), free energies are converted to units of kT, and
    concentrations are divided by the solvent density.

    Returns
    -------
    tuple : (c0_nondim, G_nondim, solvent_density)
    """
    solvent_density = _parse_solvent_density(solvent_density, T, units)
    G_nondim = _dimensionless_free_energy(G, G_units, T)
    return c0 / solvent_density, G_nondim, solvent_density
def _parse_solvent_density(solvent_density, T, units):
if solvent_density is None:
return water_density(T, units)
elif (units is None or units == "") and solvent_density != 1.0:
raise ValueError(
"If `solvent_density` is specified, `units` must also be specified."
)
return solvent_density
def water_density(T, units="M"):
    """
    Calculate the number density of pure water at atmospheric pressure
    in specified units.

    Parameters
    ----------
    T : float
        Temperature in Kelvin. Must satisfy 273.15 <= T <= 372.15 unless
        the calculation is dimensionless (`units` is None or '').
    units : string, default = 'M'
        The units in which the density is to be calculated.
        Valid values are: 'M', 'mM', 'uM', 'µM', 'nM', 'pM' and their long
        forms ('molar', 'millimolar', ...). If None or '', the calculation
        is dimensionless and 1.0 is returned.

    Returns
    -------
    water_density : float
        Number density of water in `units`.

    Raises
    ------
    ValueError
        If `T` is outside the tabulated range or `units` is not recognized.

    Notes
    -----
    Uses pre-calculated values of water density from the IAPWS-95
    standards as calculated from the iapws Python package
    (https://iapws.readthedocs.io/), as

        np.array([iapws.IAPWS95(T=T+273.15, P=0.101325).rho
                  for T in np.linspace(0.1, 99.9, 999)]) / 18.01528

    References
    ----------
    The IAPWS formulation 1995 for the thermodynamic properties of
    ordinary water substance for general and scientific use,
    J. Phys. Chem. Ref. Data, 31, 387-535, 2002.
    https://doi.org/10.1063/1.1461829
    """
    # If dimensionless, take solvent density to be unity.
    if units is None or units == "":
        return 1.0
    if T < 273.15 or T > 372.15:
        # Bug fix: the old message quoted bounds (273.16 < T < 373.14) that
        # did not match the actual check above.
        raise ValueError("To compute water density, must have 273.15 <= T <= 372.15.")
    # Linearly interpolate the tabulated IAPWS densities. The indexing
    # treats the table as one entry per degree Celsius (i = int(T - 273.15),
    # special-cased at i == 99). NOTE(review): the docstring's linspace
    # suggests a 0.1 °C table resolution — confirm which is correct.
    i = int(T - 273.15)
    if i == 99:
        dens = constants._iapws_rho[-1]
    else:
        rho_1 = constants._iapws_rho[i]
        rho_2 = constants._iapws_rho[i + 1]
        dens = (rho_2 - rho_1) * (T - i - 273.15) + rho_1
    # Valid units. Bug fix: a missing comma fused "mole fraction" and "M"
    # into the single string "mole fractionM", leaving 'M' out of the list.
    # NOTE(review): "mole fraction" is listed but not handled below, so it
    # still raises in the conversion chain — confirm intended behavior.
    allowed_units = (
        None,
        "mole fraction",
        "M",
        "molar",
        "mM",
        "millimolar",
        "uM",
        "µM",
        "micromolar",
        "nM",
        "nanomolar",
        "pM",
        "picomolar",
    )
    # Convert from molar to the specified units.
    if units in ["millimolar", "mM"]:
        dens *= 1000.0
    elif units in ["micromolar", "uM", "µM"]:
        dens *= 1000000.0
    elif units in ["nanomolar", "nM"]:
        dens *= 1000000000.0
    elif units in ["picomolar", "pM"]:
        dens *= 1000000000000.0
    elif units not in ["molar", "M"]:
        raise ValueError(
            f"Specified concentration units of {units} not in {allowed_units}."
        )
    return dens
def _dimensionless_free_energy(G, units, T=293.15):
"""
Convert free energy to dimensionless units, where G is in given units.
"""
if units is None or units == "kT":
return G
elif T is None:
raise ValueError("If G is specified with units, must also supply T.")
kT = _thermal_energy(T, units)
return G / kT
def _thermal_energy(T, units):
"""
Return value of thermal energy kT in specified units. T is
assumed to be in Kelvin.
"""
if T < 100.0:
warnings.warn("WARNING: T may be in wrong units, must be in K.")
allowed_units = ["kcal/mol", "J", "J/mol", "kJ/mol", "pN-nm"]
if units == "kcal/mol":
return constants.kB_kcal_per_mol_K * T
elif units == "J":
| |
except AttributeError as e:
raise ValueError(str(e) + ". Type %d not understood" % type)
return f
def _eval_fun(f, tmp, n, axis, nm, overwrite_x):
if axis == -1 or axis == len(tmp.shape) - 1:
return f(tmp, n, nm, overwrite_x)
tmp = numpy.swapaxes(tmp, axis, -1)
tmp = f(tmp, n, nm, overwrite_x)
return numpy.swapaxes(tmp, axis, -1)
def _raw_dct(x0, type, n, axis, nm, overwrite_x):
    """Look up the typed DCT routine for x0's dtype and evaluate it along `axis`."""
    dct_fun = _get_dct_fun(type, x0.dtype)
    return _eval_fun(dct_fun, x0, n, axis, nm, overwrite_x)
def _dct(x, type, n=None, axis=-1, overwrite_x=False, normalize=None):
    """
    Return the Discrete Cosine Transform of arbitrary type sequence x.

    Parameters
    ----------
    x : array_like
        Input array.
    type : int
        DCT type (1, 2 or 3).
    n : int, optional
        Length of the transform; `x` is truncated or zero-padded to this
        length. Defaults to ``x.shape[axis]``.
    axis : int, optional
        Axis along which the DCT is computed (default: last axis).
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed.
    normalize : {None, 'ortho'}, optional
        Normalization mode.

    Returns
    -------
    z : ndarray
        The transform. Complex input is handled by transforming the real
        and imaginary parts independently.
    """
    arr, n, copy_made = __fix_shape(x, n, axis, 'DCT')
    if type == 1 and n < 2:
        raise ValueError("DCT-I is not defined for size < 2")
    # If __fix_shape already copied the data, we may overwrite the copy.
    may_overwrite = overwrite_x or copy_made
    norm_mode = _get_norm_mode(normalize)
    if not numpy.iscomplexobj(arr):
        return _raw_dct(arr, type, n, axis, norm_mode, may_overwrite)
    real_part = _raw_dct(arr.real, type, n, axis, norm_mode, may_overwrite)
    imag_part = _raw_dct(arr.imag, type, n, axis, norm_mode, may_overwrite)
    return real_part + 1j * imag_part
def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False):
    """
    Return the Discrete Cosine Transform of arbitrary type sequence x.
    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3}, optional
        Type of the DCT (see Notes). Default type is 2.
    n : int, optional
        Length of the transform. If ``n < x.shape[axis]``, `x` is
        truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
        default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the dct is computed; the default is over the
        last axis (i.e., ``axis=-1``).
    norm : {None, 'ortho'}, optional
        Normalization mode (see Notes). Default is None.
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
    Returns
    -------
    y : ndarray of real
        The transformed input array.
    See Also
    --------
    idct : Inverse DCT
    Notes
    -----
    For a single dimension array ``x``, ``dct(x, norm='ortho')`` is equal to
    MATLAB ``dct(x)``.
    There are theoretically 8 types of the DCT, only the first 3 types are
    implemented in scipy. 'The' DCT generally refers to DCT type 2, and 'the'
    Inverse DCT generally refers to DCT type 3.
    **Type I**
    There are several definitions of the DCT-I; we use the following
    (for ``norm=None``)::
                                         N-2
      y[k] = x[0] + (-1)**k x[N-1] + 2 * sum x[n]*cos(pi*k*n/(N-1))
                                         n=1
    Only None is supported as normalization mode for DCT-I. Note also that the
    DCT-I is only supported for input size > 1
    **Type II**
    There are several definitions of the DCT-II; we use the following
    (for ``norm=None``)::
                N-1
      y[k] = 2* sum x[n]*cos(pi*k*(2n+1)/(2*N)), 0 <= k < N.
                n=0
    If ``norm='ortho'``, ``y[k]`` is multiplied by a scaling factor `f`::
      f = sqrt(1/(4*N)) if k = 0,
      f = sqrt(1/(2*N)) otherwise.
    Which makes the corresponding matrix of coefficients orthonormal
    (``OO' = Id``).
    **Type III**
    There are several definitions, we use the following
    (for ``norm=None``)::
                        N-1
      y[k] = x[0] + 2 * sum x[n]*cos(pi*(k+0.5)*n/N), 0 <= k < N.
                        n=1
    or, for ``norm='ortho'`` and 0 <= k < N::
                                        N-1
      y[k] = x[0] / sqrt(N) + sqrt(2/N) * sum x[n]*cos(pi*(k+0.5)*n/N)
                                        n=1
    The (unnormalized) DCT-III is the inverse of the (unnormalized) DCT-II, up
    to a factor `2N`. The orthonormalized DCT-III is exactly the inverse of
    the orthonormalized DCT-II.
    References
    ----------
    .. [1] 'A Fast Cosine Transform in One and Two Dimensions', by <NAME>,
       `IEEE Transactions on acoustics, speech and signal processing`
       vol. 28(1), pp. 27-34,
       http://dx.doi.org/10.1109/TASSP.1980.1163351 (1980).
    .. [2] Wikipedia, "Discrete cosine transform",
       http://en.wikipedia.org/wiki/Discrete_cosine_transform
    Examples
    --------
    The Type 1 DCT is equivalent to the FFT (though faster) for real,
    even-symmetrical inputs. The output is also real and even-symmetrical.
    Half of the FFT input is used to generate half of the FFT output
    (example kept commented so it is not collected as a doctest):
    # >>> from scipy.fftpack import fft, dct
    # >>> fft(np.array([4., 3., 5., 10., 5., 3.])).real
    # array([ 30., -8., 6., -2., 6., -8.])
    # >>> dct(np.array([4., 3., 5., 10.]), 1)
    # array([ 30., -8., 6., -2.])
    """
    # DCT-I has no orthonormal variant in this implementation.
    if type == 1 and norm is not None:
        raise NotImplementedError(
            "Orthonormalization not yet supported for DCT-I")
    return _dct(x, type, n, axis, normalize=norm, overwrite_x=overwrite_x)
def stMFCC(X, fbank, nceps):
    """
    Compute the first `nceps` MFCCs of a frame from its FFT magnitude.

    ARGUMENTS:
        X:     fft magnitude (abs(FFT)) of the frame
        fbank: mel filter bank matrix (see mfccInitFilterBanks)
        nceps: number of cepstral coefficients to keep
    RETURN:
        ceps: numpy array holding the first `nceps` MFCCs

    Note: MFCC calculation is, in general, taken from the scikits.talkbox
    library (MIT Licence), with a small number of modifications to make it
    more compact and suitable for the pyAudioAnalysis Lib.
    """
    # Log filter-bank energies (eps guards against log10(0)), then the
    # orthonormal DCT-II decorrelates them into cepstral coefficients.
    log_fbank_energies = numpy.log10(numpy.dot(X, fbank.T) + eps)
    return dct(log_fbank_energies, type=2, norm='ortho', axis=-1)[:nceps]
def stChromaFeatures(X, fs, nChroma, nFreqsPerChroma):
    """
    Compute a 12-bin chroma vector from an FFT magnitude spectrum.

    ARGUMENTS:
        X:               fft magnitude (abs(FFT)) of the frame
        fs:              sampling frequency (currently unused in the body)
        nChroma:         per-FFT-bin chroma bin indices (from stChromaFeaturesInit)
        nFreqsPerChroma: number of FFT bins mapped to each chroma bin
    RETURNS:
        chromaNames: the 12 pitch-class names
        finalC:      12x1 matrix of chroma energies, normalized by the total
                     spectral energy of the frame
    """
    #TODO: 1 complexity
    #TODO: 2 bug with large windows
    chromaNames = ['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#']
    # Power spectrum of the frame.
    spec = X**2
    if nChroma.max()<nChroma.shape[0]:
        # All chroma indices fit in the lookup table: scatter the power
        # spectrum into per-bin accumulators and average by bin multiplicity.
        C = numpy.zeros((nChroma.shape[0],))
        C[nChroma] = spec
        C /= nFreqsPerChroma[nChroma]
    else:
        # Some indices exceed the table (large windows); only bins before the
        # first out-of-range index are used.
        # NOTE(review): this branch looks suspect (likely TODO 2 above):
        # the comparison uses > rather than >=, the slice stops at I-1,
        # `spec` is assigned unsliced (length mismatch with the index array),
        # and `C /= nFreqsPerChroma` divides by the full array rather than
        # the selected entries. Confirm against upstream pyAudioAnalysis
        # before changing.
        I = numpy.nonzero(nChroma>nChroma.shape[0])[0][0]
        C = numpy.zeros((nChroma.shape[0],))
        C[nChroma[0:I-1]] = spec
        C /= nFreqsPerChroma
    finalC = numpy.zeros((12, 1))
    # Pad C to a multiple of 12 and fold it into 12 pitch classes by summing
    # every 12th entry (vectorized form of the commented-out loop below).
    newD = int(numpy.ceil(C.shape[0] / 12.0) * 12)
    C2 = numpy.zeros((newD, ))
    C2[0:C.shape[0]] = C
    C2 = C2.reshape(int(C2.shape[0]/12), 12)
    #for i in range(12):
    #    finalC[i] = numpy.sum(C[i:C.shape[0]:12])
    finalC = numpy.matrix(numpy.sum(C2, axis=0)).T
    # Normalize by total spectral energy so the chroma vector is
    # scale-invariant.
    finalC /= spec.sum()
    return chromaNames, finalC
def stFeatureExtraction(signal, Fs, Win, Step):
    """
    Short-term windowed feature extraction.

    The signal is split into (possibly overlapping) short-term windows and a
    34-dimensional feature vector is extracted from each: 8 time/spectral
    features, 13 MFCCs and 13 chroma features (12 bins plus their std).
    ARGUMENTS
        signal: the input signal samples
        Fs:     the sampling freq (in Hz)
        Win:    the short-term window size (in samples)
        Step:   the short-term window step (in samples)
    RETURNS
        stFeatures: a numpy array (numOfFeatures x numOfShortTermWindows)
    """
    Win = int(Win)
    Step = int(Step)
    # Signal normalization: scale 16-bit samples to [-1, 1], then remove the
    # DC offset and normalize by the maximum absolute amplitude.
    signal = numpy.double(signal)
    signal = signal / (2.0 ** 15)
    DC = signal.mean()
    MAX = (numpy.abs(signal)).max()
    signal = (signal - DC) / MAX
    N = len(signal)                                # total number of samples
    curPos = 0
    countFrames = 0
    # NOTE(review): under Python 3 this is true division, so nFFT is a float.
    # It is cast with int() where used for slicing below, but
    # mfccInitFilterBanks/stChromaFeaturesInit receive the float — confirm
    # they tolerate that.
    nFFT = Win / 2
    [fbank, freqs] = mfccInitFilterBanks(Fs, nFFT)    # compute the triangular filter banks used in the mfcc calculation
    nChroma, nFreqsPerChroma = stChromaFeaturesInit(nFFT, Fs)
    # Layout of each feature vector: [0..7] time/spectral, [8..20] MFCCs,
    # [21..33] chroma (12 bins + std).
    numOfTimeSpectralFeatures = 8
    numOfHarmonicFeatures = 0
    nceps = 13
    numOfChromaFeatures = 13
    totalNumOfFeatures = numOfTimeSpectralFeatures + nceps + numOfHarmonicFeatures + numOfChromaFeatures
    # totalNumOfFeatures = numOfTimeSpectralFeatures + nceps + numOfHarmonicFeatures
    stFeatures = numpy.array([], dtype=numpy.float64)
    while (curPos + Win - 1 < N):  # for each short-term window until the end of signal
        countFrames += 1
        x = signal[curPos:curPos+Win]                    # get current window
        curPos = curPos + Step                           # update window position
        X = abs(fft(x))                                  # get fft magnitude
        X = X[0:int(nFFT)]                               # keep the first half of the (symmetric) spectrum
        X = X / len(X)                                   # normalize fft
        if countFrames == 1:
            Xprev = X.copy()                             # keep previous fft mag (used in spectral flux)
        curFV = numpy.zeros((totalNumOfFeatures, 1))
        curFV[0] = stZCR(x)                              # zero crossing rate
        curFV[1] = stEnergy(x)                           # short-term energy
        curFV[2] = stEnergyEntropy(x)                    # short-term entropy of energy
        [curFV[3], curFV[4]] = stSpectralCentroidAndSpread(X, Fs)    # spectral centroid and spread
        curFV[5] = stSpectralEntropy(X)                  # spectral entropy
        curFV[6] = stSpectralFlux(X, Xprev)              # spectral flux
        curFV[7] = stSpectralRollOff(X, 0.90, Fs)        # spectral rolloff
        curFV[numOfTimeSpectralFeatures:numOfTimeSpectralFeatures+nceps, 0] = stMFCC(X, fbank, nceps).copy()    # MFCCs
        # 12 chroma bins plus their standard deviation as the last feature.
        chromaNames, chromaF = stChromaFeatures(X, Fs, nChroma, nFreqsPerChroma)
        curFV[numOfTimeSpectralFeatures + nceps: numOfTimeSpectralFeatures + nceps + numOfChromaFeatures - 1] = chromaF
        curFV[numOfTimeSpectralFeatures + nceps + numOfChromaFeatures - 1] = chromaF.std()
        if countFrames == 1:
            stFeatures = curFV                           # initialize feature matrix (if first frame)
        else:
            stFeatures = numpy.concatenate((stFeatures, curFV), 1)    # update feature matrix
        Xprev = X.copy()
    return numpy.array(stFeatures)
def stHarmonic(frame, fs):
"""
Computes harmonic ratio and pitch
"""
M = numpy.round(0.016 * fs) - 1
R | |
img) for img in imgs]
boxes = [
cv2_transform.scale_boxes(self._crop_size, boxes[0], height, width)
]
imgs, boxes = cv2_transform.spatial_shift_crop_list(
self._crop_size, imgs, 1, boxes=boxes
)
if self._test_force_flip:
imgs, boxes = cv2_transform.horizontal_flip_list(
1, imgs, order="HWC", boxes=boxes
)
elif self._split == "test":
# Short side to test_scale. Non-local and STRG uses 256.
imgs = [cv2_transform.scale(self._crop_size, img) for img in imgs]
boxes = [
cv2_transform.scale_boxes(self._crop_size, boxes[0], height, width)
]
if self._test_force_flip:
imgs, boxes = cv2_transform.horizontal_flip_list(
1, imgs, order="HWC", boxes=boxes
)
else:
raise NotImplementedError("Unsupported split mode {}".format(self._split))
# Convert image to CHW keeping BGR order.
imgs = [cv2_transform.HWC2CHW(img) for img in imgs]
# Image [0, 255] -> [0, 1].
imgs = [img / 255.0 for img in imgs]
imgs = [
np.ascontiguousarray(
# img.reshape((3, self._crop_size, self._crop_size))
img.reshape((3, imgs[0].shape[1], imgs[0].shape[2]))
).astype(np.float32)
for img in imgs
]
# Do color augmentation (after divided by 255.0).
if self._split == "train" and self._use_color_augmentation:
if not self._pca_jitter_only:
imgs = cv2_transform.color_jitter_list(
imgs, img_brightness=0.4, img_contrast=0.4, img_saturation=0.4
)
imgs = cv2_transform.lighting_list(
imgs,
alphastd=0.1,
eigval=np.array(self._pca_eigval).astype(np.float32),
eigvec=np.array(self._pca_eigvec).astype(np.float32),
)
# Normalize images by mean and std.
imgs = [
cv2_transform.color_normalization(
img,
np.array(self._data_mean, dtype=np.float32),
np.array(self._data_std, dtype=np.float32),
)
for img in imgs
]
# Concat list of images to single ndarray.
imgs = np.concatenate([np.expand_dims(img, axis=1) for img in imgs], axis=1)
if not self._use_bgr:
# Convert image format from BGR to RGB.
imgs = imgs[::-1, ...]
imgs = np.ascontiguousarray(imgs)
imgs = torch.from_numpy(imgs)
boxes = cv2_transform.clip_boxes_to_image(
boxes[0], imgs[0].shape[1], imgs[0].shape[2]
)
return imgs, boxes
    def _images_and_boxes_preprocessing(self, imgs, boxes):
        """
        This function performs preprocessing for the input images and
        corresponding boxes for one clip.

        Args:
            imgs (tensor): the images, with values in [0, 255]. Height and
                width are read from dims 2 and 3, so the layout is
                presumably (T, C, H, W) — matches the permute done by the
                caller; confirm if reused elsewhere.
            boxes (ndarray): the boxes for the current clip, as
                [x1, y1, x2, y2] normalized to [0, 1].
        Returns:
            imgs (tensor): list of preprocessed images.
            boxes (ndarray): preprocessed boxes, in pixel coordinates of the
                final crop.
        """
        # Image [0, 255] -> [0, 1].
        imgs = imgs.float()
        imgs = imgs / 255.0
        height, width = imgs.shape[2], imgs.shape[3]
        # The format of boxes is [x1, y1, x2, y2]. The input boxes are in the
        # range of [0, 1]. Scale them to absolute pixel coordinates and clip
        # to the image bounds.
        boxes[:, [0, 2]] *= width
        boxes[:, [1, 3]] *= height
        boxes = transform.clip_boxes_to_image(boxes, height, width)
        if self._split == "train":
            # Train split: random scale jitter + random crop + random flip
            # for augmentation; boxes are transformed alongside the images.
            imgs, boxes = transform.random_short_side_scale_jitter(
                imgs,
                min_size=self._jitter_min_scale,
                max_size=self._jitter_max_scale,
                boxes=boxes,
            )
            imgs, boxes = transform.random_crop(imgs, self._crop_size, boxes=boxes)
            # Random flip.
            imgs, boxes = transform.horizontal_flip(0.5, imgs, boxes=boxes)
        elif self._split == "val":
            # Val split
            # Resize short side to crop_size. Non-local and STRG uses 256.
            # min_size == max_size makes the "jitter" a deterministic resize.
            imgs, boxes = transform.random_short_side_scale_jitter(
                imgs, min_size=self._crop_size, max_size=self._crop_size, boxes=boxes
            )
            # Apply center crop for val split
            imgs, boxes = transform.uniform_crop(
                imgs, size=self._crop_size, spatial_idx=1, boxes=boxes
            )
            if self._test_force_flip:
                imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)
        elif self._split == "test":
            # Test split
            # Resize short side to crop_size. Non-local and STRG uses 256.
            imgs, boxes = transform.random_short_side_scale_jitter(
                imgs, min_size=self._crop_size, max_size=self._crop_size, boxes=boxes
            )
            if self._test_force_flip:
                imgs, boxes = transform.horizontal_flip(1, imgs, boxes=boxes)
        else:
            raise NotImplementedError("{} split not supported yet!".format(self._split))
        # Do color augmentation (after divided by 255.0).
        if self._split == "train" and self._use_color_augmentation:
            if not self._pca_jitter_only:
                imgs = transform.color_jitter(
                    imgs, img_brightness=0.4, img_contrast=0.4, img_saturation=0.4
                )
            imgs = transform.lighting_jitter(
                imgs,
                alphastd=0.1,
                eigval=np.array(self._pca_eigval).astype(np.float32),
                eigvec=np.array(self._pca_eigvec).astype(np.float32),
            )
        # Normalize images by mean and std.
        imgs = transform.color_normalization(
            imgs,
            np.array(self._data_mean, dtype=np.float32),
            np.array(self._data_std, dtype=np.float32),
        )
        if self._use_bgr:
            # Convert image format from RGB to BGR.
            # Note that Kinetics pre-training uses RGB!
            imgs = imgs[:, [2, 1, 0], ...]
        # Final clip of boxes to the crop boundaries.
        boxes = transform.clip_boxes_to_image(boxes, self._crop_size, self._crop_size)
        return imgs, boxes
def _load_frames_decord(self, video_filename, frame_number, fps):
assert frame_number > 0
vr = VideoReader(video_filename, height=320, width=568)
frames = frame_number - np.arange(
self.cfg.DATA.NUM_FRAMES * self.cfg.DATA.SAMPLING_RATE,
step=self.cfg.DATA.SAMPLING_RATE,
)[::-1]
frames[frames < 1] = 1
frames = frames.astype(int)
video_data = vr.get_batch(frames).permute(3, 0, 1, 2)
return video_data
def _load_frames_pyav(self, video_filename, frame_number, fps):
assert frame_number > 0
vr = PyAVVideoReader(video_filename, height=320)
frames = (
frame_number
- np.arange(
self.cfg.DATA.NUM_FRAMES * self.cfg.DATA.SAMPLING_RATE,
step=self.cfg.DATA.SAMPLING_RATE,
)[::-1]
)
frames[frames < 1] = 1
frames = frames.astype(int)
imgs = vr[frames]
return imgs
def _load_frames_pytorch_video(self, video_filename, frame_number, fps):
clip_duration = (
self.cfg.DATA.NUM_FRAMES * self.cfg.DATA.SAMPLING_RATE - 1
) / fps
clip_end_sec = frame_number / fps
clip_start_sec = clip_end_sec - clip_duration
# truncate if negative timestamp
clip_start_sec = np.min(clip_start_sec, 0)
video = EncodedVideo.from_path(video_filename, decode_audio=False)
video_data = video.get_clip(clip_start_sec, clip_end_sec)["video"]
video_data = uniform_temporal_subsample(video_data, self.cfg.DATA.NUM_FRAMES)
# video_data = short_side_scale(video_data, )
return video_data
def _retry_load_images_lmdb(self, video_id, frames, retry=10, backend="pytorch"):
"""
This function is to load images with support of retrying for failed load.
Args:
keys (list): paths of images needed to be loaded.
retry (int, optional): maximum time of loading retrying. Defaults to 10.
backend (str): `pytorch` or `cv2`.
Returns:
imgs (list): list of loaded images.
"""
for i in range(retry):
imgs = []
imgs = self._hlmdb.get_batch(video_id, frames)
if all(img is not None for img in imgs):
if backend == "pytorch":
imgs = torch.as_tensor(np.stack(imgs))
return imgs
else:
logger.warn("Reading failed. Will retry.")
time.sleep(1.0)
if i == retry - 1:
raise Exception("Failed to load frames from video {}: {}".format(video_id, frames))
def _sample_frames(self, frame):
frames = (
frame
- np.arange(
self.cfg.DATA.NUM_FRAMES * self.cfg.DATA.SAMPLING_RATE,
step=self.cfg.DATA.SAMPLING_RATE,
)[::-1]
)
frames[frames < 0] = 0
frames = frames.astype(int)
return frames
def _load_annotations(self, idx):
# get the idx-th annotation
ann = self._annotations['annotations'][idx]
uid = ann['uid']
# get video_id, frame_number, gt_boxes, gt_noun_labels, gt_verb_labels and gt_ttc_targets
video_id = ann["video_uid"]
frame_number = ann['frame']
if 'objects' in ann:
gt_boxes = np.vstack([x['box'] for x in ann['objects']])
gt_noun_labels = np.array([x['noun_category_id'] for x in ann['objects']])
gt_verb_labels = np.array([x['verb_category_id'] for x in ann['objects']])
gt_ttc_targets = np.array([x['time_to_contact'] for x in ann['objects']])
else:
gt_boxes = gt_noun_labels = gt_verb_labels = gt_ttc_targets = None
frame_width, frame_height = self._annotations['videos'][video_id]['frame_width'], self._annotations['videos'][video_id]['frame_height']
fps = self._annotations['videos'][video_id]['fps']
return uid, video_id, frame_width, frame_height, frame_number, fps, gt_boxes, gt_noun_labels, gt_verb_labels, gt_ttc_targets
def _load_detections(self, uid):
# get the object detections for the current example
object_detections = self._obj_detections[uid]
if len(object_detections)>0:
pred_boxes = np.vstack([x['box'] for x in object_detections])
pred_scores = np.array([x['score'] for x in object_detections])
pred_object_labels = np.array([x['noun_category_id'] for x in object_detections])
# exclude detections below the theshold
detected = (
pred_scores
>= self.cfg.EGO4D_STA.DETECTION_SCORE_THRESH
)
pred_boxes = pred_boxes[detected]
pred_object_labels = pred_object_labels[detected]
pred_scores = pred_scores[detected]
else:
pred_boxes = np.zeros((0,4))
pred_scores = pred_object_labels = np.array([])
return pred_boxes, pred_object_labels, pred_scores
def _load_frames(self, video_id, frame_number, fps):
if self.cfg.EGO4D_STA.VIDEO_LOAD_BACKEND == 'pytorchvideo':
frames = self._load_frames_pytorch_video(join(self.cfg.EGO4D_STA.VIDEO_DIR, video_id + '.mp4'), frame_number, fps)
elif self.cfg.EGO4D_STA.VIDEO_LOAD_BACKEND == 'decord':
frames = self._load_frames_decord(join(self.cfg.EGO4D_STA.VIDEO_DIR, video_id + '.mp4'), frame_number, fps)
elif self.cfg.EGO4D_STA.VIDEO_LOAD_BACKEND == 'pyav':
frames = self._load_frames_pyav(join(self.cfg.EGO4D_STA.VIDEO_DIR, video_id + '.mp4'), frame_number, fps)
elif self.cfg.EGO4D_STA.VIDEO_LOAD_BACKEND == 'lmdb':
# sample the list of frames in the clip
#key_list = self._sample_frame_keys(video_id, frame_number)
frames_list = self._sample_frames(frame_number)
# # retrieve frames
frames = self._retry_load_images_lmdb(
video_id, frames_list, backend="cv2"
)
return frames
def _preprocess_frames_and_boxes(self, frames, boxes):
if self.cfg.EGO4D_STA.VIDEO_LOAD_BACKEND in ['pytorchvideo',"decord"]:
video_tensor = frames.permute(1, 0, 2, 3)
video_tensor, boxes = self._images_and_boxes_preprocessing(
video_tensor, boxes=boxes
)
# T C H W -> C T H W.
video_tensor = video_tensor.permute(1, 0, 2, 3)
else:
# Preprocess images and boxes
video_tensor, boxes = self._images_and_boxes_preprocessing_cv2(
frames, boxes=boxes
)
return video_tensor, boxes
def __getitem__(self, idx):
"""
Generate corresponding clips, boxes, labels and metadata for given idx.
Args:
idx (int): the video index provided by the pytorch sampler.
Returns:
uid: the unique id of the annotation
imgs: the frames sampled from the video
pred_boxes: the list of boxes detected in the current frame. These are in the resolution of the input example.
verb_label: the verb label associated to the current frame
ttc_target: the ttc target
extra_data: a dictionary containing extra data fields:
'orig_pred_boxes': boxes at the original resolution
'pred_object_scores': associated prediction scores
'pred_object_labels': associated predicted object labels
'gt_detections': dictionary containing the ground truth predictions for the current frame
"""
uid, video_id, frame_width, frame_height, frame_number, fps, gt_boxes, gt_noun_labels, gt_verb_labels, gt_ttc_targets = self._load_annotations(idx)
pred_boxes, pred_object_labels, pred_scores = self._load_detections(uid)
frames = self._load_frames(video_id, frame_number, fps)
orig_pred_boxes = pred_boxes.copy()
nn = np.array([frame_width, frame_height]*2).reshape(1,-1)
pred_boxes/=nn
if gt_boxes is None: # unlabeled example
video_tensor, pred_boxes =self._preprocess_frames_and_boxes(frames, pred_boxes)
imgs = utils.pack_pathway_output(self.cfg, video_tensor)
extra_data = {
'orig_pred_boxes': orig_pred_boxes,
'pred_object_scores': pred_scores,
'pred_object_labels': pred_object_labels
}
return uid, imgs, pred_boxes, np.array([]), np.array([]), extra_data
else:
orig_gt_boxes = gt_boxes.copy()
gt_boxes/=nn
# put all boxes together
all_boxes = np.vstack([gt_boxes, pred_boxes])
video_tensor, all_boxes =self._preprocess_frames_and_boxes(frames, | |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
# Compatibility shim for the auto-generated pyangbind code below: on
# Python 3, expose the Python 2 builtin-module name `__builtin__` and alias
# `long` to `int` so the generated class bodies can use them unconditionally.
if six.PY3:
    import builtins as __builtin__
    long = int
elif six.PY2:
    import __builtin__
class yc_SNSSAI_identifier_nst__nst_SNSSAI_identifier(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module nst - based on the path /nst/SNSSAI-identifier. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_path_helper', '_extmethods', '__slice_service_type','__slice_differentiator',)
_yang_name = 'SNSSAI-identifier'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__slice_differentiator = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="slice-differentiator", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
self.__slice_service_type = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={u'URLLC': {}, u'eMBB': {}, u'mMTC': {}},), is_leaf=True, yang_name="slice-service-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='network-slice-type', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'nst', u'SNSSAI-identifier']
def _get_slice_service_type(self):
"""
Getter method for slice_service_type, mapped from YANG variable /nst/SNSSAI_identifier/slice_service_type (network-slice-type)
YANG Description: Network slice service type
"""
return self.__slice_service_type
def _set_slice_service_type(self, v, load=False):
"""
Setter method for slice_service_type, mapped from YANG variable /nst/SNSSAI_identifier/slice_service_type (network-slice-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_slice_service_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_slice_service_type() directly.
YANG Description: Network slice service type
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={u'URLLC': {}, u'eMBB': {}, u'mMTC': {}},), is_leaf=True, yang_name="slice-service-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='network-slice-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """slice_service_type must be of a type compatible with network-slice-type""",
'defined-type': "nst:network-slice-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={u'URLLC': {}, u'eMBB': {}, u'mMTC': {}},), is_leaf=True, yang_name="slice-service-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='network-slice-type', is_config=True)""",
})
self.__slice_service_type = t
if hasattr(self, '_set'):
self._set()
def _unset_slice_service_type(self):
self.__slice_service_type = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={u'URLLC': {}, u'eMBB': {}, u'mMTC': {}},), is_leaf=True, yang_name="slice-service-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='network-slice-type', is_config=True)
def _get_slice_differentiator(self):
    """
    Getter method for slice_differentiator, mapped from YANG variable /nst/SNSSAI_identifier/slice_differentiator (string)

    YANG Description: Network slice differentiator

    Returns the YANGDynClass-wrapped leaf value (not a plain str).
    """
    return self.__slice_differentiator
def _set_slice_differentiator(self, v, load=False):
    """
    Setter method for slice_differentiator, mapped from YANG variable /nst/SNSSAI_identifier/slice_differentiator (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_slice_differentiator is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_slice_differentiator() directly.

    YANG Description: Network slice differentiator
    """
    # Unwrap pyangbind user-typed values before re-validating the raw value.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="slice-differentiator", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected YANG type.
        raise ValueError({
            'error-string': """slice_differentiator must be of a type compatible with string""",
            'defined-type': "string",
            'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="slice-differentiator", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)""",
        })
    self.__slice_differentiator = t
    # Propagate the change notification to the parent tree, if supported.
    if hasattr(self, '_set'):
        self._set()
def _unset_slice_differentiator(self):
    # Reset the leaf to a fresh, unconfigured YANGDynClass wrapper — the
    # generated equivalent of deleting the slice-differentiator value.
    self.__slice_differentiator = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="slice-differentiator", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='string', is_config=True)
# Public attribute access goes through the generated getters/setters;
# __builtin__.property keeps the pyangbind output Python 2/3 compatible.
slice_service_type = __builtin__.property(_get_slice_service_type, _set_slice_service_type)
slice_differentiator = __builtin__.property(_get_slice_differentiator, _set_slice_differentiator)
# Ordered registry of the container's YANG elements, used by pyangbind
# machinery (e.g. the copy-constructor path in generated __init__).
_pyangbind_elements = OrderedDict([('slice_service_type', slice_service_type), ('slice_differentiator', slice_differentiator), ])
class yc_quality_of_service_nst__nst_quality_of_service(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module nst - based on the path /nst/quality-of-service. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    """
    # __slots__ restricts instances to the generated leaves plus helper state
    # (double-underscore names are mangled to _ClassName__name on instances).
    __slots__ = ('_path_helper', '_extmethods', '__id','__resource_type','__priority_level','__packet_delay_budget','__packet_error_rate','__default_max_data_burst',)
    _yang_name = 'quality-of-service'
    _pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
    """Initialise the generated container.

    With no positional argument every leaf gets a fresh YANGDynClass
    wrapper; with one positional argument (copy-constructor style) the
    argument must expose all registered elements, whose changed values
    are copied through the generated setters.
    """
    # Helpers are disabled by default; a path helper may be attached later.
    self._path_helper = False
    self._extmethods = False
    # One YANGDynClass wrapper per YANG leaf of this container.
    self.__priority_level = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="priority-level", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint16', is_config=True)
    self.__default_max_data_burst = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="default-max-data-burst", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint16', is_config=True)
    self.__packet_error_rate = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="packet-error-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint16', is_config=True)
    self.__packet_delay_budget = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="packet-delay-budget", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint16', is_config=True)
    self.__id = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint16', is_config=True)
    self.__resource_type = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={u'non-GBR': {}, u'GBR': {}, u'delay-critical-GBR': {}},), is_leaf=True, yang_name="resource-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='resource-type', is_config=True)
    load = kwargs.pop("load", None)
    if args:
        if len(args) > 1:
            raise TypeError("cannot create a YANG container with >1 argument")
        # Copy-constructor path: the argument must expose every element.
        all_attr = True
        for e in self._pyangbind_elements:
            if not hasattr(args[0], e):
                all_attr = False
                break
        if not all_attr:
            raise ValueError("Supplied object did not have the correct attributes")
        for e in self._pyangbind_elements:
            nobj = getattr(args[0], e)
            if nobj._changed() is False:
                # Element is still at its default value: nothing to copy.
                continue
            setmethod = getattr(self, "_set_%s" % e)
            if load is None:
                setmethod(getattr(args[0], e))
            else:
                setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'nst', u'quality-of-service']
def _get_id(self):
    """
    Getter method for id, mapped from YANG variable /nst/quality_of_service/id (uint16)

    YANG Description: Quality of service identifier

    Returns the YANGDynClass-wrapped leaf value (not a plain int).
    """
    return self.__id
def _set_id(self, v, load=False):
    """
    Setter method for id, mapped from YANG variable /nst/quality_of_service/id (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_id is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_id() directly.

    YANG Description: Quality of service identifier
    """
    # Unwrap pyangbind user-typed values before re-validating the raw value.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint16', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected YANG type.
        raise ValueError({
            'error-string': """id must be of a type compatible with uint16""",
            'defined-type': "uint16",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint16', is_config=True)""",
        })
    self.__id = t
    # Propagate the change notification to the parent tree, if supported.
    if hasattr(self, '_set'):
        self._set()
def _unset_id(self):
    # Reset the leaf to a fresh, unconfigured YANGDynClass wrapper.
    self.__id = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint16', is_config=True)
def _get_resource_type(self):
    """
    Getter method for resource_type, mapped from YANG variable /nst/quality_of_service/resource_type (resource-type)

    YANG Description: Quality of service resource type

    Returns the YANGDynClass-wrapped enumeration value.
    """
    return self.__resource_type
def _set_resource_type(self, v, load=False):
    """
    Setter method for resource_type, mapped from YANG variable /nst/quality_of_service/resource_type (resource-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_resource_type is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_resource_type() directly.

    YANG Description: Quality of service resource type
    """
    # Unwrap pyangbind user-typed values before re-validating the raw value.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={u'non-GBR': {}, u'GBR': {}, u'delay-critical-GBR': {}},), is_leaf=True, yang_name="resource-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='resource-type', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected YANG type.
        raise ValueError({
            'error-string': """resource_type must be of a type compatible with resource-type""",
            'defined-type': "nst:resource-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={u'non-GBR': {}, u'GBR': {}, u'delay-critical-GBR': {}},), is_leaf=True, yang_name="resource-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='resource-type', is_config=True)""",
        })
    self.__resource_type = t
    # Propagate the change notification to the parent tree, if supported.
    if hasattr(self, '_set'):
        self._set()
def _unset_resource_type(self):
    # Reset the leaf to a fresh, unconfigured YANGDynClass wrapper.
    self.__resource_type = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={u'non-GBR': {}, u'GBR': {}, u'delay-critical-GBR': {}},), is_leaf=True, yang_name="resource-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='resource-type', is_config=True)
def _get_priority_level(self):
    """
    Getter method for priority_level, mapped from YANG variable /nst/quality_of_service/priority_level (uint16)

    YANG Description: Priority level of the service

    Returns the YANGDynClass-wrapped leaf value (not a plain int).
    """
    return self.__priority_level
def _set_priority_level(self, v, load=False):
"""
Setter method for priority_level, mapped from YANG variable /nst/quality_of_service/priority_level (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_priority_level is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_priority_level() directly.
YANG Description: Priority level of the service
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="priority-level", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:etsi:osm:yang:nst', defining_module='nst', yang_type='uint16', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """priority_level must be of a type compatible with uint16""",
'defined-type': "uint16",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="priority-level", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, | |
# -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Mesh/MeshSolution.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Mesh/MeshSolution
"""
from os import linesep
from sys import getsizeof
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from ..Functions.copy import copy
from ..Functions.load import load_init_dict
from ..Functions.Load.import_class import import_class
from ._frozen import FrozenClass
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from ..Methods.Mesh.MeshSolution.get_mesh import get_mesh
except ImportError as error:
get_mesh = error
try:
from ..Methods.Mesh.MeshSolution.get_solution import get_solution
except ImportError as error:
get_solution = error
try:
from ..Methods.Mesh.MeshSolution.get_field import get_field
except ImportError as error:
get_field = error
try:
from ..Methods.Mesh.MeshSolution.plot_mesh import plot_mesh
except ImportError as error:
plot_mesh = error
try:
from ..Methods.Mesh.MeshSolution.plot_contour import plot_contour
except ImportError as error:
plot_contour = error
try:
from ..Methods.Mesh.MeshSolution.plot_deflection import plot_deflection
except ImportError as error:
plot_deflection = error
try:
from ..Methods.Mesh.MeshSolution.plot_deflection_animated import (
plot_deflection_animated,
)
except ImportError as error:
plot_deflection_animated = error
try:
from ..Methods.Mesh.MeshSolution.plot_glyph import plot_glyph
except ImportError as error:
plot_glyph = error
try:
from ..Methods.Mesh.MeshSolution.get_group import get_group
except ImportError as error:
get_group = error
from numpy import array, array_equal
from ._check import InitUnKnowClassError
from .Mesh import Mesh
from .Solution import Solution
class MeshSolution(FrozenClass):
    """Abstract class to associate a mesh with one or several solutions"""

    VERSION = 1

    # Each optional method was imported at module level in a try/except; on
    # failure the module-level name is bound to the ImportError instance.
    # The pattern below converts such a failed import into a property whose
    # getter raises a helpful ImportError only when the method is actually
    # used, so unrelated features keep working without the dependency.
    # cf Methods.Mesh.MeshSolution.get_mesh
    if isinstance(get_mesh, ImportError):
        get_mesh = property(
            fget=lambda x: raise_(
                ImportError("Can't use MeshSolution method get_mesh: " + str(get_mesh))
            )
        )
    else:
        get_mesh = get_mesh
    # cf Methods.Mesh.MeshSolution.get_solution
    if isinstance(get_solution, ImportError):
        get_solution = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use MeshSolution method get_solution: " + str(get_solution)
                )
            )
        )
    else:
        get_solution = get_solution
    # cf Methods.Mesh.MeshSolution.get_field
    if isinstance(get_field, ImportError):
        get_field = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use MeshSolution method get_field: " + str(get_field)
                )
            )
        )
    else:
        get_field = get_field
    # cf Methods.Mesh.MeshSolution.plot_mesh
    if isinstance(plot_mesh, ImportError):
        plot_mesh = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use MeshSolution method plot_mesh: " + str(plot_mesh)
                )
            )
        )
    else:
        plot_mesh = plot_mesh
    # cf Methods.Mesh.MeshSolution.plot_contour
    if isinstance(plot_contour, ImportError):
        plot_contour = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use MeshSolution method plot_contour: " + str(plot_contour)
                )
            )
        )
    else:
        plot_contour = plot_contour
    # cf Methods.Mesh.MeshSolution.plot_deflection
    if isinstance(plot_deflection, ImportError):
        plot_deflection = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use MeshSolution method plot_deflection: "
                    + str(plot_deflection)
                )
            )
        )
    else:
        plot_deflection = plot_deflection
    # cf Methods.Mesh.MeshSolution.plot_deflection_animated
    if isinstance(plot_deflection_animated, ImportError):
        plot_deflection_animated = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use MeshSolution method plot_deflection_animated: "
                    + str(plot_deflection_animated)
                )
            )
        )
    else:
        plot_deflection_animated = plot_deflection_animated
    # cf Methods.Mesh.MeshSolution.plot_glyph
    if isinstance(plot_glyph, ImportError):
        plot_glyph = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use MeshSolution method plot_glyph: " + str(plot_glyph)
                )
            )
        )
    else:
        plot_glyph = plot_glyph
    # cf Methods.Mesh.MeshSolution.get_group
    if isinstance(get_group, ImportError):
        get_group = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use MeshSolution method get_group: " + str(get_group)
                )
            )
        )
    else:
        get_group = get_group
    # save and copy methods are available in all object
    save = save
    copy = copy
    # get_logger method is available in all object
    get_logger = get_logger
def __init__(
    self,
    label=None,
    mesh=-1,
    is_same_mesh=True,
    solution=-1,
    group=None,
    dimension=2,
    init_dict=None,
    init_str=None,
):
    """Build a MeshSolution.

    Three initialisation modes are supported:
    - keyword arguments: every parameter has a default; for pyleecan
      types, -1 requests the default constructor,
    - init_dict: a dict whose keys are property names,
    - init_str: the path of a file to load.
    ndarray or list can be given for Vector and Matrix properties;
    object or dict can be given for pyleecan objects.
    """
    if init_str is not None:  # Load the init_dict from the given file
        init_dict = load_init_dict(init_str)[1]
    if init_dict is not None:  # Initialisation by dict
        assert type(init_dict) is dict
        # Override the defaults with whatever the dict provides
        label = init_dict.get("label", label)
        mesh = init_dict.get("mesh", mesh)
        is_same_mesh = init_dict.get("is_same_mesh", is_same_mesh)
        solution = init_dict.get("solution", solution)
        group = init_dict.get("group", group)
        dimension = init_dict.get("dimension", dimension)
    # Assign through the setters (value check and conversion happen there)
    self.parent = None
    self.label = label
    self.mesh = mesh
    self.is_same_mesh = is_same_mesh
    self.solution = solution
    self.group = group
    self.dimension = dimension
    # Freeze the object: no new properties can be added afterwards
    self._freeze()
def __str__(self):
    """Convert this object in a readeable string (for print)"""
    MeshSolution_str = ""
    if self.parent is None:
        MeshSolution_str += "parent = None " + linesep
    else:
        MeshSolution_str += (
            "parent = " + str(type(self.parent)) + " object" + linesep
        )
    MeshSolution_str += 'label = "' + str(self.label) + '"' + linesep
    # NOTE(review): the len() calls below assume mesh/solution/group are
    # never None here, while compare()/__sizeof__ do guard against None —
    # confirm the constructor always provides containers.
    if len(self.mesh) == 0:
        MeshSolution_str += "mesh = []" + linesep
    for ii in range(len(self.mesh)):
        # Indent nested reprs with a tab for readability
        tmp = self.mesh[ii].__str__().replace(linesep, linesep + "\t") + linesep
        MeshSolution_str += "mesh[" + str(ii) + "] =" + tmp + linesep + linesep
    MeshSolution_str += "is_same_mesh = " + str(self.is_same_mesh) + linesep
    if len(self.solution) == 0:
        MeshSolution_str += "solution = []" + linesep
    for ii in range(len(self.solution)):
        tmp = self.solution[ii].__str__().replace(linesep, linesep + "\t") + linesep
        MeshSolution_str += "solution[" + str(ii) + "] =" + tmp + linesep + linesep
    if len(self.group) == 0:
        MeshSolution_str += "group = dict()"
    for key, obj in self.group.items():
        MeshSolution_str += (
            "group[" + key + "] = " + str(self.group[key]) + linesep + linesep
        )
    MeshSolution_str += "dimension = " + str(self.dimension) + linesep
    return MeshSolution_str
def __eq__(self, other):
    """Compare two objects (skip parent)"""
    if type(other) != type(self):
        return False
    if other.label != self.label:
        return False
    if other.mesh != self.mesh:
        return False
    if other.is_same_mesh != self.is_same_mesh:
        return False
    if other.solution != self.solution:
        return False
    # group entries are compared with numpy's array_equal, so dict equality
    # cannot be used directly; handle the None cases explicitly first.
    if (other.group is None and self.group is not None) or (
        other.group is not None and self.group is None
    ):
        return False
    elif other.group is None and self.group is None:
        pass
    elif len(other.group) != len(self.group):
        return False
    else:
        for key in other.group:
            if key not in self.group or not array_equal(
                other.group[key], self.group[key]
            ):
                return False
    if other.dimension != self.dimension:
        return False
    return True
def compare(self, other, name="self"):
    """Compare two objects and return list of differences"""
    if type(other) != type(self):
        return ["type(" + name + ")"]
    diff_list = list()
    if other._label != self._label:
        diff_list.append(name + ".label")
    # mesh: None-aware, then element-wise recursive compare
    if (other.mesh is None and self.mesh is not None) or (
        other.mesh is not None and self.mesh is None
    ):
        diff_list.append(name + ".mesh None mismatch")
    elif self.mesh is None:
        pass
    elif len(other.mesh) != len(self.mesh):
        diff_list.append("len(" + name + ".mesh)")
    else:
        for ii in range(len(other.mesh)):
            diff_list.extend(
                self.mesh[ii].compare(
                    other.mesh[ii], name=name + ".mesh[" + str(ii) + "]"
                )
            )
    if other._is_same_mesh != self._is_same_mesh:
        diff_list.append(name + ".is_same_mesh")
    # solution: same None-aware element-wise scheme as mesh
    if (other.solution is None and self.solution is not None) or (
        other.solution is not None and self.solution is None
    ):
        diff_list.append(name + ".solution None mismatch")
    elif self.solution is None:
        pass
    elif len(other.solution) != len(self.solution):
        diff_list.append("len(" + name + ".solution)")
    else:
        for ii in range(len(other.solution)):
            diff_list.extend(
                self.solution[ii].compare(
                    other.solution[ii], name=name + ".solution[" + str(ii) + "]"
                )
            )
    # group: values compared with numpy's array_equal, keyed by other's keys
    if (other.group is None and self.group is not None) or (
        other.group is not None and self.group is None
    ):
        diff_list.append(name + ".group None mismatch")
    elif self.group is None:
        pass
    elif len(other.group) != len(self.group):
        diff_list.append("len(" + name + ".group)")
    else:
        for key in other.group:
            if key not in self.group or not array_equal(
                other.group[key], self.group[key]
            ):
                diff_list.append(name + ".group[" + str(key) + "]")
    if other._dimension != self._dimension:
        diff_list.append(name + ".dimension")
    return diff_list
def __sizeof__(self):
    """Return the size in memory of the object (including all subobject)"""
    total = getsizeof(self.label)
    if self.mesh is not None:
        total += sum(getsizeof(item) for item in self.mesh)
    total += getsizeof(self.is_same_mesh)
    if self.solution is not None:
        total += sum(getsizeof(item) for item in self.solution)
    if self.group is not None:
        # Count both the keys and the stored values
        total += sum(getsizeof(v) + getsizeof(k) for k, v in self.group.items())
    total += getsizeof(self.dimension)
    return total
def as_dict(self):
"""Convert this object in a json seriable dict (can be use in | |
float(k)
i += 1
cpx = self._cplex
self.cpx_adapter.multiobjsetobj(cpx._env._e, cpx._lp, objidx, objind=indices, objval=koefs,
priority=priority, weight=weight, abstol=abstol, reltol=reltol, objname=objname)
def _fast_update_linearct_coefs(self, ct_index, var_indices, coefs):
    # Bulk-change the coefficients of one linear constraint row through the
    # low-level adapter (chgcoeflist), bypassing the Python wrapper layer.
    assert len(var_indices) == len(coefs)
    num_coefs = len(coefs)
    cpx = self._cplex
    self.cpx_adapter.chgcoeflist(cpx._env._e, cpx._lp, [ct_index] * num_coefs, var_indices, coefs)
def _fast_set_rhs(self, ct_index, new_rhs):
    # Change the right-hand side of one constraint via the adapter (chgrhs).
    cpx = self._cplex
    self.cpx_adapter.chgrhs(cpx._env._e, cpx._lp, (ct_index,), (new_rhs,))
def _fast_set_col_name(self, col_index, new_name):
    # Rename one column; a falsy name (None, "") is normalized to "".
    cpx = self._cplex
    self.cpx_adapter.chgcolname(cpx._env._e, cpx._lp, [col_index], [new_name or ""])
def _fast_add_piecewise_constraint1290(self, vary, varx, preslope, breaksx, breaksy, postslope, name):
    # Add a piecewise-linear constraint y = pwl(x) through the adapter.
    # NOTE(review): the "1290" suffix presumably targets the CPLEX 12.9
    # addpwl signature, which still takes an encoding argument — confirm.
    cpx = self._cplex
    cpx_env = cpx._env
    self.cpx_adapter.addpwl(cpx_env._e, cpx._lp,
                            vary, varx,
                            preslope, postslope,
                            len(breaksx), breaksx, breaksy,
                            name, cpx_env._apienc)
    # fetch number of pwls, return it -1
    pw_index = self.cpx_adapter.getnumpwl(cpx_env._e, cpx._lp) - 1
    return pw_index
def _fast_add_piecewise_constraint12100(self, vary, varx, preslope, breaksx, breaksy, postslope, name):
    # Add a piecewise-linear constraint y = pwl(x) through the adapter.
    # NOTE(review): the "12100" suffix presumably targets CPLEX >= 12.10,
    # whose addpwl no longer takes the encoding argument — confirm.
    cpx = self._cplex
    cpx_env = cpx._env
    self.cpx_adapter.addpwl(cpx_env._e, cpx._lp,
                            vary, varx,
                            preslope, postslope,
                            len(breaksx), breaksx, breaksy,
                            name)
    # fetch number of pwls, return it -1
    pw_index = self.cpx_adapter.getnumpwl(cpx_env._e, cpx._lp) - 1
    return pw_index
# ---
def _switch_linear_expr(self, index, old_expr, new_expr):
    # INTERNAL: swap the linear part of a row — first zero out every
    # coefficient of the old expression, then write the new coefficients.
    stale_indices = [dv._index for dv in old_expr.iter_variables()]
    self._fast_update_linearct_coefs(index, stale_indices, [0] * len(stale_indices))
    cpxlin = self.make_cpx_linear_from_one_expr(expr=new_expr)
    self._fast_update_linearct_coefs(index, cpxlin[0], cpxlin[1])
def _switch_linear_exprs2(self, ct_index, old_left, old_right, new_left, new_right):
    # Replace both sides of a linear constraint row in two passes:
    # zero the coefficients of the old expressions, then install the
    # coefficients computed from the new left/right expressions.
    if old_left and old_right:
        # step 1 zap old coefs to zero (with repeats?)
        zap_indices = [dv._index for dv in old_left.iter_variables()]
        zap_indices += [dv._index for dv in old_right.iter_variables()]
    else:
        zap_indices = None  # self._fast_get_row_vars(ct_index)
    if zap_indices:
        self._fast_update_linearct_coefs(ct_index, zap_indices, [0] * len(zap_indices))
    # step 2: install new coefs
    new_ct_lin = self.make_cpx_linear_from_exprs(new_left, new_right)
    self._fast_update_linearct_coefs(ct_index, new_ct_lin[0], new_ct_lin[1])
def update_linear_constraint(self, ct, event, *args):
    # Apply an update event (sense, coefficients and/or rhs) to a posted
    # linear constraint. When present, args carries the new expression(s).
    ct_index = ct.index
    assert ct_index >= 0
    assert event
    updated = False
    if event is upd.ConstraintSense:
        new_ct_cpxtype = args[0]._cplex_code
        self._cplex.linear_constraints.set_senses(ct_index, new_ct_cpxtype)
        updated = True
    else:
        # Coef and rhs branches are not exclusive: a Global event does both.
        if event in (upd.LinearConstraintCoef, upd.LinearConstraintGlobal):
            if args:
                self._switch_linear_exprs2(ct_index,
                                           old_left=ct._left_expr, old_right=ct._right_expr,
                                           new_left=args[0], new_right=args[1])
            else:
                # No explicit new exprs: re-install the constraint's own exprs.
                self._switch_linear_exprs2(ct_index,
                                           old_left=None, old_right=None,
                                           new_left=ct._left_expr, new_right=ct._right_expr)
            updated = True
        if event in (upd.LinearConstraintRhs, upd.LinearConstraintGlobal):
            if args:
                new_ct_rhs = self.make_cpx_ct_rhs_from_exprs(left_expr=args[0], right_expr=args[1])
            else:
                new_ct_rhs = ct.cplex_num_rhs()
            self._fast_set_rhs(ct_index, new_rhs=new_ct_rhs)
            updated = True
    if not updated:  # pragma: no cover
        self._unexpected_event(event, msg='update_linear-constraint')
def update_range_constraint(self, rngct, event, *args):
    # Apply an update event (bounds or expression) to a posted range
    # constraint; CPLEX stores a range row as (rhs, range_value).
    self._resync_if_needed()
    rng_index = rngct.index
    assert rng_index >= 0
    cpx_linear = self._cplex.linear_constraints
    if event == upd.RangeConstraintBounds:
        new_lb, new_ub = args
        offset = rngct.expr.get_constant()
        cpx_rhs_value = new_ub - offset
        # Lazy message: only built if the new domain turns out infeasible.
        msg = lambda: "Cannot update range values of {0!s} to [{1}..{2}]: domain is infeasible".format(rngct,
                                                                                                      new_lb,
                                                                                                      new_ub)
        cpx_range_value = RangeConstraint.static_cplex_range_value(self._model, new_lb, new_ub, msg)
        cpx_linear.set_rhs(rng_index, cpx_rhs_value)
        cpx_linear.set_range_values(rng_index, cpx_range_value)
    elif event == upd.RangeConstraintExpr:
        old_expr = rngct.expr
        new_expr = args[0]
        if old_expr.get_constant() != new_expr.get_constant():
            # need to change rhs but *not* range (ub-lb)
            cpx_rhs_value = rngct.ub - new_expr.get_constant()
            # TODO: check positive??
            cpx_linear.set_rhs(rng_index, cpx_rhs_value)
        # change expr linear components anyway
        self._switch_linear_expr(rng_index, old_expr, new_expr)
    else:  # pragma: no cover
        self._unexpected_event(event, msg='update_range_constraints')
def update_quadratic_constraint(self, qct, event, *args):
    # Posted quadratic constraints cannot be edited in place: fail fast.
    self._model.fatal('CPLEX cannot modify quadratic constraint: {0!s}', qct)
def update_extra_constraint(self, lct, qualifier, *args):
    # Extra constraints (qualified by `qualifier`) are immutable: fail fast.
    self._model.fatal('CPLEX cannot modify {1}: {0!s}', lct, qualifier)
def update_logical_constraint(self, lgct, event, *args):
    # Linear constraints embedded in logical constraints are immutable;
    # emit a message tailored to the kind of logical constraint involved.
    self._resync_if_needed()
    if isinstance(lgct, IndicatorConstraint):
        self._model.fatal('CPLEX cannot modify linear constraint of indicator: ({0!s})', lgct)
    elif isinstance(lgct, EquivalenceConstraint):
        if not lgct.is_generated():
            self._model.fatal('CPLEX cannot modify linear constraint used in equivalence: ({0!s})', lgct)
        else:
            # Generated equivalence: blame the truth-value usage instead.
            self._model.fatal('Using the truth value of the constraint: ({0!s}) makes the constraint immutable',
                              lgct.linear_constraint)
    else:  # pragma: no cover
        self._model.fatal('Unexpected type for logical constraint: {0!r}', lgct)
def update_constraint(self, ct, event, *args):
    """Dispatch a constraint-update event to the handler for its scope."""
    self._resync_if_needed()
    if not event or ct.index < 0:
        # Nothing to do for a void event or a constraint not yet posted.
        return
    scope = ct.cplex_scope
    if scope == CplexScope.LINEAR_CT_SCOPE:
        self.update_linear_constraint(ct, event, *args)
    elif scope == CplexScope.IND_CT_SCOPE:
        self.update_logical_constraint(ct, event, *args)
    elif scope == CplexScope.QUAD_CT_SCOPE:
        self.update_quadratic_constraint(ct, event, *args)
    else:
        self._model.fatal('Unexpected scope in update_constraint: {0!r}', scope)
def set_objective_sense(self, sense):
    # Translate the modeling-layer sense to its CPLEX code and apply it.
    self._resync_if_needed()
    # --- set sense
    self._cplex.objective.set_sense(sense.cplex_coef)
def _clear_objective_from_cplex(self, cpxobj):
    # Reset both the linear and the quadratic parts of the objective.
    self._clear_linear_objective_from_cplex(cpxobj)
    self._clear_quad_objective_from_cplex(cpxobj)
def _clear_multiobj_from_cplex(self, cpx_multiobj):
    """Zero out every nonzero linear coefficient of each sub-objective."""
    for obj_index in range(cpx_multiobj.get_num()):
        linear_coefs = cpx_multiobj.get_linear(obj_index)
        resets = [(var_idx, 0) for var_idx, coef in enumerate(linear_coefs) if coef]
        if resets:
            cpx_multiobj.set_linear(obj_index, resets)
def _clear_linear_objective_from_cplex(self, cpxobj):
    """Zero out every nonzero linear objective coefficient."""
    resets = [(var_idx, 0) for var_idx, coef in enumerate(cpxobj.get_linear()) if coef]
    if resets:
        # Guard mirrors the "empty list" caution used elsewhere in this engine.
        cpxobj.set_linear(resets)
def _clear_quad_objective_from_cplex(self, cpxobj):
    """Reset the quadratic part of the objective, if any."""
    # need to check before calling get_quadratic() on non-qp -> crash
    if not cpxobj.get_num_quadratic_variables():
        return
    if not cpxobj.get_quadratic():
        return
    # Reset all vars; CPLEX explicitly tests for double, hence 0.0 not 0.
    nb_vars = self._model.number_of_variables
    cpxobj.set_quadratic([0.0] * nb_vars)
def update_objective(self, expr, event, *args):
    # Incrementally propagate an objective-expression change to CPLEX;
    # the event tells which part (constant, linear, quadratic) changed.
    self._resync_if_needed()
    cpxobj = self._cplex.objective
    if event is upd.ExprConstant:
        # update the constant
        self._cplex.objective.set_offset(expr.constant)
    elif event in frozenset([upd.LinExprCoef, upd.LinExprGlobal]):
        # Linear change: clear then rewrite the linear coefficients.
        self._clear_linear_objective_from_cplex(cpxobj)
        self._set_linear_objective_coefs(cpxobj, linexpr=expr.get_linear_part())
        if event is upd.LinExprGlobal:
            cpxobj.set_offset(expr.constant)
    elif event is upd.QuadExprQuadCoef:
        # clear quad, set quad
        self._clear_quad_objective_from_cplex(cpxobj)
        self._set_quadratic_objective_coefs(cpxobj, expr)
    elif event is upd.QuadExprGlobal:
        # clear all
        self._clear_linear_objective_from_cplex(cpxobj)
        # set all
        self._set_linear_objective_coefs(cpxobj, linexpr=expr.get_linear_part())
        self._clear_quad_objective_from_cplex(cpxobj)
        self._set_quadratic_objective_coefs(cpxobj, expr)
        cpxobj.set_offset(expr.constant)
    else:  # pragma: no cover
        self._unexpected_event(event, msg='update_objective')
def set_objective_expr(self, new_objexpr, old_objexpr):
    # Replace the whole objective: clear the previous coefficients, then
    # install offset, linear part and (if any) quadratic part of the new one.
    self._resync_if_needed()
    cpx_objective = self._cplex.objective
    # old objective
    if old_objexpr is new_objexpr:
        # cannot use the old expression for clearing, it has been modified
        self._clear_objective_from_cplex(cpxobj=cpx_objective)
    elif old_objexpr is not None:
        self._clear_objective(old_objexpr)
    else:
        # no clearing
        pass
    # # if a multi-objective has been defined, clear it
    # cpx_multiobj = self._cplex.multiobj
    # if cpx_multiobj is not None:
    #     self._clear_multiobj_from_cplex(cpx_multiobj=cpx_multiobj)
    # --- set offset
    cpx_objective.set_offset(float(new_objexpr.get_constant()))
    # --- set coefficients
    if new_objexpr.is_quad_expr():
        self._fast_set_quadratic_objective(quad_expr=new_objexpr)
        self._fast_set_linear_objective(new_objexpr.linear_part)
    else:
        self._fast_set_linear_objective(linexpr=new_objexpr)
def set_multi_objective_tolerances(self, abstols, reltols):
    """Apply per-objective absolute/relative tolerances (None = leave as is)."""
    self._check_multi_objective_support()
    cpx = self._cplex
    if abstols is not None:
        for obj_idx, tol in enumerate(abstols):
            assert tol >= 0
            cpx.multiobj.set_abstol(obj_idx, abstol=tol)
    if reltols is not None:
        for obj_idx, tol in enumerate(reltols):
            assert tol >= 0
            cpx.multiobj.set_reltol(obj_idx, reltol=tol)
def set_multi_objective_exprs(self, new_multiobjexprs, old_multiobjexprs, multiobj_params=None,
                              priorities=None, weights=None, abstols=None, reltols=None, objnames=None):
    """Install a fresh list of sub-objectives in the CPLEX multi-objective API.

    Optional per-objective sequences (priorities, weights, tolerances,
    names) may be shorter than the objective list; missing entries keep
    CPLEX's "no change" sentinel (None for names).
    """
    self._check_multi_objective_support()
    cpx_multiobj = self._cplex.multiobj
    if old_multiobjexprs:
        self._clear_multiobj_from_cplex(cpx_multiobj=cpx_multiobj)

    def pick(seq, pos, fallback):
        # seq[pos] when the sequence is given and long enough, else fallback.
        return seq[pos] if seq is not None and len(seq) > pos else fallback

    # --- set number of objectives
    cpx_multiobj.set_num(len(new_multiobjexprs))
    for objidx, new_objexpr in enumerate(new_multiobjexprs):
        # --- set offset, then all coefficients/attributes in one fast call
        cpx_multiobj.set_offset(objidx, float(new_objexpr.get_constant()))
        self._fast_set_linear_multiobj(objidx, linexpr=new_objexpr,
                                       weight=pick(weights, objidx, self.DEFAULT_CPX_NO_WEIGHT_CHANGE),
                                       priority=pick(priorities, objidx, self.DEFAULT_CPX_NO_PRIORITY_CHANGE),
                                       abstol=pick(abstols, objidx, self.DEFAULT_CPX_NO_ABSTOL_CHANGE),
                                       reltol=pick(reltols, objidx, self.DEFAULT_CPX_NO_RELTOL_CHANGE),
                                       objname=pick(objnames, objidx, None))
def _set_linear_objective_coefs(self, cpx_objective, linexpr):
# NOTE: convert to float as numpy doubles will crash cplex....
# index_coef_seq = [(dv._index, float(k)) for dv, k in linexpr.iter_terms()]
# if index_coef_seq:
# # if list is empty, cplex will crash.
# cpx_objective.set_linear(index_coef_seq)
self._fast_set_linear_objective(linexpr)
def _set_quadratic_objective_coefs(self, cpx_objective, quad_expr):
quad_obj_triplets = [(qv1._index, qv2._index, 2 * qk if qv1 is qv2 else qk) for qv1, qv2, qk in
quad_expr.iter_quad_triplets()]
if quad_obj_triplets:
# if list is empty, cplex will crash.
cpx_objective.set_quadratic_coefficients(quad_obj_triplets)
def _clear_objective(self, expr):
# INTERNAL
self._resync_if_needed()
if expr.is_constant():
pass # resetting offset will do.
elif expr.is_quad_expr():
# 1. reset quad part
cpx_objective = self._cplex.objective
# -- set quad coeff to 0 for all quad variable pairs
quad_reset_triplets = [(qvp.first._index, qvp.second._index, 0) for qvp, qk in expr.iter_quads()]
if quad_reset_triplets:
cpx_objective.set_quadratic_coefficients(quad_reset_triplets)
# 2. reset linear part
self._clear_linear_objective(expr.linear_part)
else:
self._clear_linear_objective(expr)
def _clear_multiobj(self, exprs):
# INTERNAL
self._resync_if_needed()
for objidx, expr in enumerate(exprs):
if expr.is_constant():
pass # resetting offset will do.
else:
self._clear_linear_multiobj(objidx, expr)
def _clear_linear_objective(self, linexpr):
# compute the sequence of var indices, then an array of zeroes
size = linexpr.number_of_terms()
if size:
indices = [-1] * size
i = 0
for dv, _ in linexpr.iter_terms():
indices[i] = dv._index
i += 1
zeros = [0] * size
cpx = self._cplex
self.cpx_adapter.chgobj(cpx._env._e, cpx._lp, indices, zeros)
def _clear_linear_multiobj(self, objidx, linexpr):
# compute the sequence of var indices, then an array of zeroes
size = linexpr.number_of_terms()
if size:
indices = [-1] * size
i = 0
for dv, _ in linexpr.iter_terms():
indices[i] = dv._index
i += 1
zeros = [0] * size
cpx = self._cplex
self.cpx_adapter.multiobjsetobj(cpx._env._e, cpx._lp, objidx, objind=indices, objval=zeros)
@staticmethod
def status2string(cplex_module, cpx_status):  # pragma: no cover
    """Convert a CPLEX integer status value to its string name."""
    status_table = cplex_module._internal._subinterfaces.SolutionInterface.status
    return status_table[cpx_status]
# Moved to | |
import numpy as np
import six
import logging
import os
import networkx as nx
from lxml import etree
from sumolib import checkBinary
from sumolib.net import readNet
import traci
from trafficgraphnn.genconfig import ConfigGenerator
from trafficgraphnn.utils import (
parse_detector_output_xml, parse_tls_output_xml, iterfy)
_logger = logging.getLogger(__name__)
if six.PY2:
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
else:
import subprocess
class SumoNetwork(object):
    """A SUMO road network plus its additional files, exposed as a graph.

    Wraps a ``.net.xml`` file, classifies additional files into detector /
    TLS-output / other buckets, builds a lane-wise or edge-wise networkx
    graph, assembles SUMO command lines, and loads simulation output XML
    back onto the graph nodes and edges.
    """

    def __init__(
        self, netfile, lanewise=True, undirected_graph=False,
        routefile=None, addlfiles=None, binfile='sumo'
    ):
        """Parse the net file and set up bookkeeping.

        :param netfile: Path to the SUMO ``.net.xml`` file.
        :param lanewise: If True, graph nodes are lanes; otherwise edges.
        :param undirected_graph: Build an undirected graph if True.
        :param routefile: Optional route file used by simulations.
        :param addlfiles: A single additional-file path or a list of them.
        :param binfile: SUMO binary name, e.g. 'sumo' or 'sumo-gui'.
        """
        self.netfile = netfile
        self.net = readNet(netfile)
        self.undirected_graph = undirected_graph
        self.lanewise = lanewise
        self.routefile = routefile
        self.data_dfs = []  # dataframes parsed from simulation output files
        self.detector_def_files = []
        self.tls_output_def_files = []
        self.other_addl_files = []
        # Accept a single filename as well as a list.
        if isinstance(addlfiles, six.string_types):
            addlfiles = [addlfiles]
        self.additional_files = addlfiles or []
        self.classify_additional_files()
        self.tls_list = self.net.getTrafficLights()
        # tl.getLinks() returns a dict with a consistent ordering of movements
        self.config_gen = self.get_config_generator()
        self.binfile = checkBinary(binfile)
        self.reset_graph()

    @property
    def net_dir(self):
        """Directory that contains the network file."""
        return os.path.dirname(self.netfile)

    @classmethod
    def from_gen_config(
        cls, config_gen, lanewise=True, undirected_graph=False,
    ):
        """Build a SumoNetwork from a ConfigGenerator's output files."""
        return cls(
            config_gen.net_output_file, lanewise=lanewise,
            undirected_graph=undirected_graph, routefile=config_gen.routefile,
            addlfiles=(
                list(iterfy(config_gen.detector_def_files))
                + list(iterfy(config_gen.non_detector_addl_files))),
        )

    @classmethod
    def from_preexisting_directory(
            cls, directory, lanewise=True, undirected_graph=False):
        """Build a SumoNetwork from a directory laid out by this project.

        Expects ``<name>.net.xml``, ``<name>_e1.add.xml``,
        ``<name>_e2.add.xml``, ``tls_output.add.xml`` and
        ``<name>_rand.routes.xml`` inside *directory*, where ``<name>`` is
        the directory's basename.
        """
        net_name = os.path.basename(os.path.normpath(directory))
        net_file = os.path.join(directory, net_name + '.net.xml')
        addlfiles = [os.path.join(directory, net_name + '_e1.add.xml'),
                     os.path.join(directory, net_name + '_e2.add.xml'),
                     os.path.join(directory, 'tls_output.add.xml')]
        routefile = os.path.join(directory, net_name + '_rand.routes.xml')
        return cls(
            net_file, lanewise=lanewise, undirected_graph=undirected_graph,
            routefile=routefile, addlfiles=addlfiles)

    def classify_additional_files(self):
        """Sort additional files into detector / TLS-output / other buckets.

        Classification looks only at the first matching XML element of each
        file; a file is appended to at most one bucket and never duplicated.
        """
        for addlfile in self.additional_files:
            # Stream-parse so only the first relevant element is materialized.
            tree = etree.iterparse(addlfile, tag=['e1Detector', 'inductionLoop',
                                                  'e2Detector', 'laneAreaDetector',
                                                  'timedEvent',])
            _, element = next(tree)
            if element.tag in ['e1Detector', 'inductionLoop',
                               'e2Detector', 'laneAreaDetector']:
                if addlfile not in self.detector_def_files:
                    self.detector_def_files.append(addlfile)
            elif element.tag == 'timedEvent' and element.get('type') == 'SaveTLSSwitchTimes':
                if addlfile not in self.tls_output_def_files:
                    self.tls_output_def_files.append(addlfile)
            else:
                if addlfile not in self.other_addl_files:
                    self.other_addl_files.append(addlfile)
            element.clear()  # free the parsed element's memory

    def set_routefile(self, routefile):
        """Point simulations at an existing route file."""
        assert os.path.exists(routefile)
        self.routefile = routefile

    def add_additional_file(self, addlfile):
        """Register one more additional file and re-run classification."""
        assert os.path.exists(addlfile)
        self.additional_files.append(addlfile)
        self.classify_additional_files()

    def clear_additional_files(self):
        """Forget all additional files, including classification buckets."""
        self.additional_files = []
        self.detector_def_files = []
        self.tls_output_def_files = []
        self.other_addl_files = []

    def get_sumo_command(self, with_bin_file=True, queue_output_file=None,
                         seed=None, extra_args=None):
        """Assemble the SUMO command line for this network.

        :param with_bin_file: Prepend the binary path (pass False to get
            argument-only form for ``traci.load()``).
        :param queue_output_file: Optional ``--queue-output`` destination.
        :param seed: Simulation seed; a random 16-bit value when None.
        :param extra_args: Extra CLI arguments (strings or iterables).
        :raises ValueError: If no route file has been set.
        """
        if self.routefile is None:
            raise ValueError('Route file not set.')
        sumo_args = [
            '--net-file', self.netfile,
            '--route-files', self.routefile,
            '--no-step-log',  # remove progress bar
            '--collision.action', 'none',  # don't teleport vehicles when they collide
            '--time-to-teleport', '-1',  # remove teleporting when vehicles queue for extended periods,
            '--device.rerouting.probability', '.5',  # give cars the ability to reroute themselves so queues won't grow unboundedly
            '--device.rerouting.period', '60',
        ]
        if extra_args is not None:
            for arg in extra_args:
                sumo_args.extend(iterfy(arg))
        if len(self.additional_files) > 0:
            sumo_args.extend(
                ['--additional-files', ','.join(self.additional_files)]
            )
        if queue_output_file is not None:
            sumo_args.extend(
                ['--queue-output', queue_output_file]
            )
        if seed is not None:
            assert type(seed) in six.integer_types
        else:
            _logger.warning('Seed not set, SUMO seed will be random.')
            # assume sumo uses basic C 16-bit integers
            seed = np.random.randint(np.iinfo(np.int16).max)
        sumo_args.extend(['--seed', str(seed)])
        # NOTE(review): self.binfile has been resolved by checkBinary() in
        # __init__, so it may be a full path here — verify this comparison
        # still matches the GUI binary.
        if self.binfile == 'sumo-gui':
            sumo_args.extend(['--start', '--quit'])
        if with_bin_file:
            return [self.binfile] + sumo_args
        else:
            return sumo_args  # used for passing to traci.load()

    def start(self):
        """Launch SUMO and connect traci to it."""
        traci.start(self.get_sumo_command())

    def run(self, return_output=False, extra_args=None, **kwargs):
        """Run the simulation as a blocking subprocess and log its output.

        :param return_output: If True, return SUMO's captured stdout bytes.
        :param extra_args: Forwarded to :meth:`get_sumo_command`.
        """
        out = subprocess.check_output(
            self.get_sumo_command(extra_args=extra_args, **kwargs))
        if out is not None and len(out) > 0:
            _logger.info('sumo returned: %s', out)
        elif len(out) == 0:
            _logger.info('sumo completed.')
        if return_output:
            return out

    def sorted_lanes_for_edge(self, edge_id):
        """Return lane IDs of *edge_id*, ordered by lane index."""
        lanes = self.net.getEdge(edge_id).getLanes()
        lanes.sort(key=lambda x: x.getIndex())
        return [lane.getID() for lane in lanes]

    def reset_graph(self, undirected=None, lanewise=None):
        """(Re)build ``self.graph``; optionally switch graph flavor first.

        :param undirected: If not None, overwrite ``self.undirected_graph``.
        :param lanewise: If not None, overwrite ``self.lanewise``.
        """
        if undirected is not None:
            self.undirected_graph = undirected
        if lanewise is not None:
            self.lanewise = lanewise
        if self.lanewise:
            self.graph = get_lane_graph(
                self.netfile, undirected=self.undirected_graph,
                detector_def_files=self.detector_def_files,
                tls_output_def_files=self.tls_output_def_files)
        else:
            self.graph = get_edge_graph(
                self.netfile, undirected=self.undirected_graph,
                additional_files=self.additional_files)

    def get_graph(self):
        """Return the built graph; it must have been constructed already."""
        assert self.graph is not None
        return self.graph

    def set_new_graph(self, new_graph):
        """Replace the stored graph with *new_graph*."""
        self.graph = new_graph

    def lanes_with_detectors(self):
        """Return node IDs that carry detector metadata."""
        return [lane for lane, lane_data in self.graph.nodes.data('detectors')
                if lane_data is not None]

    def get_neighboring_lanes(self, lane_id, include_input_lane=False):
        """Get ids of lanes in the same edge as the passed one.

        :param lane_id: ID of the lane to get the neighbors of.
        :type lane_id: str
        :param include_input_lane: Whether to include the input lane_id in the returned list.
        :type include_input_lane: bool
        :raises TypeError: If lane_id is not str
        :return: list of neighboring lanes
        :rtype: list
        """
        if not isinstance(lane_id, str):
            raise TypeError('Expected str, got %s', type(lane_id))
        sumolib_lane = self.net.getLane(lane_id)
        parent_edge = sumolib_lane.getEdge()
        ids = [lane.getID() for lane in parent_edge.getLanes()]
        if not include_input_lane:
            ids.remove(lane_id)
        return ids

    def get_lane_graph_for_neighboring_lanes(self, include_self_adjacency=True):
        """Return networkx graph with edges connecting lanes in the same road (ie same Sumo `edge').

        :param include_self_adjacency: If True, include A[i,i] = 1.
        :type include_self_adjacency: bool
        """
        if not self.lanewise:
            raise ValueError('Cannot use this method for a non-lanewise graph.')
        graph_copy = self._graph_shallow_copy(no_edges=True)
        for lane in self.graph.nodes:
            neigh_lanes = self.get_neighboring_lanes(
                lane, include_input_lane=include_self_adjacency)
            for neigh in neigh_lanes:
                graph_copy.add_edge(lane, neigh)
        return graph_copy

    def _graph_shallow_copy(self, no_edges=False):
        """Utility function: Get copy of graph without data (nodes/edges only).

        :param no_edges: If True, only copy over the nodes.
        :type no_edges: bool
        """
        # NOTE(review): Graph.fresh_copy() was removed in networkx 2.4;
        # confirm the pinned networkx version still provides it.
        copy = self.graph.fresh_copy()
        copy.add_nodes_from(self.graph.nodes())
        if not no_edges:
            copy.add_edges_from(self.graph.edges())
        return copy

    def get_lane_graph_for_conn_type(self,
                                     edge_classes,
                                     edge_class_field='direction'):
        """Return networkx graph with edges for only certain class(es) of connection (e.g., direction: l, r).

        :param edge_classes: List of strings of class(es) requested
        :type edge_classes: list
        :param edge_class_field: Field name in edge attribute dict to reference.
            defaults to 'direction'
        :type edge_class_field: str
        """
        if not self.lanewise:
            raise ValueError('Cannot use this method for a non-lanewise graph.')
        edge_classes = iterfy(edge_classes)
        assert all([isinstance(x, str) for x in edge_classes])
        graph_copy = self._graph_shallow_copy(no_edges=True)
        for in_lane, out_lane, edge_class in self.graph.edges.data(edge_class_field):
            if edge_class in edge_classes:
                graph_copy.add_edge(in_lane, out_lane)
        return graph_copy

    def get_lane_graph_for_thru_movements(self):
        """Gets the networkx graph with edges only for through (straight) movements.

        :return: Adjacency matrix
        :rtype: networkx.DiGraph
        """
        return self.get_lane_graph_for_conn_type(['s'])

    def get_lane_graph_for_turn_movements(self):
        """Gets the networkx graph with edges only for turn movements.

        :return: Adjacency matrix
        :rtype: networkx.DiGraph
        """
        return self.get_lane_graph_for_conn_type(['l', 'r'])

    def get_adjacency_matrix(self, undirected=False):
        """Gets the adjacency matrix for the loaded graph.

        :param undirected: Whether to return the undirected (symmetric) matrix, defaults to False
        :param undirected: bool, optional
        :return: Adjacency matrix
        :rtype: Scipy sparse matrix
        """
        graph = self.get_graph()
        if undirected:
            graph = graph.to_undirected(as_view=True)
        A = nx.adjacency_matrix(graph)
        return A

    def get_config_generator(self):
        """Create a ConfigGenerator matching this net's naming scheme."""
        # Double splitext strips the compound '.net.xml' extension.
        config_gen = ConfigGenerator(
            os.path.splitext(
                os.path.splitext(
                    os.path.basename(self.netfile))[0])[0],
            net_config_dir=os.path.dirname(
                os.path.dirname(self.netfile)),
        )
        return config_gen

    def load_data_to_graph(self, features=None):
        """Parse detector and TLS output files and attach them to the graph.

        Fills ``self.data_dfs`` and stores per-node ``data_series`` and
        per-edge ``switch_times`` entries. No-op when no graph is built.

        :param features: Optional feature subset forwarded to the detector
            output parser.
        """
        if self.graph is None:
            return
        self.data_dfs = []
        # Map each output file to the detector IDs it contains.
        detectors_in_files = {}
        det_to_data = {}
        for node, detector_data in self.graph.nodes.data('detectors'):
            if detector_data is not None:
                for det_id, data in detector_data.items():
                    if data['file'] not in detectors_in_files.keys():
                        detectors_in_files[data['file']] = []
                    detectors_in_files[data['file']].append(det_id)
                    det_to_data[det_id] = data
        det_to_node = self.det_to_node_dict()
        for file, det_list in detectors_in_files.items():
            filename = os.path.join(os.path.dirname(self.netfile), file)
            if not os.path.exists(filename):
                continue  # this output file was not produced
            df = parse_detector_output_xml(filename, det_list, features)
            # Add a node_id index level alongside det_id.
            df['node_id'] = df.index.get_level_values('det_id')
            df['node_id'].replace(det_to_node, inplace=True)
            df.set_index('node_id', append=True, inplace=True)
            df = df.swaplevel('det_id', 'node_id')
            self.data_dfs.append(df)
            for det_id in det_list:
                det_data = det_to_data[det_id]
                det_data['data_series'] = df.xs(det_id, level='det_id')
        for node_id, data in self.graph.nodes.data():
            if 'detectors' in data:
                data['data_series'] = [df.xs(node_id, level='node_id')
                                       for df in self.data_dfs]
        # Traffic-light switch-time outputs: file -> set of TLS IDs.
        tlses_in_files = {}
        for (in_node, out_node, data
             ) in self.graph.edges.data('tls_output_info'):
            if data is not None:
                file = data['dest']
                if file not in tlses_in_files:
                    tlses_in_files[file] = set()
                tlses_in_files[file].add(data['source'])
        for file, tls_list in tlses_in_files.items():
            filename = os.path.join(os.path.dirname(self.netfile), file)
            df = parse_tls_output_xml(filename)
            for from_lane, to_lane, data in self.graph.edges.data():
                if data['tls'] in tls_list:
                    data['switch_times'] = df.xs(
                        (from_lane, to_lane),
                        level=('fromLane', 'toLane'), drop_level=False)
            self.data_dfs.append(df)

    # TODO? version where graph.nodes are roads instead of lanes
    def get_lane_data_and_adj_matrix(self, node_ordering=None):
        """
        Returns a tuple of (A, X), where A is a Scipy sparse matrix from the
        networkx graph and X is a numpy ndarray of the data.
        X will have dimensions time x node x feature

        :param node_ordering: (optional) Iterable of node names. If passed a
            value, this function will return A and X in that order. If None
            (default) will return the order determined by networkx.
        :type node_ordering: Iterable
        """
        # Not implemented yet; everything below the raise is an unreachable
        # sketch kept for future work.
        raise NotImplementedError
        if self.graph is None:
            raise ValueError('Graph not set.')
        if len(self.data_dfs) == 0:
            raise ValueError('No data loaded.')
        graph = self.get_graph()
        A = nx.adj_matrix(graph, node_ordering)
        det_to_node = self.det_to_node_dict()

    def det_to_node_dict(self):
        """Map detector ID -> graph node that carries it.

        :raises ValueError: If the graph has not been built.
        """
        if self.graph is None:
            raise ValueError("Graph not set.")
        graph = self.get_graph()
        det_to_node = {}
        for node, det_dict in graph.nodes('detectors'):
            if det_dict is not None:
                for det_id, _ in det_dict.items():
                    det_to_node[det_id] = node
        return det_to_node
def get_lane_graph(netfile,
undirected=False,
detector_def_files=None,
tls_output_def_files=None):
net = readNet(netfile)
if undirected:
graph = nx.Graph()
else:
graph = nx.DiGraph()
for edge in net.getEdges():
for lane in edge.getLanes():
graph.add_node(lane.getID(), length=lane.getLength())
tls_to_edges = {}
for node in net.getNodes():
for conn in node.getConnections():
tls_id = conn.getTLSID()
if tls_id not in tls_to_edges:
tls_to_edges[tls_id] = []
edge_from_to = (conn.getFromLane().getID(),
conn.getToLane().getID())
graph.add_edge(
*edge_from_to,
direction=conn.getDirection(),
tls=tls_id)
tls_to_edges[tls_id].append(edge_from_to)
# sanity check
tls_to_edges_2 = {
tl.getID():
[tuple([lane.getID() for lane in conn[:-1]]) for | |
activation='sigmoid')(x)
else:
x = conv3d_model2.output
x = Dense(name='emb_' + data_types[1] + self._backbone,
units=attention_size,
activation='sigmoid')(x)
encoder_outputs.append(x)
##############################################
for i in range(2, core_size):
network_inputs.append(Input(shape=data_sizes[i], name='input_' + data_types[i]))
encoder_outputs.append(self._rnn(name='enc_' + data_types[i], r_sequence=return_sequence)(network_inputs[i]))
if len(encoder_outputs) > 1:
att_enc_out = []
x = Lambda(lambda x: K.expand_dims(x, axis=1))(encoder_outputs[0])
att_enc_out.append(x) # first output is from 3d conv netwrok
x = Lambda(lambda x: K.expand_dims(x, axis=1))(encoder_outputs[1])
att_enc_out.append(x) # second output is from 3d conv netwrok
# for recurrent branches apply many-to-one attention block
for i, enc_out in enumerate(encoder_outputs[2:]):
x = attention_3d_block(enc_out, dense_size=attention_size, modality='_'+data_types[i])
x = Dropout(0.5)(x)
x = Lambda(lambda x: K.expand_dims(x, axis=1))(x)
att_enc_out.append(x)
# aplly many-to-one attention block to the attended modalities
x = Concatenate(name='concat_modalities', axis=1)(att_enc_out)
encodings = attention_3d_block(x, dense_size=attention_size, modality='_modality')
#print(encodings.shape)
#print(weights_softmax.shape)
else:
encodings = encoder_outputs[0]
model_output = Dense(1, activation='sigmoid',
name='output_dense',
activity_regularizer=regularizers.l2(0.001))(encodings)
net_model = Model(inputs=network_inputs,
outputs=model_output)
net_model.summary()
plot_model(net_model, to_file='MASK_PCPA.png')
return net_model
class MASK_PCPA_2(ActionPredict):
    """
    MASK_PCPA_2: pedestrian crossing prediction combining local context with global context
    early fusion (fuse POSE, BOX, SPEED)
    """

    def __init__(self,
                 num_hidden_units=256,
                 cell_type='gru',
                 **kwargs):
        """
        Class init function

        Args:
            num_hidden_units: Number of recurrent hidden layers
            cell_type: Type of RNN cell ('gru' or 'lstm')
            **kwargs: Passed through to ActionPredict
        """
        super().__init__(**kwargs)
        # Network parameters
        self._num_hidden_units = num_hidden_units
        self._rnn = self._gru if cell_type == 'gru' else self._lstm
        self._rnn_cell = GRUCell if cell_type == 'gru' else LSTMCell
        assert self._backbone in ['c3d', 'i3d'], 'Incorrect backbone {}! Should be C3D or I3D'.format(self._backbone)
        # Two 3D conv backbones: one for imagery, one for the mask stream.
        self._3dconv = C3DNet if self._backbone == 'c3d' else I3DNet
        self._3dconv2 = C3DNet2 if self._backbone == 'c3d' else I3DNet

    def get_data(self, data_type, data_raw, model_opts):
        """Assemble the per-modality feature arrays for one data split.

        Args:
            data_type: Split name (e.g. 'train' / 'val' / 'test').
            data_raw: Raw sequence data from the dataset interface.
            model_opts: Options dict; obs_length must be 16.

        Returns:
            Dict with 'data', 'ped_id', 'tte', 'image', 'data_params'
            and 'count' entries.
        """
        assert model_opts['obs_length'] == 16
        model_opts['normalize_boxes'] = False
        self._generator = model_opts.get('generator', False)
        data_type_sizes_dict = {}
        process = model_opts.get('process', True)
        dataset = model_opts['dataset']
        data, neg_count, pos_count = self.get_data_sequence(data_type, data_raw, model_opts)
        data_type_sizes_dict['box'] = data['box'].shape[1:]
        if 'speed' in data.keys():
            data_type_sizes_dict['speed'] = data['speed'].shape[1:]
        # Store the type and size of each image
        _data = []
        data_sizes = []
        data_types = []
        model_opts_3d = model_opts.copy()
        for d_type in model_opts['obs_input_type']:
            if 'local' in d_type or 'context' in d_type or 'mask' in d_type:
                # Visual modalities go through the 3D conv pipeline.
                if self._backbone == 'c3d':
                    model_opts_3d['target_dim'] = (112, 112)
                model_opts_3d['process'] = False
                features, feat_shape = self.get_context_data(model_opts_3d, data, data_type, d_type)
            elif 'pose' in d_type:
                path_to_pose, _ = get_path(save_folder='poses',
                                           dataset=dataset,
                                           save_root_folder='data/features')
                features = get_pose(data['image'],
                                    data['ped_id'],
                                    data_type=data_type,
                                    file_path=path_to_pose,
                                    dataset=model_opts['dataset'])
                feat_shape = features.shape[1:]
            else:
                # Non-visual features (box, speed, ...) come straight from data.
                features = data[d_type]
                feat_shape = features.shape[1:]
            _data.append(features)
            data_sizes.append(feat_shape)
            data_types.append(d_type)
        # create the final data file to be returned
        if self._generator:
            _data = (DataGenerator(data=_data,
                                   labels=data['crossing'],
                                   data_sizes=data_sizes,
                                   process=process,
                                   global_pooling=self._global_pooling,
                                   input_type_list=model_opts['obs_input_type'],
                                   batch_size=model_opts['batch_size'],
                                   shuffle=data_type != 'test',
                                   to_fit=data_type != 'test'), data['crossing'])  # set y to None
        else:
            _data = (_data, data['crossing'])
        return {'data': _data,
                'ped_id': data['ped_id'],
                'tte': data['tte'],
                'image': data['image'],
                'data_params': {'data_types': data_types, 'data_sizes': data_sizes},
                'count': {'neg_count': neg_count, 'pos_count': pos_count}}

    def get_model(self, data_params):
        """Build the early-fusion Keras model.

        The first two inputs feed the two 3D conv backbones (context and
        mask); the remaining inputs are concatenated along the feature axis
        ("early fusion") and encoded by a single RNN. All branch encodings
        are fused through attention before the sigmoid output.
        """
        return_sequence = True
        data_sizes = data_params['data_sizes']
        data_types = data_params['data_types']
        network_inputs = []
        encoder_outputs = []
        core_size = len(data_sizes)
        # conv3d_model = self._3dconv()
        network_inputs.append(Input(shape=data_sizes[0], name='input_' + data_types[0]))
        conv3d_model = self._3dconv(input_data=network_inputs[0])
        # network_inputs.append(conv3d_model.input)
        attention_size = self._num_hidden_units
        # Embed the first 3D conv branch into the attention dimension
        # (I3D output needs flattening first; C3D output is already flat).
        if self._backbone == 'i3d':
            x = Flatten(name='flatten_output')(conv3d_model.output)
            x = Dense(name='emb_' + self._backbone,
                      units=attention_size,
                      activation='sigmoid')(x)
        else:
            x = conv3d_model.output
            x = Dense(name='emb_' + self._backbone,
                      units=attention_size,
                      activation='sigmoid')(x)
        encoder_outputs.append(x)
        # image features from mask
        # conv3d_model2 = self._3dconv2()
        network_inputs.append(Input(shape=data_sizes[1], name='input2_' + data_types[1]))
        conv3d_model2 = self._3dconv2(input_data=network_inputs[1])
        # for layer in conv3d_model2.layers:
        #     layer.name = layer.name + str("_2")
        # network_inputs.append(conv3d_model2.input)
        attention_size = self._num_hidden_units
        if self._backbone == 'i3d':
            x = Flatten(name='flatten_output_2')(conv3d_model2.output)
            x = Dense(name='emb_' + data_types[1] + self._backbone,
                      units=attention_size,
                      activation='sigmoid')(x)
        else:
            x = conv3d_model2.output
            x = Dense(name='emb_' + data_types[1] + self._backbone,
                      units=attention_size,
                      activation='sigmoid')(x)
        encoder_outputs.append(x)
        ##############################################
        # Early fusion: concatenate the remaining (non-visual) modality
        # inputs along the feature axis and encode them with one RNN.
        earlyfusion = []
        for i in range(2, core_size):
            network_inputs.append(Input(shape=data_sizes[i], name='input_' + data_types[i]))
            # network_inputs[i] = tf.cast(network_inputs[i], tf.int32, name='input_int32'+ data_types[i])
            # result = tf.nn.conv2d(network_inputs[i], 3, [1,1,1,1],'SAME')
            earlyfusion.append(network_inputs[i])
            # encoder_outputs.append(
            #     self._rnn(name='enc_' + data_types[i], r_sequence=return_sequence)(network_inputs[i]))
            # network_inputs.append(Input(shape=data_sizes[i], name='input_' + data_types[i]))
        x = Concatenate(name='concat_early', axis=2)(earlyfusion)
        x = tf.nn.l2_normalize(x, 2, epsilon=1e-12, name='norm_earlyfusion')
        encoder_outputs.append(self._rnn(name='enc_earlyfusion', r_sequence=return_sequence)(x))
        if len(encoder_outputs) > 1:
            att_enc_out = []
            x = Lambda(lambda x: K.expand_dims(x, axis=1))(encoder_outputs[0])
            att_enc_out.append(x)  # first output is from 3d conv network
            x = Lambda(lambda x: K.expand_dims(x, axis=1))(encoder_outputs[1])
            att_enc_out.append(x)  # second output is from 3d conv network
            # for recurrent branches apply many-to-one attention block
            for i, enc_out in enumerate(encoder_outputs[2:]):
                x = attention_3d_block(enc_out, dense_size=attention_size, modality='_' + data_types[i])
                x = Dropout(0.5)(x)
                x = Lambda(lambda x: K.expand_dims(x, axis=1))(x)
                att_enc_out.append(x)
            # apply many-to-one attention block to the attended modalities
            x = Concatenate(name='concat_modalities', axis=1)(att_enc_out)
            encodings = attention_3d_block(x, dense_size=attention_size, modality='_modality')
            # print(encodings.shape)
            # print(weights_softmax.shape)
        else:
            encodings = encoder_outputs[0]
        model_output = Dense(1, activation='sigmoid',
                             name='output_dense',
                             activity_regularizer=regularizers.l2(0.001))(encodings)
        net_model = Model(inputs=network_inputs,
                          outputs=model_output)
        net_model.summary()
        plot_model(net_model, to_file='MASK_PCPA_2.png')
        return net_model
class MASK_PCPA_3(ActionPredict):
"""
MASK_PCPA_3: pedestrian crossing prediction combining local context with global context
hierachical fusion (fuse POSE,BOX,SPEED)
"""
def __init__(self,
num_hidden_units=256,
cell_type='gru',
**kwargs):
"""
Class init function
Args:
num_hidden_units: Number of recurrent hidden layers
cell_type: Type of RNN cell
**kwargs: Description
"""
super().__init__(**kwargs)
# Network parameters
self._num_hidden_units = num_hidden_units
self._rnn = self._gru if cell_type == 'gru' else self._lstm
self._rnn_cell = GRUCell if cell_type == 'gru' else LSTMCell
assert self._backbone in ['c3d', 'i3d'], 'Incorrect backbone {}! Should be C3D or I3D'.format(self._backbone)
self._3dconv = C3DNet if self._backbone == 'c3d' else I3DNet
self._3dconv2 = C3DNet2 if self._backbone == 'c3d' else I3DNet
def get_data(self, data_type, data_raw, model_opts):
assert model_opts['obs_length'] == 16
model_opts['normalize_boxes'] = False
self._generator = model_opts.get('generator', False)
data_type_sizes_dict = {}
process = model_opts.get('process', True)
dataset = model_opts['dataset']
data, neg_count, pos_count = self.get_data_sequence(data_type, data_raw, model_opts)
data_type_sizes_dict['box'] = data['box'].shape[1:]
if 'speed' in data.keys():
data_type_sizes_dict['speed'] = data['speed'].shape[1:]
# Store the type and size of each image
_data = []
data_sizes = []
data_types = []
model_opts_3d = model_opts.copy()
for d_type in model_opts['obs_input_type']:
if 'local' in d_type or 'context' in d_type or 'mask' in d_type:
if self._backbone == 'c3d':
model_opts_3d['target_dim'] = (112, 112)
model_opts_3d['process'] = False
features, feat_shape = self.get_context_data(model_opts_3d, data, data_type, d_type)
elif 'pose' in d_type:
path_to_pose, _ = get_path(save_folder='poses',
dataset=dataset,
save_root_folder='data/features')
features = get_pose(data['image'],
data['ped_id'],
data_type=data_type,
file_path=path_to_pose,
dataset=model_opts['dataset'])
feat_shape = features.shape[1:]
else:
features = data[d_type]
feat_shape = features.shape[1:]
_data.append(features)
data_sizes.append(feat_shape)
data_types.append(d_type)
# create the final data file to be returned
if self._generator:
_data = (DataGenerator(data=_data,
labels=data['crossing'],
data_sizes=data_sizes,
process=process,
global_pooling=self._global_pooling,
input_type_list=model_opts['obs_input_type'],
batch_size=model_opts['batch_size'],
shuffle=data_type != 'test',
to_fit=data_type != 'test'), data['crossing']) # set y to None
else:
_data = (_data, data['crossing'])
return {'data': _data,
'ped_id': data['ped_id'],
'tte': data['tte'],
'image': data['image'],
'data_params': {'data_types': data_types, 'data_sizes': data_sizes},
'count': {'neg_count': neg_count, 'pos_count': pos_count}}
def get_model(self, data_params):
return_sequence = True
data_sizes = data_params['data_sizes']
data_types = data_params['data_types']
network_inputs = []
encoder_outputs = []
core_size = len(data_sizes)
# conv3d_model = self._3dconv()
network_inputs.append(Input(shape=data_sizes[0], name='input_' + data_types[0]))
conv3d_model = self._3dconv(input_data=network_inputs[0])
# network_inputs.append(conv3d_model.input)
attention_size = self._num_hidden_units
if self._backbone == 'i3d':
x = Flatten(name='flatten_output')(conv3d_model.output)
x = Dense(name='emb_' + self._backbone,
units=attention_size,
activation='sigmoid')(x)
else:
x = conv3d_model.output
x = Dense(name='emb_' + self._backbone,
units=attention_size,
activation='sigmoid')(x)
encoder_outputs.append(x)
# image features from mask
# conv3d_model2 = self._3dconv2()
network_inputs.append(Input(shape=data_sizes[1], name='input2_' + data_types[1]))
conv3d_model2 = self._3dconv2(input_data=network_inputs[1])
# for layer in conv3d_model2.layers:
# layer.name = layer.name + str("_2")
# network_inputs.append(conv3d_model2.input)
attention_size = self._num_hidden_units
if self._backbone == 'i3d':
x = Flatten(name='flatten_output_2')(conv3d_model2.output)
x = Dense(name='emb_' + data_types[1] + self._backbone,
units=attention_size,
activation='sigmoid')(x)
else:
x = conv3d_model2.output
x = Dense(name='emb_' + data_types[1] + self._backbone,
units=attention_size,
activation='sigmoid')(x)
encoder_outputs.append(x)
##############################################
#### to do : hierfusion
###
for i in range(2, core_size):
network_inputs.append(Input(shape=data_sizes[i], name='input_' + data_types[i]))
# network_inputs[i] = tf.cast(network_inputs[i], tf.int32, name='input_int32'+ data_types[i])
# result = tf.nn.conv2d(network_inputs[i], 3, [1,1,1,1],'SAME')
x = self._rnn(name='enc_' + data_types[2], r_sequence=return_sequence)(network_inputs[2])
# hierfusion=x
current = [x,network_inputs[3]]
x = Concatenate(name='concat_early1', axis=2)(current)
x = self._rnn(name='enc1_' + data_types[3], r_sequence=return_sequence)(x)
# hierfusion.append(x)
current = [x,network_inputs[4]]
x = Concatenate(name='concat_early2', axis=2)(current)
x = self._rnn(name='enc2_' + data_types[4], r_sequence=return_sequence)(x)
encoder_outputs.append(x)
# encoder_outputs.append(
# self._rnn(name='enc_' + data_types[i], r_sequence=return_sequence)(network_inputs[i]))
# network_inputs.append(Input(shape=data_sizes[i], name='input_' + data_types[i]))
# x = Concatenate(name='concat_early', axis=2)(earlyfusion)
# x = tf.nn.l2_normalize(x, 2, epsilon=1e-12, name='norm_earlyfusion')
# encoder_outputs.append(self._rnn(name='enc_hierfusion', r_sequence=return_sequence)(x))
if len(encoder_outputs) > 1:
att_enc_out = []
x = Lambda(lambda x: K.expand_dims(x, axis=1))(encoder_outputs[0])
att_enc_out.append(x) # first output is from 3d conv netwrok
x = Lambda(lambda x: K.expand_dims(x, axis=1))(encoder_outputs[1])
att_enc_out.append(x) # second output is from 3d conv netwrok
# for recurrent branches apply many-to-one attention block
for i, enc_out in enumerate(encoder_outputs[2:]):
x = attention_3d_block(enc_out, dense_size=attention_size, modality='final_' + data_types[i])
x = Dropout(0.5)(x)
x = Lambda(lambda x: K.expand_dims(x, axis=1))(x)
att_enc_out.append(x)
# aplly many-to-one attention block to the attended modalities
x | |
"get_membership_status",
return_value=self.member_status) as patch_getmem,\
patch.object(self.syn, "get_team_open_invitations",
return_value=[]) as patch_get_invites,\
patch.object(self.syn, "getUserProfile",
return_value=self.profile) as patch_get_profile,\
patch.object(self.syn, "send_membership_invitation",
return_value=self.response) as patch_invitation:
invite = self.syn.invite_to_team(self.team, user=self.userid)
patch_getmem.assert_called_once_with(self.userid,
self.team.id)
patch_get_profile.assert_called_once_with(self.userid)
patch_get_invites.assert_called_once_with(self.team.id)
patch_invitation.assert_not_called()
assert invite is None
def test_invite_to_team__user_openinvite(self):
    """Inviting a non-member who already holds an open invitation returns None."""
    self.member_status['isMember'] = False
    pending_invite = {'inviteeId': self.userid}
    with patch.object(self.syn, "get_membership_status",
                      return_value=self.member_status) as mock_status,\
            patch.object(self.syn, "get_team_open_invitations",
                         return_value=[pending_invite]) as mock_open_invites,\
            patch.object(self.syn, "getUserProfile",
                         return_value=self.profile) as mock_profile,\
            patch.object(self.syn, "_delete_membership_invitation") as mock_delete,\
            patch.object(self.syn, "send_membership_invitation",
                         return_value=self.response) as mock_send:
        result = self.syn.invite_to_team(self.team, user=self.userid)
        assert result is None
        mock_status.assert_called_once_with(self.userid, self.team.id)
        mock_profile.assert_called_once_with(self.userid)
        mock_open_invites.assert_called_once_with(self.team.id)
        mock_send.assert_not_called()
        mock_delete.assert_not_called()
def test_invite_to_team__email_openinvite(self):
    """None returned when the email already has an open invitation."""
    invite_body = {'inviteeEmail': self.email}
    with patch.object(self.syn, "get_team_open_invitations",
                      return_value=[invite_body]) as patch_get_invites,\
            patch.object(self.syn, "_delete_membership_invitation") as patch_delete,\
            patch.object(self.syn, "send_membership_invitation",
                         return_value=self.response) as patch_invitation:
        invite = self.syn.invite_to_team(self.team, inviteeEmail=self.email)
        patch_get_invites.assert_called_once_with(self.team.id)
        patch_invitation.assert_not_called()
        # Without force=True the existing invitation must not be deleted.
        # (A duplicated trailing assert_not_called was removed here.)
        patch_delete.assert_not_called()
        assert invite is None
def test_invite_to_team__none_matching_invitation(self):
"""Invitation sent when no matching open invitations"""
invite_body = {'inviteeEmail': self.email + "foo"}
with patch.object(self.syn, "get_team_open_invitations",
return_value=[invite_body]) as patch_get_invites,\
patch.object(self.syn, "_delete_membership_invitation") as patch_delete,\
patch.object(self.syn, "send_membership_invitation",
return_value=self.response) as patch_invitation:
invite = self.syn.invite_to_team(self.team, inviteeEmail=self.email)
patch_get_invites.assert_called_once_with(self.team.id)
patch_delete.assert_not_called()
assert invite == self.response
patch_invitation.assert_called_once()
def test_invite_to_team__force_invite(self):
"""Invitation sent when force the invite, make sure open invitation
is deleted"""
open_invitations = {'inviteeEmail': self.email, 'id': '9938'}
with patch.object(self.syn, "get_team_open_invitations",
return_value=[open_invitations]) as patch_get_invites,\
patch.object(self.syn, "_delete_membership_invitation") as patch_delete,\
patch.object(self.syn, "send_membership_invitation",
return_value=self.response) as patch_invitation:
invite = self.syn.invite_to_team(self.team, inviteeEmail=self.email, force=True)
patch_get_invites.assert_called_once_with(self.team.id)
patch_delete.assert_called_once_with(open_invitations['id'])
assert invite == self.response
patch_invitation.assert_called_once()
class TestRestCalls:
    """Verifies the behavior of the rest[METHOD] functions on the synapse client."""

    @pytest.fixture(autouse=True, scope='function')
    def init_syn(self, syn):
        # Bind the shared `syn` fixture onto the test instance for convenience.
        self.syn = syn

    def _method_test_complete_args(self, method, body_expected):
        """Verify we pass through to the unified _rest_call helper method with explicit args"""
        uri = '/bar'
        # restPOST/restPUT take a body positional arg; restGET/restDELETE do not.
        body = b'foo' if body_expected else None
        endpoint = 'https://foo.com'
        headers = {'foo': 'bar'}
        retryPolicy = {'retry_status_codes': [500]}
        requests_session = create_autospec(requests.Session)
        kwargs = {'stream': True}
        syn_args = [uri]
        if body_expected:
            syn_args.append(body)
        syn_kwargs = {
            'endpoint': endpoint,
            'headers': headers,
            'retryPolicy': retryPolicy,
            'requests_session': requests_session,
        }
        syn_kwargs.update(kwargs)
        # e.g. method='get' -> self.syn.restGET
        syn_method = getattr(self.syn, f"rest{method.upper()}")
        with patch.object(self.syn, '_rest_call') as mock_rest_call:
            response = syn_method(*syn_args, **syn_kwargs)
            # every explicit argument must be forwarded verbatim to _rest_call
            mock_rest_call.assert_called_once_with(
                method, uri, body, endpoint, headers, retryPolicy, requests_session, **kwargs
            )
        return response

    def _method_test_default_args(self, method):
        """Verify we pass through to the unified _rest_call helper method with default args"""
        uri = '/bar'
        syn_args = [uri]
        if method == 'post':
            # restPOST has a required body positional arg
            syn_args.append(None)
        syn_method = getattr(self.syn, f"rest{method.upper()}")
        with patch.object(self.syn, '_rest_call') as mock_rest_call:
            response = syn_method(*syn_args)
            # defaults: no body/endpoint/headers/session, empty retryPolicy
            mock_rest_call.assert_called_once_with(method, uri, None, None, None, {}, None)
        return response

    def test_get(self):
        self._method_test_complete_args('get', False)
        self._method_test_default_args('get')

    def test_post(self):
        self._method_test_complete_args('post', True)
        self._method_test_default_args('post')

    def test_put(self):
        self._method_test_complete_args('put', True)
        self._method_test_default_args('put')

    def test_delete(self):
        self._method_test_complete_args('delete', False)
        self._method_test_default_args('delete')

    def _rest_call_test(self, requests_session=None):
        """Verifies the behavior of the unified _rest_call function"""
        method = 'post'
        uri = '/bar'
        data = b'data'
        endpoint = 'https://foo.com'
        headers = {'foo': 'bar'}
        retryPolicy = {'retry_status_codes': [500]}
        kwargs = {'stream': True}
        # fall back to the client's own session when none was supplied
        requests_session = requests_session or self.syn._requests_session
        with patch.object(self.syn, '_build_uri_and_headers') as mock_build_uri_and_headers, \
                patch.object(self.syn, '_build_retry_policy') as mock_build_retry_policy, \
                patch.object(self.syn, '_handle_synapse_http_error') as mock_handle_synapse_http_error, \
                patch.object(requests_session, method) as mock_requests_call:
            mock_build_uri_and_headers.return_value = (uri, headers)
            mock_build_retry_policy.return_value = retryPolicy
            response = self.syn._rest_call(method, uri, data, endpoint, headers, retryPolicy, requests_session,
                                           **kwargs)
            mock_build_uri_and_headers.assert_called_once_with(uri, endpoint=endpoint, headers=headers)
            mock_build_retry_policy.assert_called_once_with(retryPolicy)
            # HTTP errors from the response must be surfaced via the error handler
            mock_handle_synapse_http_error.assert_called_once_with(response)
            # auth defaults to the client's own credentials
            mock_requests_call.assert_called_once_with(uri, data=data, headers=headers, auth=self.syn.credentials, **kwargs)
        return response

    def test_rest_call__default_session(self):
        self._rest_call_test()

    def test_rest_call__custom_session(self):
        session = create_autospec(requests.Session)
        self._rest_call_test(session)

    def _rest_call_auth_test(self, **kwargs):
        """Issue a _rest_call and return the `auth` argument the session received."""
        method = 'get'
        uri = '/foo'
        data = b'data'
        endpoint = 'https://foo.com'
        headers = {'foo': 'bar'}
        retryPolicy = {}
        requests_session = MagicMock(spec=requests.Session)
        response = MagicMock(spec=requests.Response)
        response.status_code = 200
        requests_session.get.return_value = response
        self.syn._rest_call(method, uri, data, endpoint, headers, retryPolicy, requests_session, **kwargs)
        return requests_session.get.call_args_list[0][1]['auth']

    def test_rest_call__default_auth(self):
        """Verify that _rest_call will use the Synapse object's credentials unless overridden"""
        assert self._rest_call_auth_test() is self.syn.credentials

    # NOTE(review): the `mocker` fixture parameter appears unused here — confirm
    # it can be dropped.
    def test_rest_call__passed_auth(self, mocker):
        """Verify that _rest_call will use a custom auth object if passed"""
        auth = MagicMock(spec=synapseclient.core.credentials.cred_data.SynapseCredentials)
        assert self._rest_call_auth_test(auth=auth) is auth
class TestSetAnnotations:
    """Tests for Synapse.set_annotations argument validation and serialization."""

    @pytest.fixture(autouse=True, scope='function')
    def init_syn(self, syn):
        self.syn = syn

    def test_not_annotation(self):
        """A plain dict (not an Annotations object) is rejected before any REST call."""
        with patch.object(self.syn, "restPUT") as mock_rest_put:
            pytest.raises(TypeError, self.syn.set_annotations, {})
            mock_rest_put.assert_not_called()

    def test_with_annotations(self):
        """An Annotations object is serialized and PUT to the annotations2 endpoint."""
        with patch.object(self.syn, "restPUT") as mock_rest_put:
            mock_rest_put.return_value = {
                'id': 'syn123',
                'etag': '82196a4c-d383-439a-a08a-c07090a8c147',
                'annotations': {'foo': {'type': 'STRING', 'value': ['bar']}},
            }
            annotations = Annotations(
                'syn123', '1d6c46e4-4d52-44e1-969f-e77b458d815a', {'foo': 'bar'})
            self.syn.set_annotations(annotations)
            mock_rest_put.assert_called_once_with(
                '/entity/syn123/annotations2',
                body='{"id": "syn123",'
                     ' "etag": "1d6c46e4-4d52-44e1-969f-e77b458d815a",'
                     ' "annotations": {"foo": {"type": "STRING", '
                     '"value": ["bar"]}}}')
def test_get_unparseable_config():
    """Verify that if the synapseConfig is not parseable we fail
    in an expected way and surface the underlying parse error."""
    config_error_msg = 'bad config'
    with patch('configparser.RawConfigParser.read') as read_config:
        read_config.side_effect = configparser.Error(config_error_msg)

        with pytest.raises(ValueError) as cm:
            Synapse(debug=False, skip_checks=True, configPath='/foo')

        # the original parse error must be chained onto the raised ValueError
        assert str(cm.value.__context__) == config_error_msg
def test_get_config_file_caching():
    """Verify we read a config file once per Synapse and are not
    parsing the file multiple times just on init."""
    with patch('configparser.RawConfigParser.read') as read_config:
        read_config.return_value = configparser.ConfigParser()

        syn1 = Synapse(debug=False, skip_checks=True, configPath='/foo')

        # additional calls on the same instance are served from its cache
        config1a = syn1.getConfigFile('/foo')
        config1b = syn1.getConfigFile('/foo')
        assert config1a == config1b
        assert 1 == read_config.call_count

        # however a new instance should not share the cached value
        syn2 = Synapse(debug=False, skip_checks=True, configPath='/foo')
        assert 2 == read_config.call_count

        # FIX: the original test asserted "an additional call on that instance
        # should be [cached]" without ever making the call; exercise it here.
        syn2.getConfigFile('/foo')
        assert 2 == read_config.call_count
def test_max_threads_bounded(syn):
    """Verify we disallow setting max threads higher than our cap (or below 1)."""
    syn.max_threads = client.MAX_THREADS_CAP + 1
    assert client.MAX_THREADS_CAP == syn.max_threads

    syn.max_threads = 0
    assert 1 == syn.max_threads
@patch('synapseclient.Synapse._get_config_section_dict')
def test_get_transfer_config(mock_config_dict):
    """Verify reading transfer.maxThreads from synapseConfig"""
    # note that RawConfigParser lower cases its option values so we
    # simulate that behavior in our mocked values here
    default_values = {'max_threads': client.DEFAULT_NUM_THREADS, 'use_boto_sts_transfers': False}
    for config_dict, expected_values in [
        # empty values get defaults
        ({}, default_values),
        ({'max_threads': '', 'use_boto_sts': ''}, default_values),
        # FIX: the key was misspelled 'max_thraeds', so this row exercised an
        # unknown key instead of an explicit None value as intended
        ({'max_threads': None, 'use_boto_sts': None}, default_values),
        # explicit values should be parsed
        ({'max_threads': '1', 'use_boto_sts': 'True'}, {'max_threads': 1, 'use_boto_sts_transfers': True}),
        ({'max_threads': '7', 'use_boto_sts': 'true'}, {'max_threads': 7, 'use_boto_sts_transfers': True}),
        ({'max_threads': '100', 'use_boto_sts': 'false'}, {'max_threads': 100, 'use_boto_sts_transfers': False}),
    ]:
        mock_config_dict.return_value = config_dict
        syn = Synapse(skip_checks=True)
        for k, v in expected_values.items():
            assert v == getattr(syn, k)

    # invalid value for max threads should raise an error
    for invalid_max_thread_value in ('not a number', '12.2', 'true'):
        mock_config_dict.return_value = {'max_threads': invalid_max_thread_value}
        with pytest.raises(ValueError):
            Synapse(skip_checks=True)

    # invalid value for use_boto_sts should raise an error
    # (FIX: loop variable renamed; it previously reused invalid_max_thread_value)
    for invalid_use_boto_sts_value in ('not true', '1.2', '0', 'falsey'):
        mock_config_dict.return_value = {'use_boto_sts': invalid_use_boto_sts_value}
        with pytest.raises(ValueError):
            Synapse(skip_checks=True)
@patch('synapseclient.Synapse._get_config_section_dict')
def test_transfer_config_values_overridable(mock_config_dict):
    """Verify we can override the default transfer config values by setting them directly on the Synapse object"""
    mock_config_dict.return_value = {'max_threads': 24, 'use_boto_sts': False}
    syn = Synapse(skip_checks=True)

    # values come from the config at construction time...
    assert syn.max_threads == 24
    assert not syn.use_boto_sts_transfers

    # ...but can be replaced directly on the instance afterwards
    syn.max_threads = 5
    syn.use_boto_sts_transfers = True
    assert syn.max_threads == 5
    assert syn.use_boto_sts_transfers
def test_store__needsUploadFalse__fileHandleId_not_in_local_state(syn):
    """Regression: syn.store of a file that needs no upload must not raise a
    KeyError when the file handle id is absent from local state."""
    parent_id = 'syn122'
    synapse_id = 'syn123'
    etag = 'db9bc70b-1eb6-4a21-b3e8-9bf51d964031'
    returned_file_handle = {'id': '1234'}
    returned_bundle = {
        'entity': {
            'name': 'fake_file.txt',
            'id': synapse_id,
            'etag': etag,
            'concreteType': 'org.sagebionetworks.repo.model.FileEntity',
            'dataFileHandleId': '123412341234',
        },
        'entityType': 'file',
        'fileHandles': [{
            'id': '123412341234',
            'concreteType': 'org.sagebionetworks.repo.model.file.S3FileHandle',
        }],
        'annotations': {'id': synapse_id, 'etag': etag, 'annotations': {}},
    }
    with patch.object(syn, '_getEntityBundle', return_value=returned_bundle), \
            patch.object(synapseclient.client, 'upload_file_handle',
                         return_value=returned_file_handle), \
            patch.object(syn.cache, 'contains', return_value=True), \
            patch.object(syn, '_updateEntity'), \
            patch.object(syn, 'set_annotations'), \
            patch.object(Entity, 'create'), \
            patch.object(syn, 'get'):
        # test passes if no KeyError exception is thrown
        syn.store(File('/fake_file.txt', parent=parent_id))
def test_store__existing_processed_as_update(syn):
"""Test that storing an entity without its id but that matches an existing
entity bundle will be processed as an entity update"""
file_handle_id = '123412341234'
returned_file_handle = {
'id': file_handle_id
}
parent_id = 'syn122'
synapse_id = 'syn123'
etag = 'db9bc70b-1eb6-4a21-b3e8-9bf51d964031'
file_name = 'fake_file.txt'
existing_bundle_annotations = {
'foo': {
'type': 'LONG',
'value': ['1']
},
# this annotation is not included in the passed which is interpreted as a deletion
'bar': {
'type': 'LONG',
'value': ['2']
},
}
new_annotations = {
'foo': [3],
'baz': [4],
}
returned_bundle = {
'entity': {
'name': file_name,
'id': synapse_id,
'etag': etag,
'concreteType': 'org.sagebionetworks.repo.model.FileEntity',
'dataFileHandleId': file_handle_id,
},
'entityType': 'file',
'fileHandles': [
{
'id': file_handle_id,
'concreteType': 'org.sagebionetworks.repo.model.file.S3FileHandle',
}
],
'annotations': {
'id': synapse_id,
'etag': etag,
'annotations': existing_bundle_annotations
},
}
expected_update_properties = {
'id': synapse_id,
'etag': etag,
'name': file_name,
'concreteType': 'org.sagebionetworks.repo.model.FileEntity',
'dataFileHandleId': file_handle_id,
'parentId': parent_id,
'versionComment': None,
}
expected_annotations = {
'foo': [3],
'baz': [4],
}
with patch.object(syn, '_getEntityBundle') as mock_get_entity_bundle, \
patch.object(synapseclient.client, 'upload_file_handle', return_value=returned_file_handle), \
patch.object(syn.cache, 'contains', return_value=True), \
patch.object(syn, '_createEntity') as mock_createEntity, \
patch.object(syn, '_updateEntity') as mock_updateEntity, \
patch.object(syn, 'findEntityId') as mock_findEntityId, \
patch.object(syn, 'set_annotations') as mock_set_annotations, \
patch.object(Entity, 'create'), \
patch.object(syn, 'get'):
mock_get_entity_bundle.return_value = | |
import random
import logging
import discord
from discord.ext import commands
from sqlalchemy import orm
from kaztron import KazCog
from kaztron.config import SectionView
from kaztron.driver.pagination import Pagination
from kaztron.theme import solarized
from kaztron.utils.checks import mod_only
from kaztron.utils.discord import Limits
from kaztron.utils.embeds import EmbedSplitter
from kaztron.utils.logging import message_log_str
from kaztron.utils.datetime import format_datetime, format_date
from kaztron.cog.quotedb.model import Quote
from kaztron.cog.quotedb import controller as c, model
from kaztron.utils.strings import format_list
logger = logging.getLogger(__name__)
# noinspection PyUnresolvedReferences
class QuoteConfig(SectionView):
    """
    Configuration view for the quotedb section.

    :ivar grab_search_max: Maximum number of messages in history to search for the grab command.
        Default: 100.
    :ivar show_channel: Whether to show the channel in quote records. Default: True.
    :ivar datetime_format: One of 'seconds', 'datetime', or 'date'. How to format the date in
        quote records. Default: 'datetime'.
    """
    grab_search_max: int
    show_channel: bool
    datetime_format: str
class QuoteCog(KazCog):
"""!kazhelp
category: Commands
brief: Capture the best moments on the server!
description: |
The Quotes Database helps you capture the best moments on the server! Store your fellow
members' funniest moments so that you can revisit them time and time again.
contents:
- quote:
- get
- list
- add
- grab
- stats
- rem
- undo
- del
"""
cog_config: QuoteConfig
QUOTES_PER_PAGE = 15
EMBED_COLOR = solarized.blue
def __init__(self, bot):
super().__init__(bot, 'quotedb', QuoteConfig)
self.cog_config.set_defaults(
grab_search_max=100,
show_channel=True,
datetime_format='datetime'
)
def date_format_validator(s: str):
if s not in ('seconds', 'datetime', 'date'):
raise ValueError(
"config quotedb:datetime_format value invalid (seconds, datetime, date): {}"
.format(s))
return s
self.cog_config.set_converters('date_format', date_format_validator, date_format_validator)
def export_kazhelp_vars(self):
return {
'grab_search_max': "{:d}".format(self.cog_config.grab_search_max)
}
def make_single_embed(self, quote: Quote,
index: int=None, total: int=None, title: str=None):
quote_str = self.format_quote(quote, show_saved=False)
if title is None:
title = discord.Embed.Empty
em = discord.Embed(title=title, description=quote_str, color=self.EMBED_COLOR)
if index is None:
index = quote.get_index() + 1
if total is None:
total = len(quote.author.quotes)
em.set_footer(text="saved by {u} | {n:d}/{total:d}"
.format(u=quote.saved_by.name, n=index, total=total))
return em
async def send_quotes_list(self,
dest: discord.Channel,
quotes: Pagination,
user: model.User):
title = "Quotes by {}".format(user.name)
footer_text = "Page {:d}/{:d}".format(quotes.page + 1, quotes.total_pages)
es = EmbedSplitter(title=title, color=self.EMBED_COLOR, auto_truncate=True)
es.set_footer(text=footer_text)
start_index, end_index = quotes.get_page_indices()
for i, quote in enumerate(quotes.get_page_records()):
# Format strings for this quote
f_name = "#{:d}".format(start_index + i + 1)
f_message = self.format_quote(quote, show_saved=False) + '\n\\_\\_\\_'
es.add_field(name=f_name, value=f_message, inline=False)
await self.send_message(dest, embed=es)
def format_quote(self, quote: Quote, show_saved=True):
s_fmt = "[{0}] <#{1}> <{2}> {3}" if self.cog_config.show_channel else "[{0}] <{2}> {3}"
if self.cog_config.datetime_format == 'seconds':
timestamp_str = format_datetime(quote.timestamp, seconds=True)
elif self.cog_config.datetime_format == 'datetime':
timestamp_str = format_datetime(quote.timestamp, seconds=False)
elif self.cog_config.datetime_format == 'date':
timestamp_str = format_date(quote.timestamp)
else:
raise RuntimeError("Invalid date_format??")
s = s_fmt.format(
timestamp_str,
quote.channel_id,
quote.author.mention,
quote.message
)
if show_saved:
s += "\n*(saved by {})*".format(quote.saved_by.name)
return s
@commands.group(aliases=['quotes'], pass_context=True, invoke_without_command=True,
ignore_extra=False)
async def quote(self, ctx: commands.Context, user: str=None, *, search: str=None):
"""!kazhelp
description: >
Retrieve a quote matching a user and/or text search. Returns a random quote among all
matching results.
TIP: To search for a quote by index number, use {{!quote get}}.
parameters:
- name: user
type: "@user or string or \\"all\\""
optional: true
description: >
The user to find a quote for. This can be an @mention, user ID, part
of their name or nickname to search, or the special string "all" to find any user
(i.e. search only by keyword).
- name: search
type: string
optional: true
description: The text to search.
examples:
- command: .quote
description: Find a random quote.
- command: .quote Jane
description: Find a quote from any user whose name/nickname contains "Jane".
- command: .quote @JaneDoe#0921 flamingo
description: Find a quote by JaneDoe containing "flamingo".
- command: .quote Jane flamingo
description: Find a quote both matching user "Jane" and containing "flamingo".
"""
if user:
try:
db_user = c.query_user(self.server, user)
except ValueError: # not a valid user ID format
if user != 'all':
db_user = c.search_users(user)
else:
db_user = None
db_records = c.search_quotes(search, db_user)
quote = db_records[random.randint(0, len(db_records) - 1)]
logger.debug("Selected quote: {!r}".format(quote))
else:
quote = c.random_quote()
logger.info("Selected random quote id={:d} from all users".format(quote.quote_id))
number = quote.get_index() + 1
len_recs = len(quote.author.quotes)
em = self.make_single_embed(quote, index=number, total=len_recs)
await self.bot.say(embed=em)
@quote.command(name='get', pass_context=True)
async def quote_get(self, ctx: commands.Context, user: str, number: int):
"""!kazhelp
description: |
Retrieve a quote by index.
parameters:
- name: user
type: "@user"
description: >
The user to find a quote for. Should be an @mention or a discord ID.
- name: number
type: number
optional: true
description: >
The ID number of the quote to find (starting from 1), as shown by the {{!quote}} and
{{!quote list}} commands.
examples:
- command: .quote @JaneDoe#0921
description: Find a random quote by JaneDoe.
- command: .quote @JaneDoe#0921 4
description: Find the 4th quote by JaneDoe.
"""
db_user = c.query_user(self.server, user)
len_recs = len(db_user.quotes)
# no quotes for this user
if len_recs == 0:
logger.warning("User has no quotes.")
await self.bot.say("Sorry, {} has no quotes!".format(db_user.name))
return
logger.info("Requested quote {:d} by user {!r}".format(number, db_user))
if number < 1 or number > len_recs:
logger.warning("Invalid index {:d}".format(number))
await self.bot.say(
"Oops, I can't get quote {:d} for {}! Valid quotes are 1 to {:d}"
.format(number, db_user.name, len_recs))
return
quote = db_user.quotes[number - 1]
em = self.make_single_embed(quote, number, len_recs)
await self.bot.say(embed=em)
@quote.command(name='list', pass_context=True, ignore_extra=False)
async def quote_list(self, ctx: commands.Context, user: str, page: int=None):
"""!kazhelp
description: Retrieve a list of quotes. Always PMed.
parameters:
- name: user
type: "@user"
description: >
The user to find a quote for. Should be an @mention or a discord ID.
- name: page
type: number
optional: true
default: last page (most recent)
description: The page number to show, if there are more than 1 page of quotes.
examples:
- command: .quote list @JaneDoe#0921
description: List all quotes by JaneDoe.
- command: .quote list @JaneDoe#0921 4
description: List the 4th page of quotes by JaneDoe.
"""
db_user = c.query_user(self.server, user)
if len(db_user.quotes) == 0:
logger.warning("User has no quotes.")
await self.bot.say("Sorry, {} has no quotes!".format(db_user.name))
return
paginator = Pagination(db_user.quotes, self.QUOTES_PER_PAGE, align_end=True)
if page is not None:
paginator.page = max(0, min(paginator.total_pages - 1, page-1))
await self.send_quotes_list(ctx.message.author, paginator, db_user)
@quote.command(name='add', pass_context=True, no_pm=True)
async def quote_add(self, ctx: commands.Context, user: str, *, message: str):
"""!kazhelp
description: |
Add a new quote manually.
TIP: To automatically find and add a recent message, use {{!quote grab}}.
parameters:
- name: user
type: "@user"
description: >
The user being quoted. Should be an @mention or a discord ID.
- name: message
type: string
description: The quote text to add.
examples:
- command: .quote add @JaneDoe#0921 Ready for the mosh pit, shaka brah.
"""
if len(message) > Quote.MAX_MESSAGE_LEN:
raise commands.UserInputError("That quote is too long! Maximum length {:d} characters."
.format(Quote.MAX_MESSAGE_LEN))
quote = c.store_quote(
user=c.query_user(self.server, user),
saved_by=c.query_user(self.server, ctx.message.author.id),
channel_id=ctx.message.channel.id,
message=message,
timestamp=ctx.message.timestamp
)
message = "Added quote: {}".format(self.format_quote(quote))
logger.info(message)
await self.bot.say(embed=self.make_single_embed(quote, title="Added quote."))
await self.send_output(message)
@quote.command(name='grab', pass_context=True, no_pm=True)
async def quote_grab(self, ctx: commands.Context, user: discord.Member, *, search: str=None):
"""!kazhelp
description: |
Find the most recent matching message and add it as a quote.
This command searches the {{grab_search_max}} most recent messages in the channel. The
most recent message matching both the user and (if specified) search text is added as a
quote.
TIP: To manually add a quote, use {{!quote add}}.
parameters:
- name: user
type: "@user"
description: >
The user being quoted. Should be an @mention or a discord ID.
- name: search
type: string
optional: true
description: The quote text to find.
examples:
- command: .quote grab @JaneDoe#0921
description: Quote the most recent message from JaneDoe.
- command: .quote grab @JaneDoe#0921 mosh pit
description: Finds the most recent message from @JaneDoe containing "mosh pit".
"""
search_messages = self.bot.logs_from(ctx.message.channel, self.cog_config.grab_search_max)
async for message in search_messages: \
# type: discord.Message
# if requested author, and this message isn't the invoking one (in case of self-grab)
if message.author == user and message.id != ctx.message.id:
if not search or search.lower() in message.content.lower():
grabbed_message = message
break
else: # Nothing found
if search:
await self.bot.say(
"No message from {} matching '{}' found in the last {:d} messages"
.format(
user.nick if user.nick else user.name,
search,
self.cog_config.grab_search_max
)
)
else:
await self.bot.say("No message from {} found in the last {:d} messages"
.format(user.nick if user.nick else user.name, | |
<filename>lx16a.py<gh_stars>0
import serial
import weakref
#################################
# Created by: <NAME> #
# Questions? Bugs? Email me at #
# <EMAIL> #
# Version 0.8.0 #
# Published on November 4, 2018 #
#################################
# getServos() implementation from:
# http://effbot.org/pyfaq/how-do-i-get-a-list-of-all-instances-of-a-given-class.htm
class ServoError(Exception):
    """Raised when a servo command receives invalid parameters or a bad reply."""
class ServoTimeout(Exception):
    """Raised when a servo does not reply before the serial read timeout."""
class LX16A:
controller = None
servos = set()
########### Initialization Functions ###########
# Must be called before use!
@staticmethod
def initialize(port):
LX16A.controller = serial.Serial(port=port, baudrate=115200, timeout=.01)
    def __init__(self, ID):
        """Create a handle for the servo at bus address *ID* ([0, 253]).

        Queries the servo over serial for its current position and angle
        limits, so LX16A.initialize() must have been called first.
        """
        if ID < 0 or ID > 253:
            raise ServoError("Servo ID out of range")
        self.ID = ID
        # Last commanded/known angle, seeded from the physical position.
        self.angle = self.getPhysicalPos()
        # Angle staged by moveTimeWaitWrite(), applied on moveStart().
        self.waitingAngle = self.angle
        limits = self.angleLimitRead()
        self.lowerLimit = limits[0]
        self.upperLimit = limits[1]
        # Track all live instances via weak references (see getServos()).
        LX16A.servos.add(weakref.ref(self))
############### Utility Functions ###############
@staticmethod
def checksum(packet):
s = ~sum(packet[2:])
return s & 255
@staticmethod
def toBytes(n):
if n < 0 or n > 65535:
raise ServoError("Input out of range")
return [n & 255, n // 256]
@staticmethod
def sendPacket(packet):
packet.append(LX16A.checksum(packet))
packet = bytes(packet)
LX16A.controller.write(packet)
@staticmethod
def checkPacket(packet):
if sum(packet) == 0:
raise ServoTimeout("Timeout")
if LX16A.checksum(packet[:-1]) != packet[-1]:
LX16A.controller.flushInput()
raise ServoError("Invalid checksum")
@staticmethod
def getServos():
dead = set()
for ref in LX16A.servos:
obj = ref()
if obj is not None:
yield obj
else:
dead.add(ref)
LX16A.servos -= dead
################ Write Commands ################
# Immediately after this command is sent,
# rotate to the specified angle at uniform
# speed, in the specified time
# Possible angle values (in degrees): [0, 240], int
# Possible time values (in milliseconds): [0, 30000], int
def moveTimeWrite(self, angle, time=0):
if angle < self.lowerLimit or angle > self.upperLimit:
raise ServoError("Angle out of range")
if time < 0 or time > 30000:
raise ServoError("Time out of range")
self.angle = angle
angle = int(angle * 25 / 6)
packet = [0x55, 0x55, self.ID, 7, 1, *LX16A.toBytes(angle), *LX16A.toBytes(time)]
LX16A.sendPacket(packet)
# This command is similar to servo.moveTimeWrite,
# except that the servo will not begin rotation
# until it receives the servo.moveStart command
# Possible angle values (in degrees): [0, 240], int
# Possible time values (in milliseconds): [0, 30000], int
def moveTimeWaitWrite(self, angle, time=0):
if angle < self.lowerLimit or angle > self.upperLimit:
raise ServoError("Angle out of range")
if time < 0 or time > 30000:
raise ServoError("Time out of range")
self.waitingAngle = angle
angle = int(angle * 25 / 6)
packet = [0x55, 0x55, self.ID, 7, 7, *LX16A.toBytes(angle), *LX16A.toBytes(time)]
LX16A.sendPacket(packet)
# To be used in conjunction with servo.moveTimeWaitWrite
# Read the documentation for that command
def moveStart(self):
packet = [0x55, 0x55, self.ID, 3, 11]
LX16A.sendPacket(packet)
self.angle = self.waitingAngle
    # Immediately halts all rotation,
    # regardless of the current state
    def moveStop(self):
        """Immediately halt rotation and re-sync self.angle from the hardware."""
        packet = [0x55, 0x55, self.ID, 3, 12]
        LX16A.sendPacket(packet)
        # The move was interrupted, so re-read where the servo actually is.
        self.angle = self.getPhysicalPos()
# Changes the servo's ID to the
# parameter passed to this function
# !!! BE CAREFUL WITH THIS COMMAND !!!
# IT PERMANANTLY CHANGES THE ID OF THE SERVO
# EVEN AFTER THE PROGRAM TERMINATES
# AND AFTER THE SERVO POWERS DOWN
# !!! YOU HAVE BEEN WARNED !!!
# The ID of all servos is 1 by default
# Possible ID values: [0, 253], int
def IDWrite(self, ID):
if ID < 0 or ID > 253:
raise ServoError("ID out of range")
packet = [0x55, 0x55, self.ID, 4, 13, ID]
LX16A.sendPacket(packet)
self.ID = ID
# Adds a constant offset to the angle of rotation
# For example, if the offset is -125 (-30 degrees),
# and the servo is commanded to rotate to position
# 500 (120 degrees), it will rotate to position 375
# (90 degrees)
# The offset resets back to 0 when the servo powers off
# However, it can be permanently set using servo.angleOffsetWrite
# The offset is 0 by default
# Possible angle values (in degrees): [-30, 30], int
def angleOffsetAdjust(self, offset):
if offset < -30 or offset > 30:
raise ServoError("Offset out of range")
angle = int(angle * 25 / 6)
if offset < 0:
offset += 256
packet = [0x55, 0x55, self.ID, 4, 17, offset]
LX16A.sendPacket(packet)
# Permanently applies the offset angle set by
# servo.AngleOffsetAdjust. After the servo powers
# down, the offset will default to the set angle
def angleOffsetWrite(self):
packet = [0x55, 0x55, self.ID, 3, 18]
LX16A.sendPacket(packet)
# Permanently sets a restriction on the rotation
# angle. If the current angle is outside of the bounds,
# nothing will change. But once the angle enters the legal range,
# it will not be allowed to exceed the limits until they are extended
# After restrictions are applied, the angles will not scale
# For example, if the bounds are set to [120, 240], the angle 0
# does not mean a rotation of halfway
# The lower bound must always be less than the upper bound
# The default angle limits are 0 and 240
# Possible lower values (in degrees): [0, 240], int
# Possible upper values (in degrees): [0, 240], int
def angleLimitWrite(self, lower, upper):
if lower < 0 or lower > 240:
raise ServoError("Lower bound out of range")
if upper < 0 or upper > 240:
raise ServoError("Upper bound out of range")
if lower >= upper:
raise ServoError("Lower bound must be less than upper bound")
self.lowerLimit = lower
self.upperLimit = upper
lower = int(lower * 25 / 6)
upper = int(upper * 25 / 6)
packet = [0x55, 0x55, self.ID, 7, 20, *LX16A.toBytes(lower), *LX16A.toBytes(upper)]
LX16A.sendPacket(packet)
# Sets the lower and upper bounds on the input voltage
# If the input voltage exceeds these bounds, the LED
# on the servo will flash and the servo will not rotate
# Possible lower values (in millivolts): [4500, 12000], int
# Possible higher values (in millivolts): [4500, 12000], int
def vInLimitWrite(self, lower, upper):
if lower < 4500 or lower > 12000:
raise ServoError("Lower bound out of range")
if upper < 4500 or upper > 12000:
raise ServoError("Upper bound out of range")
if lower >= upper:
raise ServoError("Lower bound must be less than upper bound")
packet = [0x55, 0x55, self.ID, 7, 22, *LX16A.toBytes(lower), *LX16A.toBytes(upper)]
LX16A.sendPacket(packet)
# Sets the maximum internal temperature
# If the servo temperature exceeds the limit, the LED
# on the servo will flash and the servo will not rotate
# Default maximum temperature is 85 degrees
# Possible temperature values (in degrees celcius): [50, 100], int
def tempMaxLimitWrite(self, temp):
if temp < 50 or temp > 100:
raise ServoError("Temperature limit out of range")
packet = [0x55, 0x55, self.ID, 4, 24, temp]
LX16A.sendPacket(packet)
# The LX-16A has two modes:
# Servo mode (with precise angle control)
# Motor mode (with continuous rotation)
# This command sets the servo to servo mode
def servoMode(self):
packet = [0x55, 0x55, self.ID, 7, 29, 0, 0, 0, 0]
LX16A.sendPacket(packet)
# This command sets the servo to motor mode
# The speed parameter controls how fast
# the servo spins
# -1000 is full speed backwards, and
# 1000 is full speed forwards
# Possible speed values: [-1000, 1000], int
def motorMode(self, speed):
if speed < -1000 or speed > 1000:
raise ServoError("Speed out of range")
if speed < 0:
speed += 65536
packet = [0x55, 0x55, self.ID, 7, 29, 1, 0, *LX16A.toBytes(speed)]
LX16A.sendPacket(packet)
# Controls the power state of the servo
# In the power down state, the servo consumes
# less power, but will also not respond to commands
# It will respond once powered on
# Possible power values:
# 0 for power down, 1 for power on
def loadOrUnloadWrite(self, power):
if power != 0 and power != 1:
raise ServoError("Power must be 0 or 1")
packet = [0x55, 0x55, self.ID, 4, 31, power]
LX16A.sendPacket(packet)
# Switches the error LED on the back of the servo.
# power: 1 = always off (errors will not be reported)
#        0 = always on  (errors can be reported)
def LEDCtrlWrite(self, power):
    if power not in (0, 1):
        raise ServoError("Power must be 0 or 1")
    LX16A.sendPacket([0x55, 0x55, self.ID, 4, 33, power])
# Selects which fault conditions make the error LED flash.
# Each flag enables one condition:
#   temp - the internal temperature limit was exceeded
#   volt - the input voltage is outside the configured bounds
#   lock - the internal rotor is locked
def LEDErrorWrite(self, temp, volt, lock):
    # Pack the three flags into a 3-bit mask (temp=bit0, volt=bit1, lock=bit2).
    val = (1 if temp else 0) | (2 if volt else 0) | (4 if lock else 0)
    LX16A.sendPacket([0x55, 0x55, self.ID, 4, 35, val])
# Broadcasts the servo.moveStart command (ID 254 = broadcast) to
# every connected servo, then records each servo's queued target angle.
@staticmethod
def moveStartAll():
    LX16A.sendPacket([0x55, 0x55, 254, 3, 11])
    for servo in LX16A.getServos():
        servo.angle = servo.waitingAngle
# Broadcasts the servo.moveStop command (ID 254 = broadcast) to every
# connected servo, then re-reads each servo's actual physical position.
@staticmethod
def moveStopAll():
    LX16A.sendPacket([0x55, 0x55, 254, 3, 12])
    for servo in LX16A.getServos():
        servo.angle = servo.getPhysicalPos()
# Rotates multiple servos simultaneously
# It is better to use this rather than successive
# calls to servo.moveTimeWrite, as it takes time to send
# a command to a servo, so there would be a delay
# servos is a list of servos (no repeats)
# data is a list of (angle, time) pairs
# Possible angle values (in degrees): [0, 240], int
# Possible time values (in milliseconds): [0, 30000], int
@staticmethod
def moveTimeWriteList(servos, data):
if len(servos) != len(data):
raise ServoError("There must be a bijection between the servo list and the command list")
IDList = [servo.ID for servo in servos]
if len(set(IDList)) != | |
the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:ivar entries: List of log entry messages.
:vartype entries: list[~azure.mgmt.web.v2020_06_01.models.MSDeployLogEntry]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'entries': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'entries': {'key': 'properties.entries', 'type': '[MSDeployLogEntry]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
**kwargs
):
super(MSDeployLog, self).__init__(kind=kind, **kwargs)
self.entries = None
class MSDeployLogEntry(msrest.serialization.Model):
    """A single entry of an MSDeploy log.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar time: Timestamp of log entry.
    :vartype time: ~datetime.datetime
    :ivar type: Log entry type. Possible values include: "Message", "Warning", "Error".
    :vartype type: str or ~azure.mgmt.web.v2020_06_01.models.MSDeployLogEntryType
    :ivar message: Log entry message.
    :vartype message: str
    """

    # Every field is server-populated, hence marked read-only.
    _validation = {
        'time': {'readonly': True},
        'type': {'readonly': True},
        'message': {'readonly': True},
    }

    # Maps Python attribute names to JSON keys and wire types.
    _attribute_map = {
        'time': {'key': 'time', 'type': 'iso-8601'},
        'type': {'key': 'type', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only fields start empty; deserialization fills them in.
        self.time = None
        self.type = None
        self.message = None
class MSDeployStatus(ProxyOnlyResource):
    """MSDeploy ARM response.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar deployer: Username of deployer.
    :vartype deployer: str
    :ivar provisioning_state: Provisioning state. Possible values include: "accepted", "running",
     "succeeded", "failed", "canceled".
    :vartype provisioning_state: str or
     ~azure.mgmt.web.v2020_06_01.models.MSDeployProvisioningState
    :ivar start_time: Start time of deploy operation.
    :vartype start_time: ~datetime.datetime
    :ivar end_time: End time of deploy operation.
    :vartype end_time: ~datetime.datetime
    :ivar complete: Whether the deployment operation has completed.
    :vartype complete: bool
    """

    # Every status property is server-populated; clients cannot set them.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'deployer': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'start_time': {'readonly': True},
        'end_time': {'readonly': True},
        'complete': {'readonly': True},
    }

    # Status fields live under the nested "properties." JSON object.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'deployer': {'key': 'properties.deployer', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
        'complete': {'key': 'properties.complete', 'type': 'bool'},
    }

    def __init__(
        self,
        *,
        kind: Optional[str] = None,
        **kwargs
    ):
        super(MSDeployStatus, self).__init__(kind=kind, **kwargs)
        # Read-only fields start as None and are filled during deserialization.
        self.deployer = None
        self.provisioning_state = None
        self.start_time = None
        self.end_time = None
        self.complete = None
class NameIdentifier(msrest.serialization.Model):
    """Identifies an object.

    :param name: Name of the object.
    :type name: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(self, *, name: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.name = name
class NameIdentifierCollection(msrest.serialization.Model):
    """Collection of domain name identifiers.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. Collection of resources.
    :type value: list[~azure.mgmt.web.v2020_06_01.models.NameIdentifier]
    :ivar next_link: Link to next page of resources.
    :vartype next_link: str
    """

    # "value" must be supplied by the caller; "next_link" is server-populated.
    _validation = {
        'value': {'required': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[NameIdentifier]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: List["NameIdentifier"],
        **kwargs
    ):
        super(NameIdentifierCollection, self).__init__(**kwargs)
        self.value = value
        # Paging link; None until deserialized from a server response.
        self.next_link = None
class NameValuePair(msrest.serialization.Model):
    """A simple name/value string pair.

    :param name: Pair name.
    :type name: str
    :param value: Pair value.
    :type value: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, *, name: Optional[str] = None,
                 value: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.value = value
class NetworkAccessControlEntry(msrest.serialization.Model):
    """Network access control entry.

    :param action: Action object. Possible values include: "Permit", "Deny".
    :type action: str or ~azure.mgmt.web.v2020_06_01.models.AccessControlEntryAction
    :param description: Description of network access control entry.
    :type description: str
    :param order: Order of precedence.
    :type order: int
    :param remote_subnet: Remote subnet.
    :type remote_subnet: str
    """

    _attribute_map = {
        'action': {'key': 'action', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'order': {'key': 'order', 'type': 'int'},
        'remote_subnet': {'key': 'remoteSubnet', 'type': 'str'},
    }

    def __init__(self, *,
                 action: Optional[Union[str, "AccessControlEntryAction"]] = None,
                 description: Optional[str] = None,
                 order: Optional[int] = None,
                 remote_subnet: Optional[str] = None,
                 **kwargs):
        super().__init__(**kwargs)
        self.action = action
        self.description = description
        self.order = order
        self.remote_subnet = remote_subnet
class NetworkFeatures(ProxyOnlyResource):
    """Full view of network features for an app (presently VNET integration and Hybrid Connections).

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar virtual_network_name: The Virtual Network name.
    :vartype virtual_network_name: str
    :ivar virtual_network_connection: The Virtual Network summary view.
    :vartype virtual_network_connection: ~azure.mgmt.web.v2020_06_01.models.VnetInfo
    :ivar hybrid_connections: The Hybrid Connections summary view.
    :vartype hybrid_connections:
     list[~azure.mgmt.web.v2020_06_01.models.RelayServiceConnectionEntity]
    :ivar hybrid_connections_v2: The Hybrid Connection V2 (Service Bus) view.
    :vartype hybrid_connections_v2: list[~azure.mgmt.web.v2020_06_01.models.HybridConnection]
    """

    # All network views are server-populated read-only properties.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'virtual_network_name': {'readonly': True},
        'virtual_network_connection': {'readonly': True},
        'hybrid_connections': {'readonly': True},
        'hybrid_connections_v2': {'readonly': True},
    }

    # Network views are nested under the "properties." JSON object.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'virtual_network_name': {'key': 'properties.virtualNetworkName', 'type': 'str'},
        'virtual_network_connection': {'key': 'properties.virtualNetworkConnection', 'type': 'VnetInfo'},
        'hybrid_connections': {'key': 'properties.hybridConnections', 'type': '[RelayServiceConnectionEntity]'},
        'hybrid_connections_v2': {'key': 'properties.hybridConnectionsV2', 'type': '[HybridConnection]'},
    }

    def __init__(
        self,
        *,
        kind: Optional[str] = None,
        **kwargs
    ):
        super(NetworkFeatures, self).__init__(kind=kind, **kwargs)
        # Read-only fields start as None and are filled during deserialization.
        self.virtual_network_name = None
        self.virtual_network_connection = None
        self.hybrid_connections = None
        self.hybrid_connections_v2 = None
class NetworkTrace(msrest.serialization.Model):
    """Network trace.

    :param path: Local file path for the captured network trace file.
    :type path: str
    :param status: Current status of the network trace operation, same as Operation.Status
     (InProgress/Succeeded/Failed).
    :type status: str
    :param message: Detailed message of a network trace operation, e.g. error message in case of
     failure.
    :type message: str
    """

    _attribute_map = {
        'path': {'key': 'path', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, *, path: Optional[str] = None,
                 status: Optional[str] = None,
                 message: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.path = path
        self.status = status
        self.message = message
class Nonce(ProxyOnlyResource):
    """Nonce.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :param validate_nonce:
    :type validate_nonce: bool
    :param nonce_expiration_interval:
    :type nonce_expiration_interval: str
    """

    # Core resource identifiers are server-populated.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'validate_nonce': {'key': 'properties.validateNonce', 'type': 'bool'},
        'nonce_expiration_interval': {'key': 'properties.nonceExpirationInterval', 'type': 'str'},
    }

    def __init__(self, *, kind: Optional[str] = None,
                 validate_nonce: Optional[bool] = None,
                 nonce_expiration_interval: Optional[str] = None,
                 **kwargs):
        super().__init__(kind=kind, **kwargs)
        self.validate_nonce = validate_nonce
        self.nonce_expiration_interval = nonce_expiration_interval
class OpenIdConnectClientCredential(ProxyOnlyResource):
    """OpenIdConnectClientCredential.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :param kind: Kind of resource.
    :type kind: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar method: Default value: "ClientSecretPost".
    :vartype method: str
    :param client_secret_setting_name:
    :type client_secret_setting_name: str
    """

    # "method" is declared constant below, so it is never user-settable.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'method': {'constant': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'method': {'key': 'properties.method', 'type': 'str'},
        'client_secret_setting_name': {'key': 'properties.clientSecretSettingName', 'type': 'str'},
    }

    # Class-level constant serialized for every instance (see _validation).
    method = "ClientSecretPost"

    def __init__(
        self,
        *,
        kind: Optional[str] = None,
        client_secret_setting_name: Optional[str] = None,
        **kwargs
    ):
        super(OpenIdConnectClientCredential, self).__init__(kind=kind, **kwargs)
        self.client_secret_setting_name = client_secret_setting_name
class OpenIdConnectConfig(ProxyOnlyResource):
"""OpenIdConnectConfig.
Variables are | |
import datetime
import numpy as np
import pytest
from testfixtures import LogCapture
from greykite.common.constants import LOGGER_NAME
from greykite.common.constants import PREDICTED_COL
from greykite.common.constants import PREDICTED_LOWER_COL
from greykite.common.constants import PREDICTED_UPPER_COL
from greykite.common.constants import TIME_COL
from greykite.common.constants import VALUE_COL
from greykite.common.testing_utils import generate_df_for_tests
from greykite.framework.templates.autogen.forecast_config import EvaluationPeriodParam
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.autogen.forecast_config import MetadataParam
from greykite.framework.templates.autogen.forecast_config import ModelComponentsParam
from greykite.framework.templates.forecaster import Forecaster
from greykite.framework.templates.silverkite_multistage_template import SilverkiteMultistageTemplate
from greykite.framework.templates.silverkite_multistage_template_config import SILVERKITE_TWO_STAGE
from greykite.framework.templates.silverkite_multistage_template_config import SilverkiteMultistageTemplateConfig
from greykite.framework.templates.simple_silverkite_template import SimpleSilverkiteTemplate
from greykite.sklearn.estimator.simple_silverkite_estimator import SimpleSilverkiteEstimator
from greykite.sklearn.uncertainty.uncertainty_methods import UncertaintyMethodEnum
@pytest.fixture
def df():
    """Hourly test dataframe (8 weeks from 2018-01-01) with a linear
    "regressor" column appended."""
    data = generate_df_for_tests(
        freq="H",
        periods=24 * 7 * 8,
        train_start_date=datetime.datetime(2018, 1, 1),
        conti_year_origin=2018)["df"]
    data["regressor"] = np.arange(len(data))
    return data
@pytest.fixture
def silverkite_multistage_configs():
    """Two ``SilverkiteMultistageTemplateConfig`` submodels used to override
    the SILVERKITE_TWO_STAGE template in these tests: a 30-day window
    aggregated to daily frequency, then a 7-day window at the original
    frequency.

    NOTE: tests mutate the nested component dicts in place, so each
    submodel deliberately gets its own independent literals.
    """
    configs = [
        # Submodel 1: 30-day training window, data aggregated to daily
        # frequency ("D") via nanmean; daily seasonality disabled.
        SilverkiteMultistageTemplateConfig(
            train_length="30D",
            fit_length=None,
            agg_func="nanmean",
            agg_freq="D",
            model_template="SILVERKITE",
            model_components=ModelComponentsParam(
                seasonality={
                    "yearly_seasonality": 12,
                    "quarterly_seasonality": 5,
                    "monthly_seasonality": 5,
                    "weekly_seasonality": 4,
                    "daily_seasonality": 0,
                },
                growth={
                    "growth_term": "linear"
                },
                events={
                    "holidays_to_model_separately": "auto",
                    "holiday_lookup_countries": "auto",
                    "holiday_pre_num_days": 1,
                    "holiday_post_num_days": 1,
                    "holiday_pre_post_num_dict": None,
                    "daily_event_df_dict": None,
                },
                changepoints={
                    "changepoints_dict": None,
                    "seasonality_changepoints_dict": None
                },
                autoregression={
                    "autoreg_dict": "auto"
                },
                regressors={
                    "regressor_cols": []
                },
                lagged_regressors={
                    "lagged_regressor_dict": None
                },
                uncertainty={
                    "uncertainty_dict": None
                },
                custom={
                    "fit_algorithm_dict": {
                        "fit_algorithm": "ridge",
                        "fit_algorithm_params": None,
                    },
                    "feature_sets_enabled": "auto",  # "auto" based on data freq and size
                    "max_daily_seas_interaction_order": 0,
                    "max_weekly_seas_interaction_order": 2,
                    "extra_pred_cols": [],
                    "min_admissible_value": None,
                    "max_admissible_value": None,
                }
            )
        ),
        # Submodel 2: 7-day training window at the original frequency
        # (agg_freq=None); only daily seasonality enabled, no holidays.
        SilverkiteMultistageTemplateConfig(
            train_length="7D",
            fit_length=None,
            agg_func="nanmean",
            agg_freq=None,
            model_template="SILVERKITE",
            model_components=ModelComponentsParam(
                seasonality={
                    "yearly_seasonality": 0,
                    "quarterly_seasonality": 0,
                    "monthly_seasonality": 0,
                    "weekly_seasonality": 0,
                    "daily_seasonality": 12,
                },
                growth={
                    "growth_term": None
                },
                events={
                    "holidays_to_model_separately": [],
                    "holiday_lookup_countries": [],
                    "holiday_pre_num_days": 0,
                    "holiday_post_num_days": 0,
                    "holiday_pre_post_num_dict": None,
                    "daily_event_df_dict": None,
                },
                changepoints={
                    "changepoints_dict": None,
                    "seasonality_changepoints_dict": None
                },
                autoregression={
                    "autoreg_dict": "auto"
                },
                regressors={
                    "regressor_cols": []
                },
                lagged_regressors={
                    "lagged_regressor_dict": None
                },
                uncertainty={
                    "uncertainty_dict": None
                },
                custom={
                    "fit_algorithm_dict": {
                        "fit_algorithm": "ridge",
                        "fit_algorithm_params": None,
                    },
                    "feature_sets_enabled": "auto",  # "auto" based on data freq and size
                    "max_daily_seas_interaction_order": 5,
                    "max_weekly_seas_interaction_order": 2,
                    "extra_pred_cols": [],
                    "min_admissible_value": None,
                    "max_admissible_value": None,
                }
            )
        )
    ]
    return configs
@pytest.fixture
def forecast_config(silverkite_multistage_configs):
    """ForecastConfig for the SILVERKITE_TWO_STAGE template with the
    two-submodel override and a minimal 12-step evaluation setup."""
    metadata = MetadataParam(
        time_col=TIME_COL,
        value_col=VALUE_COL,
        freq="H")
    components = ModelComponentsParam(
        custom=dict(silverkite_multistage_configs=silverkite_multistage_configs))
    evaluation = EvaluationPeriodParam(
        cv_max_splits=1,
        cv_horizon=12,
        test_horizon=12)
    return ForecastConfig(
        model_template="SILVERKITE_TWO_STAGE",
        forecast_horizon=12,
        metadata_param=metadata,
        model_components_param=components,
        evaluation_period_param=evaluation)
def test_get_regressor_cols(df, forecast_config):
    """Tests the `self.get_regressor_cols` method."""
    template = SilverkiteMultistageTemplate()
    df["reg1"] = 1
    df["reg2"] = 2
    template.df = df
    template.config = forecast_config
    # Give each submodel its own regressor; the template must merge them.
    configs = forecast_config.model_components_param.custom["silverkite_multistage_configs"]
    configs[0].model_components.regressors["regressor_cols"] = ["reg1"]
    configs[1].model_components.regressors["regressor_cols"] = ["reg2"]
    assert set(template.get_regressor_cols()) == {"reg1", "reg2"}
def test_get_lagged_regressor_info(df, forecast_config):
    """Tests `self.get_lagged_regressor_info` merging across submodels."""
    template = SilverkiteMultistageTemplate()
    df["reg1"] = 1
    df["reg2"] = 2
    template.df = df
    template.config = forecast_config
    # Give each submodel one lagged regressor with the same lag order.
    configs = forecast_config.model_components_param.custom["silverkite_multistage_configs"]
    for config, column in ((configs[0], "reg1"), (configs[1], "reg2")):
        config.model_components.lagged_regressors["lagged_regressor_dict"] = [{
            column: {
                "lag_dict": {"orders": [12]},
                "series_na_fill_func": lambda s: s.bfill().ffill()}
        }]
    info = template.get_lagged_regressor_info()
    assert info == dict(
        lagged_regressor_cols=["reg1", "reg2"],
        overall_min_lag_order=12.0,
        overall_max_lag_order=12.0
    )
def test_get_hyperparameter_grid(df, forecast_config):
    """Tests the `self.get_hyperparameter_grid` method."""
    template = SilverkiteMultistageTemplate()
    # Error when `self.config` is not available.
    with pytest.raises(
            ValueError,
            match="Forecast config must be provided"):
        template.get_hyperparameter_grid()
    template.df = df
    # A list of length 2 on each submodel expands to 2 * 2 = 4 grids.
    custom = forecast_config.model_components_param.custom
    custom["silverkite_multistage_configs"][0].model_components.seasonality["weekly_seasonality"] = [1, 2]
    custom["silverkite_multistage_configs"][1].model_components.seasonality["daily_seasonality"] = [10, 12]
    template.config = forecast_config
    grid = template.get_hyperparameter_grid()
    assert grid["estimator__forecast_horizon"] == [12]
    assert grid["estimator__freq"] == ["H"]
    model_configs = grid["estimator__model_configs"]
    assert len(model_configs) == 4
    # Grids enumerate the cross-product in (weekly, daily) order.
    expected_pairs = [(1, 10), (1, 12), (2, 10), (2, 12)]
    for configs, (weekly, daily) in zip(model_configs, expected_pairs):
        assert configs[0].estimator_params["weekly_seasonality"] == weekly
        assert configs[1].estimator_params["daily_seasonality"] == daily
def test_get_hyperparameter_grid_same_template(df, forecast_config):
    """Tests overriding a submodel with the same ``model_template``."""
    template = SilverkiteMultistageTemplate()
    template.df = df
    seasonality = forecast_config.model_components_param.custom[
        "silverkite_multistage_configs"][1].model_components.seasonality
    # Set weekly seasonality to 5 and drop the daily seasonality spec.
    seasonality["weekly_seasonality"] = 5
    del seasonality["daily_seasonality"]
    template.config = forecast_config
    grid = template.get_hyperparameter_grid()
    # The original template has daily seasonality 12 and no weekly seasonality.
    # The second model was overriden with the same ``model_template``
    # ("SILVERKITE"), so the grid keeps both daily seasonality 12 and the
    # new weekly seasonality 5.
    params = grid["estimator__model_configs"][0][1].estimator_params
    assert params["daily_seasonality"] == 12
    assert params["weekly_seasonality"] == 5
def test_get_hyperparameter_grid_different_template(df, forecast_config):
    """Tests overriding a submodel with a different ``model_template``."""
    template = SilverkiteMultistageTemplate()
    template.df = df
    submodel = forecast_config.model_components_param.custom[
        "silverkite_multistage_configs"][1]
    # Switch the submodel's base template to ``SILVERKITE_EMPTY``.
    submodel.model_template = "SILVERKITE_EMPTY"
    # Set weekly seasonality to 5 and drop the daily seasonality spec.
    submodel.model_components.seasonality["weekly_seasonality"] = 5
    del submodel.model_components.seasonality["daily_seasonality"]
    template.config = forecast_config
    grid = template.get_hyperparameter_grid()
    # The original template has daily seasonality 12 and no weekly seasonality.
    # Overriding with a different ``model_template`` (``SILVERKITE_EMPTY``)
    # discards the old defaults, so only weekly seasonality 5 remains and
    # daily seasonality falls back to 0.
    params = grid["estimator__model_configs"][0][1].estimator_params
    assert params["daily_seasonality"] == 0
    assert params["weekly_seasonality"] == 5
def test_get_hyperparameter_grid_extra_configs(df, forecast_config):
    """Tests gets hyperparameter grid when the default and override have different lengths."""
    # The empty template has no configs.
    # The override components has two configs.
    forecast_config.model_template = "SILVERKITE_MULTISTAGE_EMPTY"
    template = SilverkiteMultistageTemplate()
    template.df = df
    template.config = forecast_config
    # The grid should have exactly two configs which are the same as the override configs.
    hyperparameter_grid = template.get_hyperparameter_grid()
    # Golden expected params for submodel 1 (30-day daily-aggregated model).
    assert hyperparameter_grid["estimator__model_configs"][0][0].estimator_params == {
        'yearly_seasonality': 12,
        'quarterly_seasonality': 5,
        'monthly_seasonality': 5,
        'weekly_seasonality': 4,
        'daily_seasonality': 0,
        'growth_term': 'linear',
        'changepoints_dict': None,
        'seasonality_changepoints_dict': None,
        'holidays_to_model_separately': 'auto',
        'holiday_lookup_countries': 'auto',
        'holiday_pre_num_days': 1,
        'holiday_post_num_days': 1,
        'holiday_pre_post_num_dict': None,
        'daily_event_df_dict': None,
        'feature_sets_enabled': 'auto',
        'fit_algorithm_dict': {
            'fit_algorithm': 'ridge',
            'fit_algorithm_params': None},
        'max_daily_seas_interaction_order': 0,
        'max_weekly_seas_interaction_order': 2,
        'extra_pred_cols': [],
        'drop_pred_cols': None,
        'explicit_pred_cols': None,
        'min_admissible_value': None,
        'max_admissible_value': None,
        'autoreg_dict': 'auto',
        'simulation_num': 10,
        'normalize_method': None,
        'regressor_cols': [],
        'lagged_regressor_dict': None,
        'regression_weight_col': None,
        'uncertainty_dict': None,
        'origin_for_time_vars': None,
        'train_test_thresh': None,
        'training_fraction': None}
    # Golden expected params for submodel 2 (7-day original-frequency model).
    assert hyperparameter_grid["estimator__model_configs"][0][1].estimator_params == {
        'yearly_seasonality': 0,
        'quarterly_seasonality': 0,
        'monthly_seasonality': 0,
        'weekly_seasonality': 0,
        'daily_seasonality': 12,
        'growth_term': None,
        'changepoints_dict': None,
        'seasonality_changepoints_dict': None,
        'holidays_to_model_separately': [],
        'holiday_lookup_countries': [],
        'holiday_pre_num_days': 0,
        'holiday_post_num_days': 0,
        'holiday_pre_post_num_dict': None,
        'daily_event_df_dict': None,
        'feature_sets_enabled': 'auto',
        'fit_algorithm_dict': {
            'fit_algorithm': 'ridge',
            'fit_algorithm_params': None},
        'max_daily_seas_interaction_order': 5,
        'max_weekly_seas_interaction_order': 2,
        'extra_pred_cols': [],
        'drop_pred_cols': None,
        'explicit_pred_cols': None,
        'min_admissible_value': None,
        'max_admissible_value': None,
        'normalize_method': None,
        'autoreg_dict': 'auto',
        'simulation_num': 10,
        'regressor_cols': [],
        'lagged_regressor_dict': None,
        'regression_weight_col': None,
        'uncertainty_dict': None,
        'origin_for_time_vars': None,
        'train_test_thresh': None,
        'training_fraction': None}
def test_get_silverkite_multistage_configs_override(df, forecast_config):
    """Tests the private ``__get_silverkite_multistage_configs_override``
    method against a golden pair of expected submodel configs."""
    template = SilverkiteMultistageTemplate()
    template.df = df
    # Adds a list of length 2 to each submodel.
    # The result hyperparameter grid should have 2 * 2 = 4 grids.
    forecast_config.model_components_param.custom[
        "silverkite_multistage_configs"][0].model_components.seasonality["weekly_seasonality"] = [1, 2]
    forecast_config.model_components_param.custom[
        "silverkite_multistage_configs"][1].model_components.seasonality["daily_seasonality"] = [10, 12]
    template.config = forecast_config
    # Name-mangled private helpers are accessed via the
    # _SilverkiteMultistageTemplate__ prefix.
    default_model_components = template._SilverkiteMultistageTemplate__get_default_model_components(
        forecast_config.model_template)
    default_silverkite_multistage_configs = default_model_components.custom.get("silverkite_multistage_configs")
    new_configs = template._SilverkiteMultistageTemplate__get_silverkite_multistage_configs_override(
        custom=forecast_config.model_components_param.custom,
        model_template="SILVERKITE_TWO_STAGE",
        default_silverkite_multistage_configs=default_silverkite_multistage_configs
    )
    # Golden expected configs: the defaults with the two seasonality
    # overrides ([1, 2] weekly and [10, 12] daily) merged in.
    assert new_configs == [
        SilverkiteMultistageTemplateConfig(
            train_length='30D',
            fit_length=None,
            agg_func='nanmean',
            agg_freq='D',
            model_template='SILVERKITE',
            model_components=ModelComponentsParam(
                autoregression={
                    'autoreg_dict': 'auto'
                },
                changepoints={
                    'changepoints_dict': None,
                    'seasonality_changepoints_dict': None
                },
                custom={
                    'fit_algorithm_dict': {
                        'fit_algorithm': 'ridge',
                        'fit_algorithm_params': None
                    },
                    'feature_sets_enabled': 'auto',
                    'max_daily_seas_interaction_order': 0,
                    'max_weekly_seas_interaction_order': 2,
                    'extra_pred_cols': [],
                    'min_admissible_value': None,
                    'max_admissible_value': None
                },
                events={
                    'holidays_to_model_separately': 'auto',
                    'holiday_lookup_countries': 'auto',
                    'holiday_pre_num_days': 1,
                    'holiday_post_num_days': 1,
                    'holiday_pre_post_num_dict': None,
                    'daily_event_df_dict': None
                },
                growth={
                    'growth_term': 'linear'
                },
                hyperparameter_override={},
                regressors={
                    'regressor_cols': []
                },
                lagged_regressors={
                    'lagged_regressor_dict': None
                },
                seasonality={
                    'yearly_seasonality': 12,
                    'quarterly_seasonality': 5,
                    'monthly_seasonality': 5,
                    'weekly_seasonality': [1, 2],
                    'daily_seasonality': 0},
                uncertainty={
                    'uncertainty_dict': None
                })),
        SilverkiteMultistageTemplateConfig(
            train_length='7D',
            fit_length=None,
            agg_func='nanmean',
            agg_freq=None,
            model_template='SILVERKITE',
            model_components=ModelComponentsParam(
                autoregression={
                    'autoreg_dict': 'auto'
                },
                changepoints={
                    'changepoints_dict': None,
                    'seasonality_changepoints_dict': None
                },
                custom={
                    'fit_algorithm_dict': {
                        'fit_algorithm': 'ridge',
                        'fit_algorithm_params': None
                    },
                    'feature_sets_enabled': 'auto',
                    'max_daily_seas_interaction_order': 5,
                    'max_weekly_seas_interaction_order': 2,
                    'extra_pred_cols': [],
                    'min_admissible_value': None,
                    'max_admissible_value': None
                },
                events={
                    'holidays_to_model_separately': [],
                    'holiday_lookup_countries': [],
                    'holiday_pre_num_days': 0,
                    'holiday_post_num_days': 0,
                    'holiday_pre_post_num_dict': None,
                    'daily_event_df_dict': None
                },
                growth={
                    'growth_term': None
                },
                hyperparameter_override={},
                regressors={
                    'regressor_cols': []
                },
                lagged_regressors={
                    'lagged_regressor_dict': None
                },
                seasonality={
                    'yearly_seasonality': 0,
                    'quarterly_seasonality': 0,
                    'monthly_seasonality': 0,
                    'weekly_seasonality': 0,
                    'daily_seasonality': [10, 12]
                },
                uncertainty={
                    'uncertainty_dict': None
                }))]
def test_get_estimators_and_params_from_template_configs(df, forecast_config):
template = SilverkiteMultistageTemplate()
template.df = df
# Adds a list of length 2 to each submodel.
# The result hyperparameter grid should have 2 * 2 = 4 grids.
forecast_config.model_components_param.custom[
"silverkite_multistage_configs"][0].model_components.seasonality["weekly_seasonality"] = [1, 2]
forecast_config.model_components_param.custom[
"silverkite_multistage_configs"][1].model_components.seasonality["daily_seasonality"] = [10, 12]
template.config = forecast_config
default_model_components = template._SilverkiteMultistageTemplate__get_default_model_components(
forecast_config.model_template)
default_silverkite_multistage_configs = default_model_components.custom.get("silverkite_multistage_configs")
new_configs = template._SilverkiteMultistageTemplate__get_silverkite_multistage_configs_override(
custom=forecast_config.model_components_param.custom,
model_template="SILVERKITE_TWO_STAGE",
default_silverkite_multistage_configs=default_silverkite_multistage_configs
)
estimator_list, estimator_params_list = template._SilverkiteMultistageTemplate__get_estimators_and_params_from_template_configs(
new_configs=new_configs
)
# We can't test ``time_properties``
for d in estimator_params_list:
del d["estimator__time_properties"]
assert estimator_list == [SimpleSilverkiteEstimator, SimpleSilverkiteEstimator]
assert estimator_params_list == [
{
'estimator__yearly_seasonality': [12],
'estimator__quarterly_seasonality': [5],
'estimator__monthly_seasonality': [5],
'estimator__weekly_seasonality': [1, 2],
'estimator__daily_seasonality': [0],
'estimator__growth_term': ['linear'],
'estimator__changepoints_dict': [None],
'estimator__seasonality_changepoints_dict': [None],
'estimator__holidays_to_model_separately': ['auto'],
'estimator__holiday_lookup_countries': ['auto'],
'estimator__holiday_pre_num_days': [1],
'estimator__holiday_post_num_days': [1],
'estimator__holiday_pre_post_num_dict': [None],
'estimator__daily_event_df_dict': [None],
'estimator__feature_sets_enabled': ['auto'],
'estimator__fit_algorithm_dict': [{
'fit_algorithm': 'ridge',
'fit_algorithm_params': None}],
'estimator__max_daily_seas_interaction_order': [0],
'estimator__max_weekly_seas_interaction_order': [2],
'estimator__extra_pred_cols': [[]],
'estimator__drop_pred_cols': [None],
'estimator__explicit_pred_cols': [None],
'estimator__min_admissible_value': [None],
'estimator__max_admissible_value': [None],
'estimator__normalize_method': [None],
'estimator__autoreg_dict': ['auto'],
'estimator__simulation_num': [10],
'estimator__regressor_cols': [[]],
'estimator__lagged_regressor_dict': [None],
'estimator__regression_weight_col': [None],
'estimator__uncertainty_dict': [None],
'estimator__origin_for_time_vars': [None],
'estimator__train_test_thresh': [None],
'estimator__training_fraction': [None]
},
{
'estimator__yearly_seasonality': [0],
| |
# <filename>microbenthos/model/simulation.py
"""
Module to handle the simulation of microbenthos models
"""
from __future__ import division
import importlib
import logging
import time
from collections import deque
from fipy import PhysicalField, Variable
from .model import MicroBenthosModel
from ..utils import CreateMixin, snapshot_var
class Simulation(CreateMixin):
"""
This class enables the process of repeatedly solving the model's
equations for a (small) time step to a certain numerical accuracy,
and then incrementing the model clock. During the evolution of the
simulation, the state of the model as well as the simulation is yielded
repeatedly.
Numerically approximating the solution to a set of partial differential
equations requires that the solver system has a reasonable target accuracy
("residual") and enough attempts ("sweeps") to reach both a stable and
accurate approximation for a time step. This class attempts to abstract out
these optimizations for the user, by performing adaptive time-stepping. The
user needs to specify a worst-case residual ( :attr:`.max_residual`),
maximum number of sweeps per time-step ( :attr:`.max_sweeps`) and the range
of time-step values to explore during evolution (:attr:`.simtime_lims`).
During the evolution of the simulation, the time-step is penalized if the
max residual is overshot or max sweeps reached. If not, the reward is a bump
up in the time-step duration, allowing for faster evolution of the
simulation.
See Also:
The scheme of simulation :meth:`.evolution`.
The adaptive scheme to :meth:`.update_time_step`.
"""
schema_key = 'simulation'
FIPY_SOLVERS = ('scipy', 'pyAMG', 'trilinos', 'pysparse')
    def __init__(self,
                 simtime_total = 6,
                 simtime_days = None,
                 simtime_lims=(0.1, 120),
                 snapshot_interval = 60,
                 fipy_solver = 'scipy',
                 max_sweeps=100,
                 max_residual=1e-12,
                 ):
        """
        Set up the simulation parameters for adaptive time-stepping.

        Args:
            simtime_total (float, PhysicalField): The number of hours for the
                simulation to run (default: 6)

            simtime_days (float): The number of days (in terms of the
                model's irradiance cycle) the simulation should run for. Note
                that specifying this will override the given
                :attr:`.simtime_total` when the :attr:`.model` is supplied.

            simtime_lims (float, PhysicalField): The minimum and maximum
                limits for the :attr:`simtime_step` for adaptive
                time-stepping. This should be supplied as a pair of values,
                which are assumed to be in seconds and cast into
                PhysicalField internally. (default: (0.1, 120))

            max_sweeps (int): Maximum number of sweeps to attempt per
                timestep (default: 100)

            max_residual (float): Maximum residual value for the solver at a
                timestep; must satisfy 0 < max_residual < 1e-3
                (default: 1e-12)

            snapshot_interval (int, float, :class:`PhysicalField`): the
                duration in seconds of the model clock between yielding
                snapshots of the model state for exporters (default: 60)

            fipy_solver (str): Name of the fipy solver to use. One of
                ``('scipy', 'pyAMG', 'trilinos', 'pysparse')``
                (default: "scipy")

        Raises:
            ValueError: if ``simtime_days`` is not positive, if
                ``max_residual`` is out of range, or if the time-step
                limits/total are mutually inconsistent (validated by the
                property setters).
        """
        super(Simulation, self).__init__()
        # the __init__ call is deliberately empty. will implement
        # cooperative inheritance only
        # when necessary
        self.logger = logging.getLogger(__name__)
        # becomes True once evolution begins; guards against reconfiguration
        self._started = False
        self._solver = None
        self._fipy_solver = None
        self.fipy_solver = fipy_solver
        self._simtime_lims = None
        self._simtime_total = None
        self._simtime_step = None
        #: Number of days to simulate in terms of the model's irradiance source
        self.simtime_days = None
        if simtime_days:
            simtime_days = float(simtime_days)
            if simtime_days <= 0:
                raise ValueError('simtime_days should be >0, not {:.2f}'.format(
                    simtime_days))
            self.simtime_days = simtime_days
        # Order matters: the lims must exist before total and step, since the
        # property setters cross-validate against each other.
        self.simtime_lims = simtime_lims
        self.simtime_total = simtime_total
        self.simtime_step = self.simtime_lims[0]
        self.snapshot_interval = PhysicalField(snapshot_interval, 's')
        self.max_residual = float(max_residual)
        if not (0 < self.max_residual < 1e-3):
            raise ValueError(
                'Max residual should be a small positive number, '
                'not {:.3g}'.format(
                    self.max_residual))
        # bounded history of recent solver residuals (last 10 entries)
        self._residualQ = deque([], maxlen=10)
        self._max_sweeps = None
        self.max_sweeps = max_sweeps
        # bounded history of sweeps used in recent timesteps (last 5 entries)
        self._sweepsQ = deque([], maxlen=5)
        self._model = None
@property
def started(self):
"""
Returns:
bool: Flag for if the sim evolution has started
"""
return self._started
@property
def fipy_solver(self):
return self._fipy_solver
@fipy_solver.setter
def fipy_solver(self, val):
if val not in self.FIPY_SOLVERS:
raise ValueError(
'Solver {!r} not in {}'.format(val, self.FIPY_SOLVERS))
if self.started:
raise RuntimeError('Fipy solver cannot be changed after started')
self._fipy_solver = val
@property
def simtime_total(self):
"""
The number of hours of the model clock the simulation should be
evolved for.
The supplied value must be larger than the time-steps allowed. Also, it
may be over-ridden by supplying :attr:`.simtime_days`.
Returns:
PhysicalField: duration in hours
"""
return self._simtime_total
@simtime_total.setter
def simtime_total(self, val):
try:
val = PhysicalField(val, 'h')
except TypeError:
raise ValueError(
'simtime_total {!r} not compatible with time units'.format(val))
if val <= 0:
raise ValueError('simtime_total should be > 0')
if self.simtime_step is not None:
if val <= self.simtime_lims[0]:
raise ValueError(
'simtime_total {} should be > step {}'.format(val,
self.simtime_lims[
0]))
self._simtime_total = val
@property
def simtime_step(self):
"""
The current time-step duration. While setting, the supplied value will
be clipped to within :attr:`simtime_lims`.
Returns:
PhysicalField: in seconds
"""
return self._simtime_step
@simtime_step.setter
def simtime_step(self, val):
try:
val = PhysicalField(val, 's')
except TypeError:
raise ValueError(
'simtime_step {!r} not compatible with time units'.format(val))
dtMin, dtMax = self.simtime_lims
# val = min(max(val, dtMin), dtMax)
val = min(val, dtMax)
assert hasattr(val, 'unit')
if self.simtime_total is not None:
if self.simtime_total <= val:
raise ValueError(
'simtime_total {} should be > step {}'.format(
self.simtime_total, val))
self._simtime_step = val
@property
def simtime_lims(self):
"""
The limits for the time-step duration allowed during evolution.
This parameter determines the degree to which the simulation evolution
can be speeded up. In phases of the model evolution where the numerical
solution is reached within a few sweeps, the clock would run at the max
limit, whereas when a large number of sweeps are required, it would be
penalized towards the min limit.
A high max value enables faster evolution, but can also lead to
numerical inaccuracy ( higher residual) or solution breakdown (numerical
error) during :meth:`.run_timestep`. A small enough min value allows
recovery, but turning back the clock to the previous time step and
restarting with the min timestep and allowing subsequent relaxation.
Args:
vals (float, PhysicalField): the (min, max) durations in seconds
Returns:
lims (tuple): The (min, max) limits of :attr:`simtime_step` each
as a :class:`.PhysicalField`
"""
return self._simtime_lims
@simtime_lims.setter
def simtime_lims(self, vals):
if vals is None:
lmin = PhysicalField(0.1, 's')
# lmax = (self.simtime_total / 25.0).inUnitsOf('s').floor()
lmax = PhysicalField(120, 's')
else:
lmin, lmax = [PhysicalField(float(_), 's') for _ in vals]
if not (0 < lmin < lmax):
raise ValueError(
'simtime_lims ({}, {}) are not positive and in order'.format(
lmin, lmax))
self._simtime_lims = (lmin, lmax)
self.logger.debug('simtime_lims set: {}'.format(self._simtime_lims))
@property
def max_sweeps(self):
"""
The maximum number of sweeps allowed for a timestep
Args:
val (int): should be > 0
Returns:
int
"""
return self._max_sweeps
@max_sweeps.setter
def max_sweeps(self, val):
try:
val = int(val)
assert val > 0
self._max_sweeps = val
except:
raise ValueError('max_sweeps {} should be > 0'.format(val))
    @property
    def model(self) -> MicroBenthosModel:
        """
        The model to run the simulation on. This is typically an instance of
        :class:`~microbenthos.MicroBenthosModel` or its subclasses. The
        interface it must provide is:

            * a method :meth:`create_full_equation()`
            * an attribute :attr:`full_eqn` created by the above method, which
              is a :class:`~fipy.terms.binaryTerm._BinaryTerm` that has a
              :meth:`sweep()` method.
            * a method :meth:`model.update_vars()` which is called before each
              timestep
            * a method :meth:`model.clock.increment_time(dt)` which is called
              after each timestep

        Additionally, if :attr:`.simtime_days` is set, then setting the model
        will try to find the ``"env.irradiance"`` object and use its
        :attr:`.hours_total` attribute to set the :attr:`.simtime_total`.

        Args:
            model (:class:`~microbenthos.MicroBenthosModel`): model instance

        Returns:
            :class:`~microbenthos.MicroBenthosModel`

        Raises:
            RuntimeError: if a model has already been set
            ValueError: if the model's interface does not match the above
            ValueError: if :attr:`.model.full_eqn` does not get created
                even after :meth:`.model.create_full_equation` is called.
        """
        return self._model
@model.setter
def model(self, m):
if self.model:
raise RuntimeError('Model already set')
full_eqn = getattr(m, 'full_eqn', None)
if full_eqn is None:
if hasattr(m, 'create_full_equation'):
m.create_full_equation()
full_eqn = getattr(m, 'full_eqn', None)
if full_eqn is None:
raise ValueError(
'Model {!r} (type={}) does not have a valid equation'.format(
m, type(m)))
def recursive_hasattr(obj, path, is_callable = False):
parts = path.split('.')
S = obj
FOUND = False
for p in parts:
if hasattr(S, p):
S = getattr(S, p)
FOUND = True
else:
FOUND = False
break
if not FOUND:
return False
else:
if is_callable:
return callable(S)
else:
return True
expected_attrs = ['clock', 'full_eqn']
expected_callables = ['full_eqn.sweep', 'update_vars',
'clock.increment_time']
failed_attrs = tuple(
filter(lambda x: not recursive_hasattr(m, x),
expected_attrs))
failed_callables = tuple(
filter(lambda x: not recursive_hasattr(m, x, is_callable=True),
expected_callables))
if failed_attrs:
self.logger.error(
'Model is missing required attributes: {}'.format(failed_attrs))
if failed_callables:
self.logger.error('Model is missing required callables: {}'.format(
failed_callables))
if failed_callables or failed_attrs:
raise | |
a common
# use-case for it.
return (str.format(
'unsatisfiable dependency: "{}" ({}) is installed,'
' but "{}" requires {}', node.name,
required_version, depender_name, version_spec),
new_pkgs)
else:
normal_version = _normalize_version_tag(required_version)
req_semver = semver.Version.coerce(normal_version)
if version_spec.startswith('branch='):
version_spec = version_spec[len('branch='):]
return (str.format(
'unsatisfiable dependency: "{}" ({}) is installed,'
' but "{}" requires {}', node.name,
required_version, depender_name, version_spec),
new_pkgs)
else:
try:
semver_spec = semver.Spec(version_spec)
except ValueError:
return (str.format(
'package "{}" has invalid semver spec: {}',
depender_name, version_spec), new_pkgs)
if req_semver not in semver_spec:
return (str.format(
'unsatisfiable dependency: "{}" ({}) is installed,'
' but "{}" requires {}', node.name,
required_version, depender_name, version_spec),
new_pkgs)
else:
# Choose best version that satisfies constraints
best_version = None
need_branch = False
need_version = False
def no_best_version_string(node):
rval = str.format(
'"{}" has no version satisfying dependencies:\n',
node.name)
for depender_name, version_spec in node.dependers.items():
rval += str.format('\t"{}" requires: "{}"\n',
depender_name, version_spec)
return rval
for _, version_spec in node.dependers.items():
if version_spec.startswith('branch='):
need_branch = True
elif version_spec != '*':
need_version = True
if need_branch and need_version:
return (no_best_version_string(node), new_pkgs)
if need_branch:
branch_name = None
for depender_name, version_spec in node.dependers.items():
if version_spec == '*':
continue
if not branch_name:
branch_name = version_spec[len('branch='):]
continue
if branch_name != version_spec[len('branch='):]:
return (no_best_version_string(node), new_pkgs)
if branch_name:
best_version = branch_name
else:
best_version = node.info.default_branch
elif need_version:
for version in node.info.versions[::-1]:
normal_version = _normalize_version_tag(version)
req_semver = semver.Version.coerce(normal_version)
satisfied = True
for depender_name, version_spec in node.dependers.items():
try:
semver_spec = semver.Spec(version_spec)
except ValueError:
return (str.format(
'package "{}" has invalid semver spec: {}',
depender_name, version_spec), new_pkgs)
if req_semver not in semver_spec:
satisfied = False
break
if satisfied:
best_version = version
break
if not best_version:
return (no_best_version_string(node), new_pkgs)
else:
# Must have been all '*' wildcards or no dependers
best_version = node.info.best_version()
new_pkgs.append((node.info, best_version, node.is_suggestion))
return ('', new_pkgs)
def bundle(self, bundle_file, package_list, prefer_existing_clones=False):
"""Creates a package bundle.
Args:
bundle_file (str): filesystem path of the zip file to create.
package_list (list of (str, str)): a list of (git URL, version)
string tuples to put in the bundle. If the version string is
empty, the latest available version of the package is used.
prefer_existing_clones (bool): if True and the package list contains
a package at a version that is already installed, then the
existing git clone of that package is put into the bundle
instead of cloning from the remote repository.
Returns:
str: empty string if the bundle is successfully created,
else an error string explaining what failed.
"""
bundle_dir = os.path.join(self.scratch_dir, 'bundle')
delete_path(bundle_dir)
make_dir(bundle_dir)
manifest_file = os.path.join(bundle_dir, 'manifest.txt')
config = configparser.ConfigParser(delimiters='=')
config.optionxform = str
config.add_section('bundle')
def match_package_url_and_version(git_url, version):
for ipkg in self.installed_packages():
if ipkg.package.git_url != git_url:
continue
if ipkg.status.current_version != version:
continue
return ipkg
return None
for git_url, version in package_list:
name = name_from_path(git_url)
clonepath = os.path.join(bundle_dir, name)
config.set('bundle', git_url, version)
if prefer_existing_clones:
ipkg = match_package_url_and_version(git_url, version)
if ipkg:
src = os.path.join(
self.package_clonedir, ipkg.package.name)
shutil.copytree(src, clonepath, symlinks=True)
clone = git.Repo(clonepath)
clone.git.reset(hard=True)
clone.git.clean('-f', '-x', '-d')
for modified_config in self.modified_config_files(ipkg):
dst = os.path.join(clonepath, modified_config[0])
shutil.copy2(modified_config[1], dst)
continue
try:
git_clone(git_url, clonepath, shallow=(not is_sha1(version)))
except git.exc.GitCommandError as error:
return 'failed to clone {}: {}'.format(git_url, error)
with open(manifest_file, 'w') as f:
config.write(f)
archive = shutil.make_archive(bundle_dir, 'gztar', bundle_dir)
delete_path(bundle_file)
shutil.move(archive, bundle_file)
return ''
def unbundle(self, bundle_file):
"""Installs all packages contained within a bundle.
Args:
bundle_file (str): the path to the bundle to install.
Returns:
str: an empty string if the operation was successful, else an error
message indicated what went wrong.
"""
LOG.debug('unbundle "%s"', bundle_file)
bundle_dir = os.path.join(self.scratch_dir, 'bundle')
delete_path(bundle_dir)
make_dir(bundle_dir)
try:
with tarfile.open(bundle_file) as tf:
tf.extractall(bundle_dir)
except Exception as error:
return str(error)
manifest_file = os.path.join(bundle_dir, 'manifest.txt')
config = configparser.ConfigParser(delimiters='=')
config.optionxform = str
if not config.read(manifest_file):
return 'invalid bundle: no manifest file'
if not config.has_section('bundle'):
return 'invalid bundle: no [bundle] section in manifest file'
manifest = config.items('bundle')
for git_url, version in manifest:
package = Package(git_url=git_url, name=git_url.split('/')[-1],
canonical=True)
clonepath = os.path.join(self.package_clonedir, package.name)
delete_path(clonepath)
shutil.move(os.path.join(bundle_dir, package.name), clonepath)
LOG.debug('unbundle installing "%s"', package.name)
error = self._install(package, version, use_existing_clone=True)
if error:
return error
return ''
def test(self, pkg_path, version=''):
"""Test a package.
Args:
pkg_path (str): the full git URL of a package or the shortened
path/name that refers to it within a package source. E.g. for
a package source called "zeek" with package named "foo" in
:file:`alice/zkg.index`, the following inputs may refer
to the package: "foo", "alice/foo", or "zeek/alice/foo".
version (str): if not given, then the latest git version tag is
used (or if no version tags exist, the default branch like
"main" or "master" is used). If given, it may be either a git
version tag or a git branch name.
Returns:
(str, bool, str): a tuple containing an error message string,
a boolean indicating whether the tests passed, as well as a path
to the directory in which the tests were run. In the case
where tests failed, the directory can be inspected to figure out
what went wrong. In the case where the error message string is
not empty, the error message indicates the reason why tests could
not be run.
"""
pkg_path = canonical_url(pkg_path)
LOG.debug('testing "%s"', pkg_path)
pkg_info = self.info(pkg_path, version=version, prefer_installed=False)
if pkg_info.invalid_reason:
return (pkg_info.invalid_reason, 'False', '')
if 'test_command' not in pkg_info.metadata:
return ('Package does not specify a test_command', False, '')
if not version:
version = pkg_info.metadata_version
package = pkg_info.package
test_dir = os.path.join(self.package_testdir, package.name)
clone_dir = os.path.join(test_dir, 'clones')
stage_script_dir = os.path.join(test_dir, 'scripts', 'packages')
stage_plugin_dir = os.path.join(test_dir, 'plugins', 'packages')
delete_path(test_dir)
make_dir(clone_dir)
make_dir(stage_script_dir)
make_dir(stage_plugin_dir)
request = [(package.qualified_name(), version)]
invalid_deps, new_pkgs = self.validate_dependencies(request, False)
if invalid_deps:
return (invalid_deps, False, test_dir)
pkgs = []
pkgs.append((pkg_info, version))
for info, version, _ in new_pkgs:
pkgs.append((info, version))
# Clone all packages, checkout right version, and build/install to
# staging area.
for info, version in pkgs:
clonepath = os.path.join(clone_dir, info.package.name)
try:
clone = _clone_package(info.package, clonepath, version)
except git.exc.GitCommandError as error:
LOG.warning('failed to clone git repo: %s', error)
return ('failed to clone {}'.format(info.package.git_url),
False, test_dir)
try:
git_checkout(clone, version)
except git.exc.GitCommandError as error:
LOG.warning('failed to checkout git repo version: %s', error)
return (str.format('failed to checkout {} of {}',
version, info.package.git_url),
False, test_dir)
fail_msg = self._stage(info.package, version,
clone, stage_script_dir, stage_plugin_dir)
if fail_msg:
return (fail_msg, False, test_dir)
# Finally, run tests (with correct environment set)
test_command = pkg_info.metadata['test_command']
zeek_config = find_program('zeek-config')
path_option = '--zeekpath'
if not zeek_config:
zeek_config = find_program('bro-config')
path_option = '--bropath'
zeekpath = os.environ.get('ZEEKPATH')
if not zeekpath:
zeekpath = os.environ.get('BROPATH')
pluginpath = os.environ.get('ZEEK_PLUGIN_PATH')
if not pluginpath:
pluginpath = os.environ.get('BRO_PLUGIN_PATH')
if zeek_config:
cmd = subprocess.Popen([zeek_config, path_option, '--plugin_dir'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=1, universal_newlines=True)
line1 = read_zeek_config_line(cmd.stdout)
line2 = read_zeek_config_line(cmd.stdout)
if not zeekpath:
zeekpath = line1
if not pluginpath:
pluginpath = line2
else:
LOG.warning('zeek-config not found when running tests for %s',
package.name)
return ('no "zeek-config" or "bro-config" found in PATH', False, test_dir)
zeekpath = os.path.dirname(stage_script_dir) + ':' + zeekpath
pluginpath = os.path.dirname(stage_plugin_dir) + ':' + pluginpath
env = os.environ.copy()
env['ZEEKPATH'] = zeekpath
env['ZEEK_PLUGIN_PATH'] = pluginpath
env['BROPATH'] = zeekpath
env['BRO_PLUGIN_PATH'] = pluginpath
cwd = os.path.join(clone_dir, package.name)
outfile = os.path.join(cwd, 'zkg.test_command.stdout')
errfile = os.path.join(cwd, 'zkg.test_command.stderr')
LOG.debug('running test_command for %s with cwd="%s"'
' and ZEEKPATH/BROPATH="%s": %s',
package.name, cwd, zeekpath, test_command)
with open(outfile, 'w') as test_stdout, open(errfile, 'w') as test_stderr:
cmd = subprocess.Popen(test_command, shell=True, cwd=cwd, env=env,
stdout=test_stdout, stderr=test_stderr)
return ('', cmd.wait() == 0, test_dir)
def _stage(self, package, version, clone,
stage_script_dir, stage_plugin_dir):
metadata_file = _pick_metadata_file(clone.working_dir)
# First use raw parser so no value interpolation takes place.
raw_metadata_parser = configparser.RawConfigParser()
invalid_reason = _parse_package_metadata(
raw_metadata_parser, metadata_file)
if invalid_reason:
return invalid_reason
raw_metadata = _get_package_metadata(raw_metadata_parser)
requested_user_vars = user_vars(raw_metadata)
if requested_user_vars is None:
return "package has malformed 'user_vars' metadata field"
substitutions = {
'bro_dist': self.zeek_dist,
'zeek_dist': self.zeek_dist,
'package_base': self.package_clonedir,
}
substitutions.update(self.user_vars)
for k, v, _ in requested_user_vars:
val_from_env = os.environ.get(k)
if val_from_env:
substitutions[k] = val_from_env
if k not in substitutions:
substitutions[k] = v
metadata_parser = configparser.ConfigParser(defaults=substitutions)
invalid_reason = _parse_package_metadata(
metadata_parser, metadata_file)
if invalid_reason:
return invalid_reason
metadata = _get_package_metadata(metadata_parser)
LOG.debug('building "%s": version %s', package, version)
build_command = metadata.get('build_command', '')
if build_command:
LOG.debug('building "%s": running build_command: %s',
package, build_command)
bufsize = 4096
build = subprocess.Popen(build_command,
shell=True, cwd=clone.working_dir,
bufsize=bufsize,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
| |
import sys, os, re
import numpy as np
import time
import argparse
import pickle
from random import *
import math

# Fix: the path to the driver package must be extended *before* importing
# hypredriver. Previously `from hypredriver import hypredriver` ran first,
# which fails with ImportError unless hypre_driver/ is already on sys.path.
sys.path.insert(0, os.path.abspath(__file__ + "/../hypre_driver/"))
from hypredriver import hypredriver

solver = 3 # Bommer AMG
# max_setup_time = 1000.
# max_solve_time = 1000.
coeffs_c = "-c 1 1 1 " # specify c-coefficients in format "-c 1 1 1 "
coeffs_a = "-a 0 0 0 " # specify a-coefficients in format "-a 1 1 1 " leave as empty string for laplacian and Poisson problems
problem_name = "-laplacian " # "-difconv " for convection-diffusion problems to include the a coefficients
def main():
args = parse_args()
JOBID = args.jobid
TUNER_NAME = args.optimization
machine = args.machine
os.environ['MACHINE_NAME'] = machine
os.environ['TUNER_NAME'] = TUNER_NAME
# params = [(nx, ny, nz, coeffs_a, coeffs_c, problem_name, solver,
# Px, Py, Pz, strong_threshold,
# trunc_factor, P_max_elmts, coarsen_type, relax_type,
# smooth_type, smooth_num_levels, interp_type, agg_num_levels, nthreads, npernode)]
# failed3 params, Nproc = 475
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 19, 5, 5, 0.6243, 0.699, 9, 0, 6, 7, 2, 6, 3, 1, 32)]
# original problematic one
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 15, 5, 5, 0.6243, 0.699, 9, 0, 6, 7, 2, 6, 3, 1, 32)]
# failed
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 12, 5, 5, 0.6243, 0.699, 9, 0, 6, 7, 2, 6, 3, 1, 32)]
# failed
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 11, 5, 5, 0.6243, 0.699, 9, 0, 6, 7, 2, 6, 3, 1, 32)]
# failed
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 10, 5, 5, 0.6243, 0.699, 9, 0, 6, 7, 2, 6, 3, 1, 32)]
# works, 250
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 5, 5, 5, 0.6243, 0.699, 9, 0, 6, 7, 2, 6, 3, 1, 32)]
# works
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 10, 6, 5, 0.6243, 0.699, 9, 0, 6, 7, 2, 6, 3, 1, 32)]
# failed
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 20, 20, 1, 0.6243, 0.699, 9, 0, 6, 7, 2, 6, 3, 1, 32)]
# failed
# failed4 params
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 1, 298, 1, 0.44097267, 0.70830443, 2, 0, 8, 7, 3, 5, 5, 1, 32)]
# failed
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 2, 149, 1, 0.44097267, 0.70830443, 2, 0, 8, 7, 3, 5, 5, 1, 32)]
# failed
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 2, 50, 2, 0.44097267, 0.70830443, 2, 0, 8, 7, 3, 5, 5, 1, 32)]
# worked
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 2, 16, 9, 0.44097267, 0.70830443, 2, 0, 8, 7, 3, 5, 5, 1, 32)]
# failed
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 2, 60, 2, 0.44097267, 0.70830443, 2, 0, 8, 7, 3, 5, 5, 1, 32)]
# worked
# failed5 params
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 2, 177, 1, 0.30017979, 0.80794053, 12, 3, 6, 7, 5, 6, 2, 1, 32)]
# failed
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 2, 128, 1, 0.30017979, 0.80794053, 12, 3, 6, 7, 5, 6, 2, 1, 32)]
# worked, 256
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 2, 129, 1, 0.30017979, 0.80794053, 12, 3, 6, 7, 5, 6, 2, 1, 32)]
# worked, 258
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 2, 130, 1, 0.30017979, 0.80794053, 12, 3, 6, 7, 5, 6, 2, 1, 32)]
# failed, 260
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 1, 259, 1, 0.30017979, 0.80794053, 12, 3, 6, 7, 5, 6, 2, 1, 32)]
# failed 259
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 2, 43, 3, 0.30017979, 0.80794053, 12, 3, 6, 7, 5, 6, 2, 1, 32)]
# worked, 258
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 43, 3, 2, 0.30017979, 0.80794053, 12, 3, 6, 7, 5, 6, 2, 1, 32)]
# worked, 258
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 2, 65, 2, 0.30017979, 0.80794053, 12, 3, 6, 7, 5, 6, 2, 1, 32)]
# failed, 260
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian ', 3, 1, 289, 1, 0.17927643772415036, 0.4794427825943006, 7, '10', '0', '6', 1, '6', 2, 1, 32)]
# worked, 289
# params = [(194, 168, 157, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 162, 4, 1, 0.15118518819041837, 0.16462433400943832, 10, 6, 18, 6, 3, 8, 1, 1, 32)]
# failed
# params = [(194, 168, 157, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 81, 4, 2, 0.15118518819041837, 0.16462433400943832, 10, 6, 18, 6, 3, 8, 1, 1, 32)]
# failed
# params = [(194, 168, 157, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 8, 4, 2, 0.15118518819041837, 0.16462433400943832, 10, 6, 18, 6, 3, 8, 1, 1, 32)]
# failed
# params = [(172, 175, 159, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 62, 2, 2, 0.9941, 0.348, 6, 4, 18, 6, 2, 3, 1, 1, 32)]
# worked
# params = [(173, 188, 188, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 62, 1, 4, 0.7184, 0.5767, 4, 1, 6, 8, 1, 3, 5, 1, 32)]
# worked
# params = [(194, 168, 157, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 8, 30, 1, 0.4115, 0.22, 12, 1, 16, 6, 4, 6, 1, 1, 32)]
# worked
# params = [(184, 165, 153, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 1, 94, 2, 0., 0., 12, 1, 6, 5, 0, 12, 0, 1, 32)]
# worked, opentuner
# params = [(196, 166, 189, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian', 3, 2, 33, 3, 0.54664604, 0.3110243, 9, 3, -1, 6, 4, 6, 3, 1, 32)]
# worked, hpbandster
# test budget
params_task0 = [[(55, 66, 70, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian ', 3, 19, 1, 1, 0.3212934323637363, 0.9471727354342201, 2, '0', '18', '9', 4, '0', 3, 1, 30)],
[(55, 66, 70, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian ', 3, 8, 1, 3, 0.46207559273813614, 0.16093209390282756, 12, '10', '6', '9', 5, '12', 3, 1, 31)],
| |
from __future__ import annotations
import json
import tempfile
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import python_on_whales.components.image.cli_wrapper
from python_on_whales.client_config import (
ClientConfig,
DockerCLICaller,
ReloadableObject,
)
from python_on_whales.components.buildx.imagetools.cli_wrapper import ImagetoolsCLI
from python_on_whales.components.buildx.models import BuilderInspectResult
from python_on_whales.utils import ValidPath, format_dict_for_cli, run, to_list
class GetImageMethod(Enum):
    # Internal selector for how a built image is retrieved after a build.
    # NOTE(review): inferred from member names only -- TAG appears to mean
    # "look the image up by its tag", IIDFILE "read the image ID from an
    # --iidfile"; confirm against the call sites.
    TAG = 1
    IIDFILE = 2
class Builder(ReloadableObject):
    """A buildx builder instance.

    Usable as a context manager: leaving the ``with`` block removes the
    builder (see :meth:`remove`).
    """

    def __init__(
        self,
        client_config: ClientConfig,
        reference: Optional[str],
        is_immutable_id=False,
    ):
        super().__init__(client_config, "name", reference, is_immutable_id)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # the builder is disposed of when the context manager exits
        self.remove()

    def _fetch_and_parse_inspect_result(
        self, reference: Optional[str]
    ) -> BuilderInspectResult:
        # `docker buildx inspect [reference]` -- no reference means the
        # current builder
        inspect_cmd = self.docker_cmd + ["buildx", "inspect"]
        if reference is not None:
            inspect_cmd.append(reference)
        return BuilderInspectResult.from_str(run(inspect_cmd))

    @property
    def name(self) -> str:
        """str: the builder's (immutable) name."""
        return self._get_immutable_id()

    @property
    def driver(self) -> str:
        """str: the builder's driver, from ``docker buildx inspect``."""
        return self._get_inspect_result().driver

    def remove(self):
        """Removes this builder. After this operation the builder cannot be
        used anymore.

        When the builder is used as a context manager, this is called
        automatically on exiting the context manager.

        ```python
        from python_on_whales import docker

        buildx_builder = docker.buildx.create(use=True)
        with buildx_builder:
            docker.build(".")

        # now the variable buildx_builder is not usable since we're out of
        # the context manager. The .remove() method was called behind the
        # scenes. Since it was the current builder, 'default' is now the
        # current builder.
        ```
        """
        BuildxCLI(self.client_config).remove(self)
# A builder may be referenced either by its name (str) or as a Builder object.
ValidBuilder = Union[str, Builder]
class BuildxCLI(DockerCLICaller):
    def __init__(self, client_config: ClientConfig):
        """Wrap the ``docker buildx`` CLI for the given client configuration."""
        super().__init__(client_config)
        # expose the `docker buildx imagetools` subcommands under .imagetools
        self.imagetools = ImagetoolsCLI(self.client_config)
def bake(
    self,
    targets: Union[str, List[str], None] = None,
    builder: Optional[ValidBuilder] = None,
    files: Union[ValidPath, List[ValidPath], None] = None,
    load: bool = False,
    cache: bool = True,
    print: bool = False,
    progress: Union[str, bool] = "auto",
    pull: bool = False,
    push: bool = False,
    set: Optional[Dict[str, str]] = None,
    variables: Optional[Dict[str, str]] = None,
) -> Dict[str, Dict[str, Dict[str, Any]]]:
    """Bake is similar to make, it allows you to build things declared in a file.

    For example it allows you to build multiple docker images in parallel.

    The CLI docs is [here](https://github.com/docker/buildx#buildx-bake-options-target)
    and it contains a lot more information.

    # Arguments
        targets: Targets or groups of targets to build.
        builder: The builder to use.
        files: Build definition file(s)
        load: Shorthand for `set=["*.output=type=docker"]`
        cache: Whether to use the cache or not.
        print: Do nothing, just returns the config.
        progress: Set type of progress output (`"auto"`, `"plain"`, `"tty"`,
            or `False`). Use plain to keep the container output on screen
        pull: Always try to pull the newer version of the image
        push: Shorthand for `set=["*.output=type=registry"]`
        set: A list of overrides in the form `"targetpattern.key=value"`.
        variables: A dict containing the values of the variables defined in the
            hcl file. See <https://github.com/docker/buildx#hcl-variables-and-functions>

    # Returns
        The configuration used for the bake (files merged + override with
        the arguments used in the function). It's the loaded json you would
        obtain by running `docker buildx bake --print --load my_target` if
        your command was `docker buildx bake --load my_target`. Some example here.

    ```python
    from python_on_whales import docker

    # returns the config used and runs the builds
    config = docker.buildx.bake(["my_target1", "my_target2"], load=True)
    assert config == {
        "target": {
            "my_target1": {
                "context": "./",
                "dockerfile": "Dockerfile",
                "tags": ["pretty_image1:1.0.0"],
                "target": "out1",
                "output": ["type=docker"]
            },
            "my_target2": {
                "context": "./",
                "dockerfile": "Dockerfile",
                "tags": ["pretty_image2:1.0.0"],
                "target": "out2",
                "output": ["type=docker"]
            }
        }
    }

    # returns the config only, doesn't run the builds
    config = docker.buildx.bake(["my_target1", "my_target2"], load=True, print=True)
    ```
    """
    # Mutable containers must not be used as default argument values (the
    # same object would be shared across calls); the signature uses None
    # sentinels instead, normalized here. Behavior is unchanged for callers.
    targets = [] if targets is None else targets
    files = [] if files is None else files
    set = {} if set is None else set
    variables = {} if variables is None else variables

    full_cmd = self.docker_cmd + ["buildx", "bake"]

    full_cmd.add_flag("--no-cache", not cache)
    full_cmd.add_simple_arg("--builder", builder)
    full_cmd.add_flag("--load", load)
    full_cmd.add_flag("--pull", pull)
    full_cmd.add_flag("--push", push)
    full_cmd.add_flag("--print", print)
    # "auto" is the CLI default, and a bool (False) means "no progress",
    # which is handled via capture_stderr below, not via --progress.
    if progress != "auto" and isinstance(progress, str):
        full_cmd += ["--progress", progress]
    for file in to_list(files):
        full_cmd.add_simple_arg("--file", file)
    full_cmd.add_args_list("--set", format_dict_for_cli(set))
    targets = to_list(targets)
    # Variables are passed to the bake process through the environment.
    env = dict(variables)
    if print:
        return json.loads(run(full_cmd + targets, env=env))
    else:
        run(full_cmd + targets, capture_stderr=progress is False, env=env)
        # Run a second time with --print to recover the merged config
        # that was actually used for the build.
        return json.loads(run(full_cmd + ["--print"] + targets, env=env))
def build(
    self,
    context_path: ValidPath,
    add_hosts: Optional[Dict[str, str]] = None,
    allow: Optional[List[str]] = None,
    build_args: Optional[Dict[str, str]] = None,
    builder: Optional[ValidBuilder] = None,
    cache: bool = True,
    cache_from: Union[str, Dict[str, str], None] = None,
    cache_to: Union[str, Dict[str, str], None] = None,
    file: Optional[ValidPath] = None,
    labels: Optional[Dict[str, str]] = None,
    load: bool = False,
    network: Optional[str] = None,
    output: Optional[Dict[str, str]] = None,
    platforms: Optional[List[str]] = None,
    progress: Union[str, bool] = "auto",
    pull: bool = False,
    push: bool = False,
    secrets: Union[str, List[str], None] = None,
    ssh: Optional[str] = None,
    tags: Union[str, List[str], None] = None,
    target: Optional[str] = None,
) -> Optional[python_on_whales.components.image.cli_wrapper.Image]:
    """Build a Docker image with buildkit as backend.

    Alias: `docker.build(...)`

    A `python_on_whales.Image` is returned, even when using multiple tags.
    That is because it will produce a single image with multiple tags.
    If no image is loaded into the Docker daemon (if `push=True` for ex),
    then `None` is returned.

    # Arguments
        context_path: The path of the build context.
        add_hosts: Hosts to add. `add_hosts={"my_host1": "192.168.32.35"}`
        allow: List of extra privileges.
            Eg `allow=["network.host", "security.insecure"]`
        build_args: The build arguments.
            ex `build_args={"PY_VERSION": "3.7.8", "UBUNTU_VERSION": "20.04"}`.
        builder: Specify which builder to use.
        cache: Whether or not to use the cache
        cache_from: Works only with the container driver. Loads the cache
            (if needed) from a registry `cache_from="user/app:cache"` or
            a directory on the client `cache_from="type=local,src=path/to/dir"`.
            It's also possible to use a dict form for this argument. e.g.
            `cache_from=dict(type="local", src="path/to/dir")`
        cache_to: Works only with the container driver. Sends the resulting
            docker cache either to a registry `cache_to="user/app:cache"`,
            or to a local directory `cache_to="type=local,dest=path/to/dir"`.
            It's also possible to use a dict form for this argument. e.g.
            `cache_to=dict(type="local", dest="path/to/dir", mode="max")`
        file: The path of the Dockerfile
        labels: Dict of labels to add to the image.
            `labels={"very-secure": "1", "needs-gpu": "0"}` for example.
        load: Shortcut for `output=dict(type="docker")` If `True`,
            `docker.buildx.build` will return a `python_on_whales.Image`.
        network: which network to use when building the Docker image
        output: Output destination
            (format: `output={"type": "local", "dest": "path"}`)
            Possible output types are
            `["local", "tar", "oci", "docker", "image", "registry"]`.
            See [this link](https://github.com/docker/buildx#-o---outputpath-typetypekeyvalue)
            for more details about each exporter.
        platforms: List of target platforms when building the image. Ex:
            `platforms=["linux/amd64", "linux/arm64"]`
        progress: Set type of progress output (auto, plain, tty, or False).
            Use plain to keep the container output on screen
        pull: Always attempt to pull a newer version of the image
        push: Shorthand for `output=dict(type="registry")`.
        secrets: One or more secrets passed as string(s). For example
            `secrets="id=aws,src=/home/my_user/.aws/credentials"`
        ssh: SSH agent socket or keys to expose to the build
            (format is `default|<id>[=<socket>|<key>[,<key>]]` as a string)
        tags: Tag or tags to put on the resulting image.
        target: Set the target build stage to build.

    # Returns
        A `python_on_whales.Image` if a Docker image is loaded
        in the daemon after the build (the default behavior when
        calling `docker.build(...)`). Otherwise, `None`.
    """
    # Mutable containers must not be used as default argument values (the
    # same object would be shared across calls); the signature uses None
    # sentinels instead, normalized here. Behavior is unchanged for callers.
    add_hosts = {} if add_hosts is None else add_hosts
    allow = [] if allow is None else allow
    build_args = {} if build_args is None else build_args
    labels = {} if labels is None else labels
    output = {} if output is None else output
    secrets = [] if secrets is None else secrets
    tags = to_list([] if tags is None else tags)

    full_cmd = self.docker_cmd + ["buildx", "build"]

    # "auto" is the CLI default; a bool (False) means "no progress" and is
    # handled by capturing stderr when running, not via --progress.
    if progress != "auto" and isinstance(progress, str):
        full_cmd += ["--progress", progress]

    full_cmd.add_args_list(
        "--add-host", format_dict_for_cli(add_hosts, separator=":")
    )
    full_cmd.add_args_list("--allow", allow)
    full_cmd.add_args_list("--build-arg", format_dict_for_cli(build_args))
    full_cmd.add_simple_arg("--builder", builder)
    full_cmd.add_args_list("--label", format_dict_for_cli(labels))

    full_cmd.add_simple_arg("--ssh", ssh)

    full_cmd.add_flag("--pull", pull)
    full_cmd.add_flag("--push", push)
    full_cmd.add_flag("--load", load)
    full_cmd.add_simple_arg("--file", file)
    full_cmd.add_simple_arg("--target", target)
    # cache_from/cache_to accept either the raw CLI string or a dict form.
    if isinstance(cache_from, dict):
        full_cmd.add_simple_arg("--cache-from", format_dict_for_buildx(cache_from))
    else:
        full_cmd.add_simple_arg("--cache-from", cache_from)
    if isinstance(cache_to, dict):
        full_cmd.add_simple_arg("--cache-to", format_dict_for_buildx(cache_to))
    else:
        full_cmd.add_simple_arg("--cache-to", cache_to)
    full_cmd.add_args_list("--secret", to_list(secrets))
    if output != {}:
        full_cmd += ["--output", format_dict_for_buildx(output)]
    if platforms is not None:
        full_cmd += ["--platform", ",".join(platforms)]
    full_cmd.add_simple_arg("--network", network)
    full_cmd.add_flag("--no-cache", not cache)
    full_cmd.add_args_list("--tag", tags)

    will_load_image = self._build_will_load_image(builder, push, load, output)
    # very special_case, must be fixed https://github.com/docker/buildx/issues/420
    if (
        will_load_image
        and not tags
        and self.inspect(builder).driver == "docker-container"
    ):
        # we have no way of fetching the image because iidfile is wrong in this case.
        will_load_image = False

    if not will_load_image:
        full_cmd.append(context_path)
        run(full_cmd, capture_stderr=progress is False)
        return

    docker_image = python_on_whales.components.image.cli_wrapper.ImageCLI(
        self.client_config
    )
    if self._method_to_get_image(builder) == GetImageMethod.TAG:
        # The image was tagged: inspect it by tag after the build.
        full_cmd.append(context_path)
        run(full_cmd, capture_stderr=progress is False)
        return docker_image.inspect(tags[0])
    else:
        # No usable tag: ask buildx to write the image id to a temp file
        # (--iidfile) and inspect the image by id.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tmp_dir = Path(tmp_dir)
            iidfile = tmp_dir / "id_file.txt"
            full_cmd.add_simple_arg("--iidfile", iidfile)
            full_cmd.append(context_path)
            run(full_cmd, capture_stderr=progress is False)
            image_id = iidfile.read_text()
            return docker_image.inspect(image_id)
def _build_will_load_image(
self,
builder: Optional[str],
push: bool,
load: bool,
output: Optional[Dict[str, str]],
) -> bool:
if load:
return True
if push:
return False
if output != {}:
if output.get("type") == "docker" and "dest" not in output:
return True
else:
return False
# now load push and output are not set.
if self.inspect(builder).driver == "docker":
return True
return False
def _method_to_get_image(self, builder: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.